From a7e2da598aed54fc85922002c01cadeb06e90148 Mon Sep 17 00:00:00 2001 From: Dhruv-89 Date: Thu, 22 Jan 2026 13:22:14 +0530 Subject: [PATCH 1/2] [Feature] cortex ask --do: AI-powered command execution with auto-repair --- README.md | 226 +- cortex/ask.py | 2239 +++++++++---- cortex/cli.py | 5152 +++++++----------------------- cortex/demo.py | 715 +---- cortex/do_runner.py | 63 + cortex/do_runner/Untitled | 1 + cortex/do_runner/__init__.py | 129 + cortex/do_runner/database.py | 478 +++ cortex/do_runner/diagnosis.py | 2804 ++++++++++++++++ cortex/do_runner/diagnosis_v2.py | 1857 +++++++++++ cortex/do_runner/executor.py | 468 +++ cortex/do_runner/handler.py | 3700 +++++++++++++++++++++ cortex/do_runner/managers.py | 287 ++ cortex/do_runner/models.py | 352 ++ cortex/do_runner/terminal.py | 2351 ++++++++++++++ cortex/do_runner/verification.py | 1050 ++++++ cortex/semantic_cache.py | 30 +- cortex/system_info_generator.py | 800 +++++ cortex/test.py | 0 cortex/watch_service.py | 716 +++++ docs/ASK_DO_ARCHITECTURE.md | 741 +++++ scripts/setup_ask_do.py | 637 ++++ scripts/setup_ask_do.sh | 435 +++ 23 files changed, 20018 insertions(+), 5213 deletions(-) create mode 100644 cortex/do_runner.py create mode 100644 cortex/do_runner/Untitled create mode 100644 cortex/do_runner/__init__.py create mode 100644 cortex/do_runner/database.py create mode 100644 cortex/do_runner/diagnosis.py create mode 100644 cortex/do_runner/diagnosis_v2.py create mode 100644 cortex/do_runner/executor.py create mode 100644 cortex/do_runner/handler.py create mode 100644 cortex/do_runner/managers.py create mode 100644 cortex/do_runner/models.py create mode 100644 cortex/do_runner/terminal.py create mode 100644 cortex/do_runner/verification.py create mode 100644 cortex/system_info_generator.py create mode 100644 cortex/test.py create mode 100644 cortex/watch_service.py create mode 100644 docs/ASK_DO_ARCHITECTURE.md create mode 100755 scripts/setup_ask_do.py create mode 100755 scripts/setup_ask_do.sh diff 
--git a/README.md b/README.md index 1c1c00e55..bc8e12553 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,8 @@

Cortex Linux

- Cortex is an AI layer for Linux Debian/Ubuntu
- Instead of memorizing commands, googling errors, and copy-pasting from Stack Overflow — describe what you need. + AI-Powered Package Manager for Debian/Ubuntu
+ Install software using natural language. No more memorizing package names.

@@ -14,7 +14,7 @@ CI Status - License + License Python 3.10+ @@ -40,8 +40,8 @@ ## What is Cortex? -Cortex is an AI layer for Linux Debian/Ubuntu
-Instead of memorizing commands, googling errors, and copy-pasting from Stack Overflow — describe what you need. +Cortex is an AI-native package manager that understands what you want to install, even when you don't know the exact package name. + ```bash # Instead of googling "what's the package name for PDF editing on Ubuntu?" cortex install "something to edit PDFs" @@ -64,15 +64,11 @@ cortex install "tools for video compression" | Feature | Description | |---------|-------------| | **Natural Language** | Describe what you need in plain English | -| **Voice Input** | Hands-free mode with Whisper speech recognition ([F9 to speak](docs/VOICE_INPUT.md)) | | **Dry-Run Default** | Preview all commands before execution | | **Sandboxed Execution** | Commands run in Firejail isolation | | **Full Rollback** | Undo any installation with `cortex rollback` | -| **Role Management** | AI-driven system personality detection and tailored recommendations | -| **Docker Permission Fixer** | Fix root-owned bind mount issues automatically | | **Audit Trail** | Complete history in `~/.cortex/history.db` | | **Hardware-Aware** | Detects GPU, CPU, memory for optimized packages | -| **Predictive Error Prevention** | AI-driven checks for potential installation failures | | **Multi-LLM Support** | Works with Claude, GPT-4, or local Ollama models | --- @@ -97,12 +93,8 @@ python3 -m venv venv source venv/bin/activate # 3. Install Cortex -# Using pyproject.toml (recommended) pip install -e . -# Or install with dev dependencies -pip install -e ".[dev]" - # 4. Configure AI Provider (choose one): ## Option A: Ollama (FREE - Local LLM, no API key needed) @@ -118,8 +110,6 @@ echo 'OPENAI_API_KEY=your-key-here' > .env cortex --version ``` -> **💡 Zero-Config:** If you already have API keys from Claude CLI (`~/.config/anthropic/`) or OpenAI CLI (`~/.config/openai/`), Cortex will auto-detect them! Environment variables work immediately without prompting. 
See [Zero Config API Keys](docs/ZERO_CONFIG_API_KEYS.md). - ### First Run ```bash @@ -130,26 +120,60 @@ cortex install nginx --dry-run cortex install nginx --execute ``` ---- +### AI Command Execution Setup (`ask --do`) + +For the full AI-powered command execution experience, run the setup script: -## 🚀 Upgrade to Pro +```bash +# Full setup (Ollama + Watch Service + Shell Hooks) +./scripts/setup_ask_do.sh -Unlock advanced features with Cortex Pro: +# Or use Python directly +python scripts/setup_ask_do.py -| Feature | Community (Free) | Pro ($20/mo) | Enterprise ($99/mo) | -|---------|------------------|--------------|---------------------| -| Natural language commands | ✅ | ✅ | ✅ | -| Hardware detection | ✅ | ✅ | ✅ | -| Installation history | 7 days | 90 days | Unlimited | -| GPU/CUDA optimization | Basic | Advanced | Advanced | -| Systems per license | 1 | 5 | 100 | -| Cloud LLM connectors | ❌ | ✅ | ✅ | -| Priority support | ❌ | ✅ | ✅ | -| SSO/SAML | ❌ | ❌ | ✅ | -| Compliance reports | ❌ | ❌ | ✅ | -| Support | Community | Priority | Dedicated | +# Options: +# --no-docker Skip Docker/Ollama setup (use cloud LLM only) +# --model phi Use a smaller model (2GB instead of 4GB) +# --skip-watch Skip watch service installation +# --uninstall Remove all components +``` + +This script will: +1. **Set up Ollama** with a local LLM (Mistral by default) in Docker +2. **Install the Watch Service** for terminal monitoring +3. **Configure Shell Hooks** for command logging +4. 
**Verify everything works** + +#### Quick Start After Setup + +```bash +# Start an interactive AI session +cortex ask --do + +# Or with a specific task +cortex ask --do "install nginx and configure it for reverse proxy" + +# Check watch service status +cortex watch --status +``` -**[Compare Plans →](https://cortexlinux.com/pricing)** | **[Start Free Trial →](https://cortexlinux.com/pricing)** +#### Manual Setup (Alternative) + +If you prefer manual setup: + +```bash +# Install the Cortex Watch service (runs automatically on login) +cortex watch --install --service + +# Check status +cortex watch --status + +# For Ollama (optional - for local LLM) +docker run -d --name ollama -p 11434:11434 -v ollama:/root/.ollama ollama/ollama +docker exec ollama ollama pull mistral +``` + +This enables Cortex to monitor your terminal activity during manual intervention mode, providing real-time AI feedback and error detection. --- @@ -169,16 +193,6 @@ cortex history cortex rollback ``` -### Role Management - -```bash -# Auto-detect your system role using AI analysis of local context and patterns -cortex role detect - -# Manually set your system role to receive specific AI recommendations -cortex role set -``` - ### Command Reference | Command | Description | @@ -186,35 +200,27 @@ cortex role set | `cortex install ` | Install packages matching natural language query | | `cortex install --dry-run` | Preview installation plan (default) | | `cortex install --execute` | Execute the installation | -| `cortex docker permissions` | Fix file ownership for Docker bind mounts | -| `cortex role detect` | Automatically identifies the system's purpose | -| `cortex role set ` | Manually declare a system role | +| `cortex ask ` | Ask questions about your system | +| `cortex ask --do` | Interactive AI command execution mode | | `cortex sandbox ` | Test packages in Docker sandbox | | `cortex history` | View all past installations | | `cortex rollback ` | Undo a specific installation | +| `cortex 
watch --install --service` | Install terminal monitoring service | +| `cortex watch --status` | Check terminal monitoring status | | `cortex --version` | Show version information | | `cortex --help` | Display help message | -#### Daemon Commands - -| Command | Description | -|---------|-------------| -| `cortex daemon install --execute` | Install and enable the cortexd daemon | -| `cortex daemon uninstall --execute` | Stop and remove the daemon | -| `cortex daemon ping` | Test daemon connectivity | -| `cortex daemon version` | Show daemon version | -| `cortex daemon config` | Show daemon configuration | -| `cortex daemon reload-config` | Reload daemon configuration | - ### Configuration Cortex stores configuration in `~/.cortex/`: ``` ~/.cortex/ -├── config.yaml # User preferences -├── history.db # Installation history (SQLite) -└── audit.log # Detailed audit trail +├── config.yaml # User preferences +├── history.db # Installation history (SQLite) +├── audit.log # Detailed audit trail +├── terminal_watch.log # Terminal monitoring log +└── watch_service.log # Watch service logs ``` --- @@ -238,10 +244,10 @@ Cortex stores configuration in `~/.cortex/`: │ LLM Router │ │ Claude / GPT-4 / Ollama │ │ │ -│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ -│ │ Anthropic │ │ OpenAI │ │ Ollama │ │ -│ │ Claude │ │ GPT-4 │ │ Local │ │ -│ └─────────────┘ └─────────────┘ └─────────────┘ │ +│ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ +│ │ Anthropic │ │ OpenAI │ │ Ollama │ │ +│ │ Claude │ │ GPT-4 │ │ Local │ │ +│ └─────────────┘ └─────────────┘ └─────────────┘ │ └─────────────────────────────────────────────────────────────────┘ │ ▼ @@ -269,45 +275,31 @@ Cortex stores configuration in `~/.cortex/`: ``` cortex/ -├── cortex/ # Main Python package +├── cortex/ # Main package │ ├── cli.py # Command-line interface +│ ├── ask.py # AI Q&A and command execution │ ├── coordinator.py # Installation orchestration │ ├── llm_router.py # Multi-LLM routing -│ ├── daemon_client.py # IPC 
client for cortexd │ ├── packages.py # Package manager wrapper │ ├── hardware_detection.py │ ├── installation_history.py +│ ├── watch_service.py # Terminal monitoring service +│ ├── do_runner/ # AI command execution +│ │ ├── handler.py # Main execution handler +│ │ ├── terminal.py # Terminal monitoring +│ │ ├── diagnosis.py # Error diagnosis & auto-fix +│ │ └── verification.py # Conflict detection │ └── utils/ # Utility modules -├── daemon/ # C++ background daemon (cortexd) -│ ├── src/ # Daemon source code -│ ├── include/ # Header files -│ ├── tests/ # Unit & integration tests -│ ├── scripts/ # Build and setup scripts -│ └── README.md # Daemon documentation -├── tests/ # Python test suite +├── tests/ # Test suite ├── docs/ # Documentation +│ └── ASK_DO_ARCHITECTURE.md # ask --do deep dive ├── examples/ # Example scripts └── scripts/ # Utility scripts + ├── setup_ask_do.py # Full ask --do setup + ├── setup_ask_do.sh # Bash setup alternative + └── setup_ollama.py # Ollama-only setup ``` -### Background Daemon (cortexd) - -Cortex includes an optional C++ background daemon for system-level operations: - -```bash -# Install the daemon -cortex daemon install --execute - -# Check daemon status -cortex daemon ping -cortex daemon version - -# Run daemon tests (no installation required) -cortex daemon run-tests -``` - -See [daemon/README.md](daemon/README.md) for full documentation. - --- ## Safety & Security @@ -334,27 +326,16 @@ Found a vulnerability? Please report it responsibly: ## Troubleshooting

-"No API key found" - -Cortex auto-detects API keys from multiple locations. If none are found: +"ANTHROPIC_API_KEY not set" ```bash -# Option 1: Set environment variables (used immediately, no save needed) -export ANTHROPIC_API_KEY=sk-ant-your-key -cortex install nginx --dry-run - -# Option 2: Save directly to Cortex config -echo 'ANTHROPIC_API_KEY=sk-ant-your-key' > ~/.cortex/.env - -# Option 3: Use Ollama (free, local, no key needed) -export CORTEX_PROVIDER=ollama -python scripts/setup_ollama.py +# Verify .env file exists +cat .env +# Should show: ANTHROPIC_API_KEY=sk-ant-... -# Option 4: If you have Claude CLI installed, Cortex will find it automatically -# Just run: cortex install nginx --dry-run +# If missing, create it: +echo 'ANTHROPIC_API_KEY=your-actual-key' > .env ``` - -See [Zero Config API Keys](docs/ZERO_CONFIG_API_KEYS.md) for details.
@@ -414,9 +395,6 @@ pip install -e . - [x] Hardware detection (GPU/CPU/Memory) - [x] Firejail sandboxing - [x] Dry-run preview mode -- [x] Docker bind-mount permission fixer -- [x] Automatic Role Discovery (AI-driven system context sensing) -- [x] Predictive Error Prevention (pre-install compatibility checks) ### In Progress - [ ] Conflict resolution UI @@ -472,37 +450,11 @@ pip install -e ".[dev]" # Install pre-commit hooks pre-commit install -``` - -### Running Tests -**Python Tests:** - -```bash -# Run all Python tests +# Run tests pytest tests/ -v - -# Run with coverage -pytest tests/ -v --cov=cortex ``` -**Daemon Tests (C++):** - -```bash -# Build daemon with tests -cd daemon && ./scripts/build.sh Release --with-tests - -# Run all daemon tests (no daemon installation required) -cortex daemon run-tests - -# Run specific test types -cortex daemon run-tests --unit # Unit tests only -cortex daemon run-tests --integration # Integration tests only -cortex daemon run-tests -t config # Specific test -``` - -> **Note:** Daemon tests run against a static library and don't require the daemon to be installed as a systemd service. They test the code directly. - See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines. --- @@ -524,7 +476,7 @@ See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed guidelines. ## License -BUSL-1.1 (Business Source License 1.1) - Free for personal use on 1 system. See [LICENSE](LICENSE) for details. +Apache 2.0 - See [LICENSE](LICENSE) for details. --- diff --git a/cortex/ask.py b/cortex/ask.py index 6af08362c..2f9954c72 100644 --- a/cortex/ask.py +++ b/cortex/ask.py @@ -1,371 +1,539 @@ """Natural language query interface for Cortex. Handles user questions about installed packages, configurations, -and system state using LLM with semantic caching. Also provides -educational content and tracks learning progress. +and system state using an agentic LLM loop with command execution. 
+ +The --do mode enables write and execute capabilities with user confirmation +and privilege management. """ import json -import logging import os -import platform import re -import shutil +import shlex import sqlite3 import subprocess -from datetime import datetime, timezone -from pathlib import Path +from enum import Enum from typing import Any -from cortex.config_utils import get_ollama_model - -# Module logger for debug diagnostics -logger = logging.getLogger(__name__) - -# Maximum number of tokens to request from LLM -MAX_TOKENS = 2000 - - -class SystemInfoGatherer: - """Gathers local system information for context-aware responses.""" - - @staticmethod - def get_python_version() -> str: - """Get installed Python version.""" - return platform.python_version() - - @staticmethod - def get_python_path() -> str: - """Get Python executable path.""" - import sys - - return sys.executable - - @staticmethod - def get_os_info() -> dict[str, str]: - """Get OS information.""" - return { - "system": platform.system(), - "release": platform.release(), - "version": platform.version(), - "machine": platform.machine(), - } - - @staticmethod - def get_installed_package(package: str) -> str | None: - """Check if a package is installed via apt and return version.""" - try: - result = subprocess.run( - ["dpkg-query", "-W", "-f=${Version}", package], - capture_output=True, - text=True, - timeout=5, - ) - if result.returncode == 0: - return result.stdout.strip() - except (subprocess.SubprocessError, FileNotFoundError): - # If dpkg-query is unavailable or fails, return None silently. - # We avoid user-visible logs to keep CLI output clean. 
- pass - return None - - @staticmethod - def get_pip_package(package: str) -> str | None: - """Check if a Python package is installed via pip.""" - try: - result = subprocess.run( - ["pip3", "show", package], - capture_output=True, - text=True, - timeout=5, - ) - if result.returncode == 0: - for line in result.stdout.splitlines(): - if line.startswith("Version:"): - return line.split(":", 1)[1].strip() - except (subprocess.SubprocessError, FileNotFoundError): - # If pip is unavailable or the command fails, return None silently. - pass - return None - - @staticmethod - def check_command_exists(cmd: str) -> bool: - """Check if a command exists in PATH.""" - return shutil.which(cmd) is not None - - @staticmethod - def get_gpu_info() -> dict[str, Any]: - """Get GPU information if available.""" - gpu_info: dict[str, Any] = {"available": False, "nvidia": False, "cuda": None} - - # Check for nvidia-smi - if shutil.which("nvidia-smi"): - gpu_info["nvidia"] = True - gpu_info["available"] = True - try: - result = subprocess.run( - ["nvidia-smi", "--query-gpu=name,driver_version", "--format=csv,noheader"], - capture_output=True, - text=True, - timeout=5, - ) - if result.returncode == 0: - gpu_info["model"] = result.stdout.strip().split(",")[0] - except (subprocess.SubprocessError, FileNotFoundError): - # If nvidia-smi is unavailable or fails, keep defaults. - pass - - # Check CUDA version - try: - result = subprocess.run( - ["nvcc", "--version"], - capture_output=True, - text=True, - timeout=5, - ) - if result.returncode == 0: - for line in result.stdout.splitlines(): - if "release" in line.lower(): - parts = line.split("release") - if len(parts) > 1: - gpu_info["cuda"] = parts[1].split(",")[0].strip() - except (subprocess.SubprocessError, FileNotFoundError): - # If nvcc is unavailable or fails, leave CUDA info unset. 
- pass - - return gpu_info - - def gather_context(self) -> dict[str, Any]: - """Gather relevant system context for LLM.""" - return { - "python_version": self.get_python_version(), - "python_path": self.get_python_path(), - "os": self.get_os_info(), - "gpu": self.get_gpu_info(), - } - - -class LearningTracker: - """Tracks educational topics the user has explored.""" - - _progress_file: Path | None = None - - # Patterns that indicate educational questions - EDUCATIONAL_PATTERNS = [ - r"^explain\b", - r"^teach\s+me\b", - r"^what\s+is\b", - r"^what\s+are\b", - r"^how\s+does\b", - r"^how\s+do\b", - r"^how\s+to\b", - r"\bbest\s+practices?\b", - r"^tutorial\b", - r"^guide\s+to\b", - r"^learn\s+about\b", - r"^introduction\s+to\b", - r"^basics\s+of\b", +from pydantic import BaseModel, Field, field_validator + + +class LLMResponseType(str, Enum): + """Type of response from the LLM.""" + COMMAND = "command" + ANSWER = "answer" + DO_COMMANDS = "do_commands" # For --do mode: commands that modify the system + + +class DoCommand(BaseModel): + """A single command for --do mode with explanation.""" + command: str = Field(description="The shell command to execute") + purpose: str = Field(description="Brief explanation of what this command does") + requires_sudo: bool = Field(default=False, description="Whether this command requires sudo") + + +class SystemCommand(BaseModel): + """Pydantic model for a system command to be executed. + + The LLM must return either a command to execute for data gathering, + or a final answer to the user's question. + In --do mode, it can also return a list of commands to execute. 
+ """ + response_type: LLMResponseType = Field( + description="Whether this is a command to execute, a final answer, or do commands" + ) + command: str | None = Field( + default=None, + description="The shell command to execute (only for response_type='command')" + ) + answer: str | None = Field( + default=None, + description="The final answer to the user (only for response_type='answer')" + ) + do_commands: list[DoCommand] | None = Field( + default=None, + description="List of commands to execute (only for response_type='do_commands')" + ) + reasoning: str = Field( + default="", + description="Brief explanation of why this command/answer was chosen" + ) + + @field_validator("command") + @classmethod + def validate_command_not_empty(cls, v: str | None, info) -> str | None: + if info.data.get("response_type") == LLMResponseType.COMMAND: + if not v or not v.strip(): + raise ValueError("Command cannot be empty when response_type is 'command'") + return v + + @field_validator("answer") + @classmethod + def validate_answer_not_empty(cls, v: str | None, info) -> str | None: + if info.data.get("response_type") == LLMResponseType.ANSWER: + if not v or not v.strip(): + raise ValueError("Answer cannot be empty when response_type is 'answer'") + return v + + @field_validator("do_commands") + @classmethod + def validate_do_commands_not_empty(cls, v: list[DoCommand] | None, info) -> list[DoCommand] | None: + if info.data.get("response_type") == LLMResponseType.DO_COMMANDS: + if not v or len(v) == 0: + raise ValueError("do_commands cannot be empty when response_type is 'do_commands'") + return v + + +class CommandValidator: + """Validates and filters commands to ensure they are read-only. + + Only allows commands that fetch data, blocks any that modify the system. 
+ """ + + # Commands that are purely read-only and safe + ALLOWED_COMMANDS: set[str] = { + # System info + "uname", "hostname", "uptime", "whoami", "id", "groups", "w", "who", "last", + "date", "cal", "timedatectl", + # File/directory listing (read-only) + "ls", "pwd", "tree", "file", "stat", "readlink", "realpath", "dirname", "basename", + "find", "locate", "which", "whereis", "type", "command", + # Text viewing (read-only) + "cat", "head", "tail", "less", "more", "wc", "nl", "strings", + # Text processing (non-modifying) + "grep", "egrep", "fgrep", "awk", "sed", "cut", "sort", "uniq", "tr", "column", + "diff", "comm", "join", "paste", "expand", "unexpand", "fold", "fmt", + # Package queries (read-only) + "dpkg-query", "dpkg", "apt-cache", "apt-mark", "apt-config", "aptitude", "apt", + "pip3", "pip", "python3", "python", "gem", "npm", "cargo", "go", + # System info commands + "lsb_release", "hostnamectl", "lscpu", "lsmem", "lsblk", "lspci", "lsusb", + "lshw", "dmidecode", "hwinfo", "inxi", + # Process/resource info + "ps", "top", "htop", "pgrep", "pidof", "pstree", "free", "vmstat", "iostat", + "mpstat", "sar", "nproc", "getconf", + # Disk/filesystem info + "df", "du", "mount", "findmnt", "blkid", "lsof", "fuser", "fdisk", + # Network info (read-only) + "ip", "ifconfig", "netstat", "ss", "route", "arp", "ping", "traceroute", + "tracepath", "nslookup", "dig", "host", "getent", "hostname", + # GPU info + "nvidia-smi", "nvcc", "rocm-smi", "clinfo", + # Environment + "env", "printenv", "echo", "printf", + # Systemd info (read-only) + "systemctl", "journalctl", "loginctl", "timedatectl", "localectl", + # Kernel/modules + "uname", "lsmod", "modinfo", "sysctl", + # Misc info + "getconf", "locale", "xdpyinfo", "xrandr", + # Container/virtualization info + "docker", "podman", "kubectl", "crictl", "nerdctl", + "lxc-ls", "virsh", "vboxmanage", + # Development tools (version checks) + "git", "node", "nodejs", "deno", "bun", "ruby", "perl", "php", "java", "javac", + "rustc", 
"gcc", "g++", "clang", "clang++", "make", "cmake", "ninja", "meson", + "dotnet", "mono", "swift", "kotlin", "scala", "groovy", "gradle", "mvn", "ant", + # Database clients (info/version) + "mysql", "psql", "sqlite3", "mongosh", "redis-cli", + # Web/network tools + "curl", "wget", "httpie", "openssl", "ssh", "scp", "rsync", + # Cloud CLIs + "aws", "gcloud", "az", "doctl", "linode-cli", "vultr-cli", + "terraform", "ansible", "vagrant", "packer", + # Other common tools + "jq", "yq", "xmllint", "ffmpeg", "ffprobe", "imagemagick", "convert", + "gh", "hub", "lab", # GitHub/GitLab CLIs + "snap", "flatpak", # For version/list only + "systemd-analyze", "bootctl", + } + + # Version check flags - these make ANY command safe (read-only) + VERSION_FLAGS: set[str] = { + "--version", "-v", "-V", "--help", "-h", "-help", + "version", "help", "--info", "-version", + } + + # Subcommands that are blocked for otherwise allowed commands + BLOCKED_SUBCOMMANDS: dict[str, set[str]] = { + "dpkg": {"--configure", "-i", "--install", "--remove", "-r", "--purge", "-P", + "--unpack", "--clear-avail", "--forget-old-unavail", "--update-avail", + "--merge-avail", "--set-selections", "--clear-selections"}, + "apt-mark": {"auto", "manual", "hold", "unhold", "showauto", "showmanual"}, # only show* are safe + "pip3": {"install", "uninstall", "download", "wheel", "cache"}, + "pip": {"install", "uninstall", "download", "wheel", "cache"}, + "python3": {"-c"}, # Block arbitrary code execution + "python": {"-c"}, + "npm": {"install", "uninstall", "update", "ci", "run", "exec", "init", "publish"}, + "gem": {"install", "uninstall", "update", "cleanup", "pristine"}, + "cargo": {"install", "uninstall", "build", "run", "clean", "publish"}, + "go": {"install", "get", "build", "run", "clean", "mod"}, + "systemctl": {"start", "stop", "restart", "reload", "enable", "disable", + "mask", "unmask", "edit", "set-property", "reset-failed", + "daemon-reload", "daemon-reexec", "kill", "isolate", + "set-default", 
"set-environment", "unset-environment"}, + "mount": {"--bind", "-o", "--move"}, # Block actual mounting + "fdisk": {"-l"}, # Only allow listing (-l), block everything else (inverted logic handled below) + "sysctl": {"-w", "--write", "-p", "--load"}, # Block writes + # Container tools - block modifying commands + "docker": {"run", "exec", "build", "push", "pull", "rm", "rmi", "kill", "stop", "start", + "restart", "pause", "unpause", "create", "commit", "tag", "load", "save", + "import", "export", "login", "logout", "network", "volume", "system", "prune"}, + "podman": {"run", "exec", "build", "push", "pull", "rm", "rmi", "kill", "stop", "start", + "restart", "pause", "unpause", "create", "commit", "tag", "load", "save", + "import", "export", "login", "logout", "network", "volume", "system", "prune"}, + "kubectl": {"apply", "create", "delete", "edit", "patch", "replace", "scale", "exec", + "run", "expose", "set", "rollout", "drain", "cordon", "uncordon", "taint"}, + # Git - block modifying commands + "git": {"push", "commit", "add", "rm", "mv", "reset", "revert", "merge", "rebase", + "checkout", "switch", "restore", "stash", "clean", "init", "clone", "pull", + "fetch", "cherry-pick", "am", "apply"}, + # Cloud CLIs - block modifying commands + "aws": {"s3", "ec2", "iam", "lambda", "rds", "ecs", "eks"}, # Block service commands (allow sts, configure list) + "gcloud": {"compute", "container", "functions", "run", "sql", "storage"}, + # Snap/Flatpak - block modifying commands + "snap": {"install", "remove", "refresh", "revert", "enable", "disable", "set", "unset"}, + "flatpak": {"install", "uninstall", "update", "repair"}, + } + + # Commands that are completely blocked (never allowed, even with --version) + BLOCKED_COMMANDS: set[str] = { + # Dangerous/destructive + "rm", "rmdir", "unlink", "shred", + "mv", "cp", "install", "mkdir", "touch", + # Editors (sed is allowed for text processing, redirections are blocked separately) + "nano", "vim", "vi", "emacs", "ed", + # 
Package modification (apt-get is dangerous, apt is allowed with restrictions) + "apt-get", "dpkg-reconfigure", "update-alternatives", + # System modification + "shutdown", "reboot", "poweroff", "halt", "init", "telinit", + "useradd", "userdel", "usermod", "groupadd", "groupdel", "groupmod", + "passwd", "chpasswd", "chage", + "chmod", "chown", "chgrp", "chattr", "setfacl", + "ln", "mkfifo", "mknod", + # Dangerous utilities + "dd", "mkfs", "fsck", "parted", "gdisk", "cfdisk", "sfdisk", + "kill", "killall", "pkill", + "nohup", "disown", "bg", "fg", + "crontab", "at", "batch", + "su", "sudo", "doas", "pkexec", + # Network modification + "iptables", "ip6tables", "nft", "ufw", "firewall-cmd", + "ifup", "ifdown", "dhclient", + # Shell/code execution + "bash", "sh", "zsh", "fish", "dash", "csh", "tcsh", "ksh", + "eval", "exec", "source", + "xargs", # Can be used to execute arbitrary commands + "tee", # Writes to files + } + + # Patterns that indicate dangerous operations (NOT including safe chaining) + DANGEROUS_PATTERNS: list[str] = [ + r">\s*[^|]", # Output redirection (except pipes) + r">>\s*", # Append redirection + r"<\s*", # Input redirection + r"\$\(", # Command substitution + r"`[^`]+`", # Backtick command substitution + r"\|.*(?:sh|bash|zsh|exec|eval|xargs)", # Piping to shell ] - - # Compiled patterns shared across all instances for efficiency - _compiled_patterns: list[re.Pattern[str]] = [ - re.compile(p, re.IGNORECASE) for p in EDUCATIONAL_PATTERNS + + # Chaining patterns that we'll split instead of block + CHAINING_PATTERNS: list[str] = [ + r";\s*", # Command chaining + r"\s*&&\s*", # AND chaining + r"\s*\|\|\s*", # OR chaining ] - - def __init__(self) -> None: - """Initialize the learning tracker. - - Uses pre-compiled educational patterns for efficient matching - across multiple queries. Patterns are shared as class variables - to avoid recompilation overhead. 
- """ - - @property - def progress_file(self) -> Path: - """Lazily compute the progress file path to avoid import-time errors.""" - if self._progress_file is None: - try: - self._progress_file = Path.home() / ".cortex" / "learning_history.json" - except RuntimeError: - # Fallback for restricted environments where home is inaccessible - import tempfile - - self._progress_file = ( - Path(tempfile.gettempdir()) / ".cortex" / "learning_history.json" - ) - return self._progress_file - - def is_educational_query(self, question: str) -> bool: - """Determine if a question is educational in nature.""" - return any(pattern.search(question) for pattern in self._compiled_patterns) - - def extract_topic(self, question: str) -> str: - """Extract the main topic from an educational question.""" - # Remove common prefixes - topic = question.lower() - prefixes_to_remove = [ - r"^explain\s+", - r"^teach\s+me\s+about\s+", - r"^teach\s+me\s+", - r"^what\s+is\s+", - r"^what\s+are\s+", - r"^how\s+does\s+", - r"^how\s+do\s+", - r"^how\s+to\s+", - r"^tutorial\s+on\s+", - r"^guide\s+to\s+", - r"^learn\s+about\s+", - r"^introduction\s+to\s+", - r"^basics\s+of\s+", - r"^best\s+practices\s+for\s+", - ] - for prefix in prefixes_to_remove: - topic = re.sub(prefix, "", topic, flags=re.IGNORECASE) - - # Clean up and truncate - topic = topic.strip("? ").strip() - - # Truncate at word boundaries to keep topic identifier meaningful - # If topic exceeds 50 chars, truncate at the last space within those 50 chars - # to preserve whole words. If the first 50 chars contain no spaces, - # keep the full 50-char prefix. - if len(topic) > 50: - truncated = topic[:50] - # Try to split at word boundary; keep full 50 chars if no spaces found - words = truncated.rsplit(" ", 1) - # Handle case where topic starts with space after prefix removal - topic = words[0] if words[0] else truncated - - return topic - - def record_topic(self, question: str) -> None: - """Record that the user explored an educational topic. 
- - Note: This method performs a read-modify-write cycle on the history file - without file locking. If multiple cortex ask processes run concurrently, - concurrent updates could theoretically be lost. This is acceptable for a - single-user CLI tool where concurrent invocations are rare and learning - history is non-critical, but worth noting for future enhancements. + + @classmethod + def split_chained_commands(cls, command: str) -> list[str]: + """Split a chained command into individual commands.""" + # Split by ;, &&, or || + parts = re.split(r'\s*(?:;|&&|\|\|)\s*', command) + return [p.strip() for p in parts if p.strip()] + + @classmethod + def validate_command(cls, command: str) -> tuple[bool, str]: + """Validate a command for safety. + + Args: + command: The shell command to validate + + Returns: + Tuple of (is_valid, error_message) """ - if not self.is_educational_query(question): - return - - topic = self.extract_topic(question) - if not topic: - return - - history = self._load_history() - if not isinstance(history, dict): - history = {"topics": {}, "total_queries": 0} - - # Ensure history has expected structure (defensive defaults for malformed data) - history.setdefault("topics", {}) - history.setdefault("total_queries", 0) - if not isinstance(history.get("topics"), dict): - history["topics"] = {} - - # Ensure total_queries is an integer - if not isinstance(history.get("total_queries"), int): - try: - history["total_queries"] = int(history["total_queries"]) - except (ValueError, TypeError): - history["total_queries"] = 0 - - # Use UTC timestamps for consistency and accurate sorting - utc_now = datetime.now(timezone.utc).isoformat() - - # Update or add topic - if topic in history["topics"]: - # Check if the topic data is actually a dict before accessing it - if not isinstance(history["topics"][topic], dict): - # If topic data is malformed, reinitialize it - history["topics"][topic] = { - "count": 1, - "first_accessed": utc_now, - "last_accessed": utc_now, 
+ if not command or not command.strip(): + return False, "Empty command" + + command = command.strip() + + # Check for dangerous patterns (NOT chaining - we handle that separately) + for pattern in cls.DANGEROUS_PATTERNS: + if re.search(pattern, command): + return False, f"Command contains blocked pattern (redirections or subshells)" + + # Check if command has chaining - if so, validate each part + has_chaining = any(re.search(p, command) for p in cls.CHAINING_PATTERNS) + if has_chaining: + subcommands = cls.split_chained_commands(command) + for subcmd in subcommands: + is_valid, error = cls._validate_single_command(subcmd) + if not is_valid: + return False, f"In chained command '{subcmd}': {error}" + return True, "" + + return cls._validate_single_command(command) + + @classmethod + def _validate_single_command(cls, command: str) -> tuple[bool, str]: + """Validate a single (non-chained) command.""" + if not command or not command.strip(): + return False, "Empty command" + + command = command.strip() + + # Parse the command + try: + parts = shlex.split(command) + except ValueError as e: + return False, f"Invalid command syntax: {e}" + + if not parts: + return False, "Empty command" + + # Get base command (handle sudo prefix) + base_cmd = parts[0] + cmd_args = parts[1:] + + if base_cmd == "sudo": + return False, "sudo is not allowed - only read-only commands permitted" + + # Check if this is a version/help check - these are always safe + # Allow ANY command if it only has version/help flags + if cmd_args and all(arg in cls.VERSION_FLAGS for arg in cmd_args): + return True, "" # Safe: just checking version/help + + # Also allow if first arg is a version flag (e.g., "docker --version" or "git version") + if cmd_args and cmd_args[0] in cls.VERSION_FLAGS: + return True, "" # Safe: version/help check + + # Check if command is completely blocked (unless it's a version check) + if base_cmd in cls.BLOCKED_COMMANDS: + return False, f"Command '{base_cmd}' is not allowed - it 
can modify the system" + + # Check if command is in allowed list + if base_cmd not in cls.ALLOWED_COMMANDS: + return False, f"Command '{base_cmd}' is not in the allowed list of read-only commands" + + # Check for blocked subcommands + if base_cmd in cls.BLOCKED_SUBCOMMANDS: + blocked = cls.BLOCKED_SUBCOMMANDS[base_cmd] + for arg in cmd_args: + # Handle fdisk specially - only -l is allowed + if base_cmd == "fdisk": + if arg not in ["-l", "--list"]: + return False, f"fdisk only allows -l/--list for listing partitions" + elif arg in blocked: + return False, f"Subcommand '{arg}' is not allowed for '{base_cmd}' - it can modify the system" + + # Special handling for pip/pip3 - only allow show, list, freeze, check, config + if base_cmd in ["pip", "pip3"]: + if cmd_args: + allowed_pip_cmds = {"show", "list", "freeze", "check", "config", "--version", "-V", "help", "--help"} + if cmd_args[0] not in allowed_pip_cmds: + return False, f"pip command '{cmd_args[0]}' is not allowed - only read-only commands like 'show', 'list', 'freeze' are permitted" + + # Special handling for apt-mark - only showhold, showauto, showmanual + if base_cmd == "apt-mark": + if cmd_args: + allowed_apt_mark = {"showhold", "showauto", "showmanual"} + if cmd_args[0] not in allowed_apt_mark: + return False, f"apt-mark command '{cmd_args[0]}' is not allowed - only showhold, showauto, showmanual are permitted" + + # Special handling for docker/podman - allow info and list commands + if base_cmd in ["docker", "podman"]: + if cmd_args: + allowed_docker_cmds = { + "ps", "images", "info", "version", "inspect", "logs", "top", "stats", + "port", "diff", "history", "search", "events", "container", "image", + "--version", "-v", "help", "--help", + } + # Also allow "container ls", "image ls", etc. 
+ if cmd_args[0] not in allowed_docker_cmds: + return False, f"docker command '{cmd_args[0]}' is not allowed - only read-only commands like 'ps', 'images', 'info', 'inspect', 'logs' are permitted" + # Check container/image subcommands + if cmd_args[0] in ["container", "image"] and len(cmd_args) > 1: + allowed_sub = {"ls", "list", "inspect", "history", "prune"} # prune for info only + if cmd_args[1] not in allowed_sub and cmd_args[1] not in cls.VERSION_FLAGS: + return False, f"docker {cmd_args[0]} '{cmd_args[1]}' is not allowed - only ls, list, inspect are permitted" + + # Special handling for kubectl - allow get, describe, logs + if base_cmd == "kubectl": + if cmd_args: + allowed_kubectl_cmds = { + "get", "describe", "logs", "top", "cluster-info", "config", "version", + "api-resources", "api-versions", "explain", "auth", + "--version", "-v", "help", "--help", } + if cmd_args[0] not in allowed_kubectl_cmds: + return False, f"kubectl command '{cmd_args[0]}' is not allowed - only read-only commands like 'get', 'describe', 'logs' are permitted" + + # Special handling for git - allow status, log, show, diff, branch, remote, config (get) + if base_cmd == "git": + if cmd_args: + allowed_git_cmds = { + "status", "log", "show", "diff", "branch", "remote", "tag", "describe", + "ls-files", "ls-tree", "ls-remote", "rev-parse", "rev-list", "cat-file", + "config", "shortlog", "blame", "annotate", "grep", "reflog", + "version", "--version", "-v", "help", "--help", + } + if cmd_args[0] not in allowed_git_cmds: + return False, f"git command '{cmd_args[0]}' is not allowed - only read-only commands like 'status', 'log', 'diff', 'branch' are permitted" + # Block git config --set/--add + if cmd_args[0] == "config" and any(a in cmd_args for a in ["--add", "--unset", "--remove-section", "--rename-section"]): + return False, "git config modifications are not allowed" + + # Special handling for snap/flatpak - allow list and info commands + if base_cmd == "snap": + if cmd_args: + 
allowed_snap = {"list", "info", "find", "version", "connections", "services", "logs", "--version", "help", "--help"} + if cmd_args[0] not in allowed_snap: + return False, f"snap command '{cmd_args[0]}' is not allowed - only list, info, find are permitted" + + if base_cmd == "flatpak": + if cmd_args: + allowed_flatpak = {"list", "info", "search", "remote-ls", "remotes", "history", "--version", "help", "--help"} + if cmd_args[0] not in allowed_flatpak: + return False, f"flatpak command '{cmd_args[0]}' is not allowed - only list, info, search are permitted" + + # Special handling for AWS CLI - allow read-only commands + if base_cmd == "aws": + if cmd_args: + allowed_aws = {"--version", "help", "--help", "sts", "configure"} + # sts get-caller-identity is safe, configure list is safe + if cmd_args[0] not in allowed_aws: + return False, f"aws command '{cmd_args[0]}' is not allowed - use 'sts get-caller-identity' or 'configure list' for read-only queries" + + # Special handling for apt - only allow list, show, search, policy, depends + if base_cmd == "apt": + if cmd_args: + allowed_apt = {"list", "show", "search", "policy", "depends", "rdepends", "madison", "--version", "help", "--help"} + if cmd_args[0] not in allowed_apt: + return False, f"apt command '{cmd_args[0]}' is not allowed - only list, show, search, policy are permitted for read-only queries" else: - try: - # Safely increment count, handle missing key - history["topics"][topic]["count"] = history["topics"][topic].get("count", 0) + 1 - history["topics"][topic]["last_accessed"] = utc_now - except (KeyError, TypeError, AttributeError): - # If topic data is malformed, reinitialize it - history["topics"][topic] = { - "count": 1, - "first_accessed": utc_now, - "last_accessed": utc_now, - } - else: - history["topics"][topic] = { - "count": 1, - "first_accessed": utc_now, - "last_accessed": utc_now, - } - - history["total_queries"] = history.get("total_queries", 0) + 1 - self._save_history(history) - - def 
get_history(self) -> dict[str, Any]: - """Get the learning history.""" - return self._load_history() - - def get_recent_topics(self, limit: int = 5) -> list[str]: - """Get recently explored topics.""" - history = self._load_history() - topics = history.get("topics", {}) - - # Filter out malformed entries and sort by last_accessed - valid_topics = [ - (name, data) - for name, data in topics.items() - if isinstance(data, dict) and "last_accessed" in data - ] - sorted_topics = sorted( - valid_topics, - key=lambda x: x[1].get("last_accessed", ""), - reverse=True, - ) - return [t[0] for t in sorted_topics[:limit]] - - def _load_history(self) -> dict[str, Any]: - """Load learning history from file.""" - if not self.progress_file.exists(): - return {"topics": {}, "total_queries": 0} - - try: - with open(self.progress_file, encoding="utf-8") as f: - return json.load(f) - except (json.JSONDecodeError, OSError): - return {"topics": {}, "total_queries": 0} - - def _save_history(self, history: dict[str, Any]) -> None: - """Save learning history to file. - - Silently handles save failures to keep CLI clean, but logs at debug level - for diagnostics. Failures may occur due to permission issues or disk space. + return False, "apt requires a subcommand like 'list', 'show', or 'search'" + + return True, "" + + @classmethod + def execute_command(cls, command: str, timeout: int = 10) -> tuple[bool, str, str]: + """Execute a validated command and return the result. + + For chained commands (&&, ||, ;), executes each command separately + and combines the output. 
+ + Args: + command: The shell command to execute + timeout: Maximum execution time in seconds + + Returns: + Tuple of (success, stdout, stderr) """ + # Validate first + is_valid, error = cls.validate_command(command) + if not is_valid: + return False, "", f"Command blocked: {error}" + + # Check if this is a chained command + has_chaining = any(re.search(p, command) for p in cls.CHAINING_PATTERNS) + + if has_chaining: + # Split and execute each command separately + subcommands = cls.split_chained_commands(command) + all_stdout = [] + all_stderr = [] + overall_success = True + + for subcmd in subcommands: + try: + result = subprocess.run( + subcmd, + shell=True, + capture_output=True, + text=True, + timeout=timeout, + ) + + if result.stdout.strip(): + all_stdout.append(f"# {subcmd}\n{result.stdout.strip()}") + if result.stderr.strip(): + all_stderr.append(f"# {subcmd}\n{result.stderr.strip()}") + + if result.returncode != 0: + overall_success = False + # For && chaining, stop on first failure + if "&&" in command: + break + + except subprocess.TimeoutExpired: + all_stderr.append(f"# {subcmd}\nCommand timed out after {timeout} seconds") + overall_success = False + break + except Exception as e: + all_stderr.append(f"# {subcmd}\nExecution failed: {e}") + overall_success = False + break + + return ( + overall_success, + "\n\n".join(all_stdout), + "\n\n".join(all_stderr), + ) + + # Single command try: - self.progress_file.parent.mkdir(parents=True, exist_ok=True) - with open(self.progress_file, "w", encoding="utf-8") as f: - json.dump(history, f, indent=2) - except OSError as e: - # Log at debug level to help diagnose permission/disk issues - # without breaking CLI output or crashing the application - logger.debug( - f"Failed to save learning history to {self.progress_file}: {e}", - exc_info=False, + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=timeout, ) + return ( + result.returncode == 0, + result.stdout.strip(), + 
result.stderr.strip(), + ) + except subprocess.TimeoutExpired: + return False, "", f"Command timed out after {timeout} seconds" + except Exception as e: + return False, "", f"Command execution failed: {e}" class AskHandler: - """Handles natural language questions about the system.""" + """Handles natural language questions about the system using an agentic loop. + + The handler uses an iterative approach: + 1. LLM generates a read-only command to gather information + 2. Command is validated and executed + 3. Output is sent back to LLM + 4. LLM either generates another command or provides final answer + 5. Max 5 iterations before giving up + + In --do mode, the handler can execute write and modify commands with + user confirmation and privilege management. + """ + + MAX_ITERATIONS = 5 + MAX_DO_ITERATIONS = 15 # More iterations for --do mode since it's solving problems def __init__( self, api_key: str, provider: str = "claude", model: str | None = None, + debug: bool = False, + do_mode: bool = False, ): """Initialize the ask handler. 
@@ -373,40 +541,215 @@ def __init__( api_key: API key for the LLM provider provider: Provider name ("openai", "claude", or "ollama") model: Optional model name override + debug: Enable debug output to shell + do_mode: Enable write/execute mode with user confirmation """ self.api_key = api_key self.provider = provider.lower() self.model = model or self._default_model() - self.info_gatherer = SystemInfoGatherer() - self.learning_tracker = LearningTracker() + self.debug = debug + self.do_mode = do_mode + + # Import rich console for debug output + if self.debug: + from rich.console import Console + from rich.panel import Panel + self._console = Console() + else: + self._console = None + + # For expandable output storage + self._last_output: str | None = None + self._last_output_command: str | None = None + + # Interrupt flag - can be set externally to stop execution + self._interrupted = False + + # Initialize DoHandler for --do mode + self._do_handler = None + if self.do_mode: + try: + from cortex.do_runner import DoHandler + # Pass LLM callback so DoHandler can make LLM calls for interactive session + self._do_handler = DoHandler(llm_callback=self._call_llm_for_do) + except (ImportError, OSError, Exception) as e: + # Log error but don't fail - do mode just won't work + if self.debug and self._console: + self._console.print(f"[yellow]Warning: Could not initialize DoHandler: {e}[/yellow]") + pass # Initialize cache try: from cortex.semantic_cache import SemanticCache self.cache: SemanticCache | None = SemanticCache() - except (ImportError, OSError): + except (ImportError, OSError, sqlite3.OperationalError, Exception): self.cache = None self._initialize_client() + def interrupt(self): + """Interrupt the current operation. 
Call this from signal handlers.""" + self._interrupted = True + # Also interrupt the DoHandler if it exists + if self._do_handler: + self._do_handler._interrupted = True + + def reset_interrupt(self): + """Reset the interrupt flag before starting a new operation.""" + self._interrupted = False + if self._do_handler: + self._do_handler._interrupted = False + def _default_model(self) -> str: if self.provider == "openai": - return "gpt-4" + return "gpt-4o" # Use gpt-4o for 128K context elif self.provider == "claude": return "claude-sonnet-4-20250514" elif self.provider == "ollama": - return self._get_ollama_model() + return "llama3.2" elif self.provider == "fake": return "fake" - return "gpt-4" - - def _get_ollama_model(self) -> str: - """Determine which Ollama model to use. - - Delegates to the shared ``get_ollama_model()`` utility function. - """ - return get_ollama_model() + return "gpt-4o" + + def _debug_print(self, title: str, content: str, style: str = "dim") -> None: + """Print debug output if debug mode is enabled.""" + if self.debug and self._console: + from rich.panel import Panel + self._console.print(Panel(content, title=f"[bold]{title}[/bold]", style=style)) + + def _print_query_summary(self, question: str, commands_run: list[str], answer: str) -> None: + """Print a condensed summary for question queries with improved visual design.""" + if not self._console: + return + + from rich.panel import Panel + from rich.table import Table + from rich.text import Text + from rich import box + + # Clean the answer - remove any JSON/shell script that might have leaked + clean_answer = answer + import re + + # Check if answer looks like JSON or contains shell script fragments + if clean_answer.startswith('{') or '{"' in clean_answer[:100]: + # Try to extract just the answer field if present + answer_match = re.search(r'"answer"\s*:\s*"([^"]*)"', clean_answer, re.DOTALL) + if answer_match: + clean_answer = answer_match.group(1) + # Unescape common JSON escapes + 
clean_answer = clean_answer.replace('\\n', '\n').replace('\\"', '"') + + # Remove shell script-like content that shouldn't be in the answer + if re.search(r'^(if \[|while |for |echo \$|sed |awk |grep -)', clean_answer, re.MULTILINE): + # This looks like shell script leaked - try to extract readable parts + readable_lines = [] + for line in clean_answer.split('\n'): + # Keep lines that look like actual content, not script + if not re.match(r'^(if \[|fi$|done$|else$|then$|do$|while |for |echo \$|sed |awk )', line.strip()): + if line.strip() and not line.strip().startswith('#!'): + readable_lines.append(line) + if readable_lines: + clean_answer = '\n'.join(readable_lines[:20]) # Limit to 20 lines + + self._console.print() + + # Query section + q_display = question[:80] + "..." if len(question) > 80 else question + self._console.print(Panel( + f"[bold]{q_display}[/bold]", + title="[bold white on blue] 🔍 Query [/bold white on blue]", + title_align="left", + border_style="blue", + padding=(0, 1), + expand=False, + )) + + # Info gathered section + if commands_run: + info_table = Table( + show_header=False, + box=box.SIMPLE, + padding=(0, 1), + expand=True, + ) + info_table.add_column("", style="dim") + + for cmd in commands_run[:4]: + cmd_display = cmd[:60] + "..." if len(cmd) > 60 else cmd + info_table.add_row(f"$ {cmd_display}") + if len(commands_run) > 4: + info_table.add_row(f"[dim]... and {len(commands_run) - 4} more commands[/dim]") + + self._console.print(Panel( + info_table, + title=f"[bold] 📊 Info Gathered ({len(commands_run)} commands) [/bold]", + title_align="left", + border_style="dim", + padding=(0, 0), + )) + + # Answer section - make it prominent + if clean_answer.strip(): + # Truncate very long answers + if len(clean_answer) > 800: + display_answer = clean_answer[:800] + "\n\n[dim]... 
(answer truncated)[/dim]" + else: + display_answer = clean_answer + + self._console.print(Panel( + display_answer, + title="[bold white on green] 💡 Answer [/bold white on green]", + title_align="left", + border_style="green", + padding=(1, 2), + )) + + def _show_expandable_output(self, console, output: str, command: str) -> None: + """Show output with expand/collapse capability.""" + from rich.panel import Panel + from rich.text import Text + + lines = output.split('\n') + total_lines = len(lines) + + # Always show first 3 lines as preview + preview_count = 3 + + if total_lines <= preview_count + 2: + # Small output - just show it all + console.print(Panel( + output, + title=f"[dim]Output[/dim]", + title_align="left", + border_style="dim", + padding=(0, 1), + )) + return + + # Show collapsed preview with expand option + preview = '\n'.join(lines[:preview_count]) + remaining = total_lines - preview_count + + # Build the panel content + content = Text() + content.append(preview) + content.append(f"\n\n[dim]─── {remaining} more lines hidden ───[/dim]", style="dim") + + console.print(Panel( + content, + title=f"[dim]Output ({total_lines} lines)[/dim]", + subtitle="[dim italic]Type 'e' to expand[/dim italic]", + subtitle_align="right", + title_align="left", + border_style="dim", + padding=(0, 1), + )) + + # Store for potential expansion + self._last_output = output + self._last_output_command = command def _initialize_client(self): if self.provider == "openai": @@ -419,7 +762,11 @@ def _initialize_client(self): elif self.provider == "claude": try: from anthropic import Anthropic - + import logging + # Suppress noisy retry logging from anthropic client + logging.getLogger("anthropic").setLevel(logging.WARNING) + logging.getLogger("anthropic._base_client").setLevel(logging.WARNING) + self.client = Anthropic(api_key=self.api_key) except ImportError: raise ImportError("Anthropic package not installed. 
Run: pip install anthropic") @@ -431,162 +778,650 @@ def _initialize_client(self): else: raise ValueError(f"Unsupported provider: {self.provider}") - def _get_system_prompt(self, context: dict[str, Any]) -> str: - return f"""You are a helpful Linux system assistant and tutor. You help users with both system-specific questions AND educational queries about Linux, packages, and best practices. - -System Context: -{json.dumps(context, indent=2)} - -**Query Type Detection** - -Automatically detect the type of question and respond appropriately: - -**Educational Questions (tutorials, explanations, learning)** - -Triggered by questions like: "explain...", "teach me...", "how does X work", "what is...", "best practices for...", "tutorial on...", "learn about...", "guide to..." - -For educational questions: -1. Provide structured, tutorial-style explanations -2. Include practical code examples with proper formatting -3. Highlight best practices and common pitfalls to avoid -4. Break complex topics into digestible sections -5. Use clear section labels and bullet points for readability -6. Mention related topics the user might want to explore next -7. Tailor examples to the user's system when relevant (e.g., use apt for Debian-based systems) - -**Diagnostic Questions (system-specific, troubleshooting)** - -Triggered by questions about: current system state, "why is my...", "what packages...", "check my...", specific errors, system status - -For diagnostic questions: -1. Analyze the provided system context -2. Give specific, actionable answers -3. Be concise but informative -4. If you don't have enough information, say so clearly - -**Output Formatting Rules (CRITICAL - Follow exactly)** - -1. NEVER use markdown headings (# or ##) - they render poorly in terminals -2. For section titles, use **Bold Text** on its own line instead -3. Use bullet points (-) for lists -4. Use numbered lists (1. 2. 3.) for sequential steps -5. 
Use triple backticks with language name for code blocks (```bash) -6. Use *italic* sparingly for emphasis -7. Keep lines under 100 characters when possible -8. Add blank lines between sections for readability -9. For tables, use simple text formatting, not markdown tables - -Example of good formatting: -**Installation Steps** - -1. Update your package list: -```bash -sudo apt update -``` - -2. Install the package: -```bash -sudo apt install nginx -``` - -**Key Points** -- Point one here -- Point two here""" - - def _call_openai(self, question: str, system_prompt: str) -> str: - response = self.client.chat.completions.create( - model=self.model, - messages=[ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": question}, - ], - temperature=0.3, - max_tokens=MAX_TOKENS, - ) - # Defensive: content may be None or choices could be empty in edge cases + def _get_system_prompt(self) -> str: + if self.do_mode: + return self._get_do_mode_system_prompt() + return self._get_read_only_system_prompt() + + def _get_read_only_system_prompt(self) -> str: + return """You are a Linux system assistant that answers questions by executing read-only shell commands. + +SCOPE RESTRICTION - VERY IMPORTANT: +You are ONLY a Linux/system administration assistant. You can ONLY help with: +- Linux system administration, configuration, and troubleshooting +- Package management (apt, snap, flatpak, pip, npm, etc.) +- Service management (systemctl, docker, etc.) +- File system operations and permissions +- Networking and security +- Development environment setup +- Server configuration + +If the user asks about anything unrelated to Linux/technical topics (social chat, personal advice, +creative writing, general knowledge questions not related to their system, etc.), you MUST respond with: +{ + "response_type": "answer", + "answer": "I'm Cortex, a Linux system assistant. 
I can only help with Linux system administration, package management, and technical tasks on your machine. I can't help with non-technical topics. Is there something I can help you with on your system?", + "reasoning": "User query is outside my scope as a Linux system assistant" +} + +Your task is to help answer the user's question about their system by: +1. Generating shell commands to gather the needed information +2. Analyzing the command output +3. Either generating another command if more info is needed, or providing the final answer + +IMPORTANT RULES: +- You can ONLY use READ-ONLY commands that fetch data (no modifications allowed) +- Allowed commands include: cat, ls, grep, find, dpkg-query, apt-cache, pip3 show/list, ps, df, free, uname, lscpu, etc. +- NEVER use commands that modify the system (rm, mv, cp, apt install, pip install, etc.) +- NEVER use sudo +- NEVER use output redirection (>, >>), command chaining (;, &&, ||), or command substitution ($(), ``) + +CRITICAL: You must respond with ONLY a JSON object - no other text before or after. +Do NOT include explanations outside the JSON. Put all reasoning inside the "reasoning" field. + +JSON format: +{ + "response_type": "command" | "answer", + "command": "" (only if response_type is "command"), + "answer": "" (only if response_type is "answer"), + "reasoning": "" +} + +Examples of ALLOWED commands: +- cat /etc/os-release +- dpkg-query -W -f='${Version}' python3 +- pip3 show numpy +- pip3 list +- ls -la /usr/bin/python* +- uname -a +- lscpu +- free -h +- df -h +- ps aux | grep python +- apt-cache show nginx +- systemctl status nginx (read-only status check) + +Examples of BLOCKED commands (NEVER use these): +- sudo anything +- apt install/remove +- pip install/uninstall +- rm, mv, cp, mkdir, touch +- echo "text" > file +- command1 && command2""" + + def _get_do_mode_system_prompt(self) -> str: + return """You are a Linux system assistant that can READ, WRITE, and EXECUTE commands to solve problems. 
+ +SCOPE RESTRICTION - VERY IMPORTANT: +You are ONLY a Linux/system administration assistant. You can ONLY help with: +- Linux system administration, configuration, and troubleshooting +- Package management (apt, snap, flatpak, pip, npm, etc.) +- Service management (systemctl, docker, etc.) +- File system operations and permissions +- Networking and security +- Development environment setup +- Server configuration + +If the user asks about anything unrelated to Linux/technical topics (social chat, personal advice, +creative writing, general knowledge questions not related to their system, etc.), you MUST respond with: +{ + "response_type": "answer", + "answer": "I'm Cortex, a Linux system assistant. I can only help with Linux system administration, package management, and technical tasks on your machine. I can't help with non-technical topics. What would you like me to do on your system?", + "reasoning": "User query is outside my scope as a Linux system assistant" +} + +You are in DO MODE - you have the ability to make changes to the system to solve the user's problem. + +Your task is to: +1. Understand the user's problem or request +2. Quickly gather essential information (1-3 read commands MAX) +3. Plan and propose a solution with specific commands using "do_commands" +4. Execute the solution with the user's permission +5. Handle failures gracefully with repair attempts + +CRITICAL WORKFLOW RULES: +- DO NOT spend more than 3-4 iterations gathering information +- After gathering basic system info (OS, existing packages), IMMEDIATELY propose do_commands +- If you know how to install/configure something, propose do_commands right away +- Be action-oriented: the user wants you to DO something, not just analyze +- You can always gather more info AFTER the user approves the commands if needed + +WORKFLOW: +1. Quickly gather essential info (OS version, if package exists) - MAX 2-3 commands +2. IMMEDIATELY propose "do_commands" with your installation/setup plan +3. 
The do_commands will be shown to the user for approval before execution +4. Commands are executed using a TASK TREE system with auto-repair capabilities: + - If a command fails, Cortex will automatically diagnose the error + - Repair sub-tasks may be spawned and executed with additional permission requests + - Terminal monitoring is available during manual intervention +5. After execution, verify the changes worked and provide a final "answer" +6. If execution_failures appear in history, propose alternative solutions + +CRITICAL: You must respond with ONLY a JSON object - no other text before or after. +Do NOT include explanations outside the JSON. Put all reasoning inside the "reasoning" field. + +For gathering information (read-only): +{ + "response_type": "command", + "command": "", + "reasoning": "" +} + +For proposing changes (write/execute): +{ + "response_type": "do_commands", + "do_commands": [ + { + "command": "", + "purpose": "", + "requires_sudo": true/false + } + ], + "reasoning": "" +} + +For final answer: +{ + "response_type": "answer", + "answer": "", + "reasoning": "" +} + +For proposing repair commands after failures: +{ + "response_type": "do_commands", + "do_commands": [ + { + "command": "", + "purpose": "", + "requires_sudo": true/false + } + ], + "reasoning": "" +} + +HANDLING FAILURES: +- When you see "execution_failures" in history, analyze the error messages carefully +- Common errors and their fixes: + * "Permission denied" → Add sudo, check ownership, or run with elevated privileges + * "No such file or directory" → Create parent directories first (mkdir -p) + * "Command not found" → Install the package (apt install) + * "Service not running" → Start the service first (systemctl start) + * "Configuration syntax error" → Read config file, find and fix the error +- Always provide detailed reasoning when proposing repairs +- If the original approach won't work, suggest an alternative approach +- You may request multiple rounds of commands to 
diagnose and fix issues + +IMPORTANT RULES: +- BE ACTION-ORIENTED: After 2-3 info commands, propose do_commands immediately +- DO NOT over-analyze: You have enough info once you know the OS and if basic packages exist +- For installation tasks: Propose the installation commands right away +- For do_commands, each command should be atomic and specific +- Always include a clear purpose for each command +- Mark requires_sudo: true if the command needs root privileges +- Be careful with destructive commands - always explain what they do +- After making changes, verify they worked before giving final answer +- If something fails, diagnose and try alternative approaches +- Multiple permission requests may be made during a single session for repair commands + +ANTI-PATTERNS TO AVOID: +- Don't keep gathering info for more than 3 iterations +- Don't check every possible thing before proposing a solution +- Don't be overly cautious - the user wants action +- If you know how to solve the problem, propose do_commands NOW + +PROTECTED PATHS (will require user authentication): +- /etc/* - System configuration +- /boot/* - Boot configuration +- /usr/bin, /usr/sbin, /sbin, /bin - System binaries +- /root - Root home directory +- /var/log, /var/lib/apt - System data + +COMMAND RESTRICTIONS: +- Use SINGLE commands only - no chaining with &&, ||, or ; +- Use pipes (|) sparingly and only for filtering +- No output redirection (>, >>) in read commands +- If you need multiple commands, return them separately in sequence + +Examples of READ commands: +- cat /etc/nginx/nginx.conf +- ls -la /var/log/ +- systemctl status nginx +- grep -r "error" /var/log/syslog +- dpkg -l | grep nginx +- apt list --installed | grep docker (use apt list, not apt install) + +Examples of WRITE/EXECUTE commands (use with do_commands): +- echo 'server_name example.com;' >> /etc/nginx/sites-available/default +- systemctl restart nginx +- apt install -y nginx +- chmod 755 /var/www/html +- mkdir -p /etc/myapp +- cp 
/etc/nginx/nginx.conf /etc/nginx/nginx.conf.backup + +Examples of REPAIR commands after failures: +- sudo chown -R $USER:$USER /path/to/file # Fix ownership issues +- sudo mkdir -p /path/to/directory # Create missing directories +- sudo apt install -y missing-package # Install missing dependencies +- journalctl -u service-name -n 50 --no-pager # Diagnose service issues""" + + # Maximum characters of command output to include in history + MAX_OUTPUT_CHARS = 2000 + + def _truncate_output(self, output: str) -> str: + """Truncate command output to avoid context length issues.""" + if len(output) <= self.MAX_OUTPUT_CHARS: + return output + # Keep first and last portions + half = self.MAX_OUTPUT_CHARS // 2 + return f"{output[:half]}\n\n... [truncated {len(output) - self.MAX_OUTPUT_CHARS} chars] ...\n\n{output[-half:]}" + + def _build_iteration_prompt( + self, + question: str, + history: list[dict[str, str]] + ) -> str: + """Build the prompt for the current iteration.""" + prompt = f"User Question: {question}\n\n" + + if history: + prompt += "Previous commands and results:\n" + for i, entry in enumerate(history, 1): + # Handle execution_failures context from do_commands + if entry.get("type") == "execution_failures": + prompt += f"\n--- EXECUTION FAILURES (Need Repair) ---\n" + prompt += f"Message: {entry.get('message', 'Commands failed')}\n" + for fail in entry.get("failures", []): + prompt += f"\nFailed Command: {fail.get('command', 'unknown')}\n" + prompt += f"Purpose: {fail.get('purpose', 'unknown')}\n" + prompt += f"Error: {fail.get('error', 'unknown')}\n" + prompt += "\nPlease analyze these failures and propose repair commands or alternative approaches.\n" + continue + + # Handle regular commands + prompt += f"\n--- Attempt {i} ---\n" + + # Check if this is a do_command execution result + if "executed_by" in entry: + prompt += f"Command (executed by {entry['executed_by']}): {entry.get('command', 'unknown')}\n" + prompt += f"Purpose: {entry.get('purpose', 
'unknown')}\n" + if entry.get('success'): + truncated_output = self._truncate_output(entry.get('output', '')) + prompt += f"Status: SUCCESS\nOutput:\n{truncated_output}\n" + else: + prompt += f"Status: FAILED\nError: {entry.get('error', 'unknown')}\n" + else: + prompt += f"Command: {entry.get('command', 'unknown')}\n" + if entry.get('success'): + truncated_output = self._truncate_output(entry.get('output', '')) + prompt += f"Output:\n{truncated_output}\n" + else: + prompt += f"Error: {entry.get('error', 'unknown')}\n" + + prompt += "\n" + + # Check if there were recent failures + has_failures = any( + e.get("type") == "execution_failures" or + (e.get("executed_by") and not e.get("success")) + for e in history[-5:] # Check last 5 entries + ) + + if has_failures: + prompt += "IMPORTANT: There were command failures. Please:\n" + prompt += "1. Analyze the error messages to understand what went wrong\n" + prompt += "2. Propose repair commands using 'do_commands' response type\n" + prompt += "3. Or suggest an alternative approach if the original won't work\n" + else: + prompt += "Based on the above results, either provide another command to gather more information, or provide the final answer.\n" + else: + prompt += "Generate a shell command to gather the information needed to answer this question.\n" + + prompt += "\nRespond with a JSON object as specified in the system prompt." + return prompt + + def _clean_llm_response(self, text: str) -> str: + """Clean raw LLM response to prevent JSON from being displayed to user. + + Extracts meaningful content like reasoning or answer from raw JSON, + or returns a generic error message if the response is pure JSON. + + NOTE: This is only called as a fallback when JSON parsing fails. + We should NOT return placeholder messages for valid response types. 
+ """ + import re + + # If it looks like pure JSON, don't show it to user + text = text.strip() + + # Check for partial JSON (starts with ], }, or other JSON fragments) + if text.startswith((']', '},', ',"', '"response_type"', '"do_commands"', '"command"', '"reasoning"')): + return "" # Return empty so loop continues + + if text.startswith('{') and text.endswith('}'): + # Try to extract useful fields + try: + data = json.loads(text) + # Try to get meaningful content in order of preference + if data.get("answer"): + return data["answer"] + if data.get("reasoning") and data.get("response_type") == "answer": + # Only use reasoning if it's an answer type + reasoning = data["reasoning"] + if not any(p in reasoning for p in ['"command":', '"do_commands":', '"requires_sudo":']): + return f"Analysis: {reasoning}" + # For do_commands or command types, return empty to let parsing retry + if data.get("do_commands") or data.get("command"): + return "" # Return empty so the proper parsing can happen + # Pure JSON with no useful fields + return "" + except json.JSONDecodeError: + pass + + # Check for JSON-like patterns in the text + json_patterns = [ + r'"response_type"\s*:\s*"', + r'"do_commands"\s*:\s*\[', + r'"command"\s*:\s*"', + r'"requires_sudo"\s*:\s*', + r'\[\s*\{', # Start of array of objects + r'\}\s*,\s*\{', # Object separator + r'\]\s*,\s*"', # End of array followed by key + ] + + # If text contains raw JSON patterns, try to extract non-JSON parts + has_json_patterns = any(re.search(p, text) for p in json_patterns) + if has_json_patterns: + # Try to find text before or after JSON + parts = re.split(r'\{[\s\S]*"response_type"[\s\S]*\}', text) + clean_parts = [p.strip() for p in parts if p.strip() and len(p.strip()) > 20] + if clean_parts: + # Filter out parts that still look like JSON + clean_parts = [p for p in clean_parts if not any(j in p for j in ['":', '",', '{}', '[]'])] + if clean_parts: + return " ".join(clean_parts) + + # No good text found, return generic 
message + return "I'm processing your request. Please wait for the proper output." + + # Text doesn't look like JSON, return as-is + return text + + def _parse_llm_response(self, response_text: str) -> SystemCommand: + """Parse the LLM response into a SystemCommand object.""" + # Try to extract JSON from the response + original_text = response_text.strip() + response_text = original_text + + # Handle markdown code blocks + if "```json" in response_text: + response_text = response_text.split("```json")[1].split("```")[0].strip() + elif "```" in response_text: + parts = response_text.split("```") + if len(parts) >= 2: + response_text = parts[1].split("```")[0].strip() + + # Try direct JSON parsing first try: - content = response.choices[0].message.content or "" - except (IndexError, AttributeError): - content = "" - return content.strip() - - def _call_claude(self, question: str, system_prompt: str) -> str: - response = self.client.messages.create( - model=self.model, - max_tokens=MAX_TOKENS, - temperature=0.3, - system=system_prompt, - messages=[{"role": "user", "content": question}], - ) - # Defensive: content list or text may be missing/None + data = json.loads(response_text) + except json.JSONDecodeError: + # Try to find JSON object in the text (LLM sometimes adds prose before/after) + json_match = re.search(r'\{[\s\S]*"response_type"[\s\S]*\}', original_text) + if json_match: + try: + # Find the complete JSON object by matching braces + json_str = json_match.group() + # Balance braces to get complete JSON + brace_count = 0 + json_end = 0 + for i, char in enumerate(json_str): + if char == '{': + brace_count += 1 + elif char == '}': + brace_count -= 1 + if brace_count == 0: + json_end = i + 1 + break + + if json_end > 0: + json_str = json_str[:json_end] + + data = json.loads(json_str) + except json.JSONDecodeError: + # If still fails, don't show raw JSON to user + clean_answer = self._clean_llm_response(original_text) + return SystemCommand( + 
response_type=LLMResponseType.ANSWER, + answer=clean_answer, + reasoning="Could not parse structured response, treating as direct answer" + ) + else: + # No JSON found, clean up before treating as direct answer + clean_answer = self._clean_llm_response(original_text) + return SystemCommand( + response_type=LLMResponseType.ANSWER, + answer=clean_answer, + reasoning="No JSON structure found, treating as direct answer" + ) + try: - text = getattr(response.content[0], "text", None) or "" - except (IndexError, AttributeError): - text = "" - return text.strip() + # Handle do_commands - convert dict list to DoCommand objects + if data.get("response_type") == "do_commands" and "do_commands" in data: + data["do_commands"] = [ + DoCommand(**cmd) if isinstance(cmd, dict) else cmd + for cmd in data["do_commands"] + ] + + return SystemCommand(**data) + except Exception as e: + # If SystemCommand creation fails, don't show raw JSON to user + clean_answer = self._clean_llm_response(original_text) + return SystemCommand( + response_type=LLMResponseType.ANSWER, + answer=clean_answer, + reasoning=f"Failed to create SystemCommand: {e}" + ) - def _call_ollama(self, question: str, system_prompt: str) -> str: - import urllib.error - import urllib.request + def _call_llm(self, system_prompt: str, user_prompt: str) -> str: + """Call the LLM and return the response text.""" + # Check for interrupt before making API call + if self._interrupted: + raise InterruptedError("Operation interrupted by user") + + if self.provider == "openai": + response = self.client.chat.completions.create( + model=self.model, + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + temperature=0.3, + max_tokens=1000, + ) + try: + content = response.choices[0].message.content or "" + except (IndexError, AttributeError): + content = "" + return content.strip() + + elif self.provider == "claude": + response = self.client.messages.create( + model=self.model, + 
max_tokens=1000, + temperature=0.3, + system=system_prompt, + messages=[{"role": "user", "content": user_prompt}], + ) + try: + text = getattr(response.content[0], "text", None) or "" + except (IndexError, AttributeError): + text = "" + return text.strip() + + elif self.provider == "ollama": + import urllib.request - url = f"{self.ollama_url}/api/generate" - prompt = f"{system_prompt}\n\nQuestion: {question}" + url = f"{self.ollama_url}/api/generate" + prompt = f"{system_prompt}\n\n{user_prompt}" - data = json.dumps( - { + data = json.dumps({ "model": self.model, "prompt": prompt, "stream": False, - "options": {"temperature": 0.3, "num_predict": MAX_TOKENS}, + "options": {"temperature": 0.3}, + }).encode("utf-8") + + req = urllib.request.Request( + url, data=data, headers={"Content-Type": "application/json"} + ) + + with urllib.request.urlopen(req, timeout=60) as response: + result = json.loads(response.read().decode("utf-8")) + return result.get("response", "").strip() + + elif self.provider == "fake": + # For testing - return a simple answer + fake_response = os.environ.get("CORTEX_FAKE_RESPONSE", "") + if fake_response: + return fake_response + return json.dumps({ + "response_type": "answer", + "answer": "Test mode response", + "reasoning": "Fake provider for testing" + }) + else: + raise ValueError(f"Unsupported provider: {self.provider}") + + def _call_llm_for_do(self, user_request: str, context: dict | None = None) -> dict: + """Call LLM to process a natural language request for the interactive session. + + This is passed to DoHandler as a callback so it can make LLM calls + during the interactive session. + + Args: + user_request: The user's natural language request + context: Optional context dict with executed_commands, session_actions, etc. 
+ + Returns: + Dict with either: + - {"response_type": "do_commands", "do_commands": [...], "reasoning": "..."} + - {"response_type": "answer", "answer": "...", "reasoning": "..."} + - {"response_type": "command", "command": "...", "reasoning": "..."} + """ + context = context or {} + + system_prompt = """You are a Linux system assistant in an interactive session. +The user has just completed some tasks and now wants to do something else. + +SCOPE RESTRICTION: +You can ONLY help with Linux/technical topics. If the user asks about anything unrelated +(social chat, personal advice, general knowledge, etc.), respond with: +{ + "response_type": "answer", + "answer": "I'm Cortex, a Linux system assistant. I can only help with Linux system administration and technical tasks. What would you like me to do on your system?", + "reasoning": "User query is outside my scope" +} + +Based on their request, decide what to do: +1. If they want to EXECUTE commands (install, configure, start, etc.), respond with do_commands +2. If they want INFORMATION (show, explain, how to), respond with an answer +3. If they want to RUN a single read-only command, respond with command + +CRITICAL: Respond with ONLY a JSON object - no other text. + +For executing commands: +{ + "response_type": "do_commands", + "do_commands": [ + {"command": "...", "purpose": "...", "requires_sudo": true/false} + ], + "reasoning": "..." +} + +For providing information: +{ + "response_type": "answer", + "answer": "...", + "reasoning": "..." +} + +For running a read-only command: +{ + "response_type": "command", + "command": "...", + "reasoning": "..." 
+} +""" + + # Build context-aware prompt + user_prompt = f"Context:\n" + if context.get("original_query"): + user_prompt += f"- Original task: {context['original_query']}\n" + if context.get("executed_commands"): + user_prompt += f"- Commands already executed: {', '.join(context['executed_commands'][:5])}\n" + if context.get("session_actions"): + user_prompt += f"- Actions in this session: {', '.join(context['session_actions'][:3])}\n" + + user_prompt += f"\nUser request: {user_request}\n" + user_prompt += "\nRespond with a JSON object." + + try: + response_text = self._call_llm(system_prompt, user_prompt) + + # Parse the response + parsed = self._parse_llm_response(response_text) + + # Convert to dict + result = { + "response_type": parsed.response_type.value, + "reasoning": parsed.reasoning, } - ).encode("utf-8") - - req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"}) - - with urllib.request.urlopen(req, timeout=60) as response: - result = json.loads(response.read().decode("utf-8")) - return result.get("response", "").strip() - - def _call_fake(self, question: str, system_prompt: str) -> str: - """Return predefined fake response for testing.""" - fake_response = os.environ.get("CORTEX_FAKE_RESPONSE", "") - if fake_response: - return fake_response - # Default fake responses for common questions - q_lower = question.lower() - if "python" in q_lower and "version" in q_lower: - return f"You have Python {platform.python_version()} installed." - return "I cannot answer that question in test mode." 
- - def ask(self, question: str, system_prompt: str | None = None) -> str: + + if parsed.response_type == LLMResponseType.DO_COMMANDS and parsed.do_commands: + result["do_commands"] = [ + {"command": cmd.command, "purpose": cmd.purpose, "requires_sudo": cmd.requires_sudo} + for cmd in parsed.do_commands + ] + elif parsed.response_type == LLMResponseType.COMMAND and parsed.command: + result["command"] = parsed.command + elif parsed.response_type == LLMResponseType.ANSWER and parsed.answer: + result["answer"] = parsed.answer + + return result + + except Exception as e: + return { + "response_type": "error", + "error": str(e), + } + + def ask(self, question: str) -> str: """Ask a natural language question about the system. + Uses an agentic loop to execute read-only commands and gather information + to answer the user's question. + + In --do mode, can also execute write/modify commands with user confirmation. + Args: question: Natural language question - system_prompt: Optional override for the system prompt Returns: Human-readable answer string Raises: ValueError: If question is empty - RuntimeError: If offline and no cached response exists + RuntimeError: If LLM API call fails """ if not question or not question.strip(): raise ValueError("Question cannot be empty") question = question.strip() - - # Use provided system prompt or generate default - if system_prompt is None: - context = self.info_gatherer.gather_context() - system_prompt = self._get_system_prompt(context) - - # Cache lookup uses both question and system context (via system_prompt) for system-specific answers - cache_key = f"ask:{question}" - - # Try cache first - if self.cache is not None: + system_prompt = self._get_system_prompt() + + # Don't cache in do_mode (each run is unique) + cache_key = f"ask:v2:{question}" + if self.cache is not None and not self.do_mode: cached = self.cache.get_commands( prompt=cache_key, provider=self.provider, @@ -594,58 +1429,394 @@ def ask(self, question: str, 
system_prompt: str | None = None) -> str: system_prompt=system_prompt, ) if cached is not None and len(cached) > 0: - # Track topic access even for cached responses - self.learning_tracker.record_topic(question) return cached[0] - # Call LLM - try: - if self.provider == "openai": - answer = self._call_openai(question, system_prompt) - elif self.provider == "claude": - answer = self._call_claude(question, system_prompt) - elif self.provider == "ollama": - answer = self._call_ollama(question, system_prompt) - elif self.provider == "fake": - answer = self._call_fake(question, system_prompt) - else: - raise ValueError(f"Unsupported provider: {self.provider}") - except Exception as e: - raise RuntimeError(f"LLM API call failed: {str(e)}") - - # Cache the response silently - if self.cache is not None and answer: + # Agentic loop + history: list[dict[str, Any]] = [] + tried_commands: list[str] = [] + max_iterations = self.MAX_DO_ITERATIONS if self.do_mode else self.MAX_ITERATIONS + + if self.debug: + mode_str = "[DO MODE]" if self.do_mode else "" + self._debug_print("Ask Query", f"{mode_str} Question: {question}", style="cyan") + + # Import console for progress output + from rich.console import Console + loop_console = Console() + + for iteration in range(max_iterations): + # Check for interrupt at start of each iteration + if self._interrupted: + self._interrupted = False # Reset for next request + return "Operation interrupted by user." 
+ + if self.debug: + self._debug_print( + f"Iteration {iteration + 1}/{max_iterations}", + f"Calling LLM ({self.provider}/{self.model})...", + style="blue" + ) + + # Show progress to user (even without --debug) + if self.do_mode and iteration > 0: + from rich.panel import Panel + loop_console.print() + loop_console.print(Panel( + f"[bold cyan]Analyzing results...[/bold cyan] [dim]Step {iteration + 1}[/dim]", + border_style="dim cyan", + padding=(0, 1), + expand=False, + )) + + # Build prompt with history + user_prompt = self._build_iteration_prompt(question, history) + + # Call LLM try: - self.cache.put_commands( - prompt=cache_key, - provider=self.provider, - model=self.model, - system_prompt=system_prompt, - commands=[answer], + response_text = self._call_llm(system_prompt, user_prompt) + # Check for interrupt after LLM call + if self._interrupted: + self._interrupted = False + return "Operation interrupted by user." + except InterruptedError: + # Explicitly interrupted + self._interrupted = False + return "Operation interrupted by user." + except Exception as e: + if self._interrupted: + self._interrupted = False + return "Operation interrupted by user." + raise RuntimeError(f"LLM API call failed: {str(e)}") + + if self.debug: + self._debug_print("LLM Raw Response", response_text[:500] + ("..." 
if len(response_text) > 500 else ""), style="dim") + + # Parse response + parsed = self._parse_llm_response(response_text) + + if self.debug: + self._debug_print( + "LLM Parsed Response", + f"Type: {parsed.response_type.value}\n" + f"Reasoning: {parsed.reasoning}\n" + f"Command: {parsed.command or 'N/A'}\n" + f"Do Commands: {len(parsed.do_commands) if parsed.do_commands else 0}\n" + f"Answer: {(parsed.answer[:100] + '...') if parsed.answer and len(parsed.answer) > 100 else parsed.answer or 'N/A'}", + style="yellow" ) - except (OSError, sqlite3.Error): - pass # Silently fail cache writes - - # Track educational topics for learning history - self.learning_tracker.record_topic(question) - - return answer - - def get_learning_history(self) -> dict[str, Any]: - """Get the user's learning history. - - Returns: - Dictionary with topics explored and statistics - """ - return self.learning_tracker.get_history() - - def get_recent_topics(self, limit: int = 5) -> list[str]: - """Get recently explored educational topics. 
- - Args: - limit: Maximum number of topics to return - + + # Show what the LLM decided to do + if self.do_mode and not self.debug: + from rich.panel import Panel + if parsed.response_type == LLMResponseType.COMMAND and parsed.command: + loop_console.print(Panel( + f"[bold]🔍 Gathering info[/bold]\n[cyan]{parsed.command}[/cyan]", + border_style="blue", + padding=(0, 1), + expand=False, + )) + elif parsed.response_type == LLMResponseType.DO_COMMANDS and parsed.do_commands: + loop_console.print(Panel( + f"[bold green]📋 Ready to execute[/bold green] [white]{len(parsed.do_commands)} command(s)[/white]", + border_style="green", + padding=(0, 1), + expand=False, + )) + elif parsed.response_type == LLMResponseType.ANSWER and parsed.answer: + pass # Will be handled below + else: + # LLM returned an unexpected or empty response + loop_console.print(f"[dim yellow]⏳ Waiting for LLM to propose commands...[/dim yellow]") + + # If LLM provides a final answer, return it + if parsed.response_type == LLMResponseType.ANSWER: + answer = parsed.answer or "" + + # Skip empty answers (parsing fallback that should continue loop) + if not answer.strip(): + if self.do_mode: + loop_console.print(f"[dim] (waiting for LLM to propose commands...)[/dim]") + continue + + if self.debug: + self._debug_print("Final Answer", answer, style="green") + + # Cache the response (not in do_mode) + if self.cache is not None and answer and not self.do_mode: + try: + self.cache.put_commands( + prompt=cache_key, + provider=self.provider, + model=self.model, + system_prompt=system_prompt, + commands=[answer], + ) + except (OSError, sqlite3.Error): + pass + + # Print condensed summary for questions + self._print_query_summary(question, tried_commands, answer) + + return answer + + # Handle do_commands in --do mode + if parsed.response_type == LLMResponseType.DO_COMMANDS and self.do_mode: + if not parsed.do_commands: + # LLM said do_commands but provided none - ask it to try again + loop_console.print(f"[yellow]⚠ 
LLM response incomplete, retrying...[/yellow]") + history.append({ + "type": "error", + "message": "Response contained no commands. Please provide specific commands to execute.", + }) + continue + + result = self._handle_do_commands(parsed, question, history) + if result is not None: + # Result is either a completion message or None (continue loop) + return result + + # LLM wants to execute a read-only command + if parsed.command: + command = parsed.command + tried_commands.append(command) + + if self.debug: + self._debug_print("Executing Command", f"$ {command}", style="magenta") + + # Validate and execute the command + success, stdout, stderr = CommandValidator.execute_command(command) + + # Show execution result to user with expandable output + if self.do_mode and not self.debug: + if success: + output_lines = len(stdout.split('\n')) if stdout else 0 + loop_console.print(f"[green] ✓ Got {output_lines} lines of output[/green]") + + # Show expandable output + if stdout and output_lines > 0: + self._show_expandable_output(loop_console, stdout, command) + else: + loop_console.print(f"[yellow] ⚠ Command failed: {stderr[:100]}[/yellow]") + + if self.debug: + if success: + output_preview = stdout[:1000] + ("..." if len(stdout) > 1000 else "") if stdout else "(empty output)" + self._debug_print("Command Output (SUCCESS)", output_preview, style="green") + else: + self._debug_print("Command Output (FAILED)", f"Error: {stderr}", style="red") + + history.append({ + "command": command, + "success": success, + "output": stdout if success else "", + "error": stderr if not success else "", + }) + continue # Continue to next iteration with new info + + # If we get here, no valid action was taken + # This means LLM returned something we couldn't use + if self.do_mode and not self.debug: + if parsed.reasoning: + # Show reasoning if available + loop_console.print(f"[dim] LLM: {parsed.reasoning[:100]}{'...' 
if len(parsed.reasoning) > 100 else ''}[/dim]") + + # Max iterations reached + if self.do_mode: + if tried_commands: + commands_list = "\n".join(f" - {cmd}" for cmd in tried_commands) + result = f"The LLM gathered information but didn't propose any commands to execute.\n\nInfo gathered with:\n{commands_list}\n\nTry being more specific about what you want to do." + else: + result = "The LLM couldn't determine what commands to run. Try rephrasing your request with more specific details." + + loop_console.print(f"[yellow]⚠ {result}[/yellow]") + else: + commands_list = "\n".join(f" - {cmd}" for cmd in tried_commands) + result = f"Could not find an answer after {max_iterations} attempts.\n\nTried commands:\n{commands_list}" + + if self.debug: + self._debug_print("Max Iterations Reached", result, style="red") + + return result + + def _handle_do_commands( + self, + parsed: SystemCommand, + question: str, + history: list[dict[str, Any]] + ) -> str | None: + """Handle do_commands response type - execute with user confirmation. + + Uses task tree execution for advanced auto-repair capabilities: + - Spawns repair sub-tasks when commands fail + - Requests additional permissions during execution + - Monitors terminals during manual intervention + - Provides detailed failure reasoning + Returns: - List of topic strings + Result string if completed, None if should continue loop, + or "USER_DECLINED:..." if user declined. 
""" - return self.learning_tracker.get_recent_topics(limit) + if not self._do_handler or not parsed.do_commands: + return None + + from rich.console import Console + console = Console() + + # Prepare commands for analysis + commands = [ + (cmd.command, cmd.purpose) for cmd in parsed.do_commands + ] + + # Analyze for protected paths + analyzed = self._do_handler.analyze_commands_for_protected_paths(commands) + + # Show reasoning + console.print() + console.print(f"[bold cyan]🤖 Cortex Analysis:[/bold cyan] {parsed.reasoning}") + console.print() + + # Show task tree preview + console.print("[dim]📋 Planned tasks:[/dim]") + for i, (cmd, purpose, protected) in enumerate(analyzed, 1): + protected_note = f" [yellow](protected: {', '.join(protected)})[/yellow]" if protected else "" + console.print(f"[dim] {i}. {cmd[:60]}...{protected_note}[/dim]") + console.print() + + # Request user confirmation + if self._do_handler.request_user_confirmation(analyzed): + # User approved - execute using task tree for better error handling + run = self._do_handler.execute_with_task_tree(analyzed, question) + + # Add execution results to history + for cmd_log in run.commands: + history.append({ + "command": cmd_log.command, + "success": cmd_log.status.value == "success", + "output": cmd_log.output, + "error": cmd_log.error, + "purpose": cmd_log.purpose, + "executed_by": "cortex" if "Manual execution" not in (cmd_log.purpose or "") else "user_manual", + }) + + # Check if any commands were completed manually during execution + manual_completed = self._do_handler.get_completed_manual_commands() + if manual_completed: + history.append({ + "type": "commands_completed_manually", + "commands": manual_completed, + "message": f"User manually executed these commands successfully: {', '.join(manual_completed)}. 
Do NOT re-propose them.", + }) + + # Check if there were failures that need LLM input + failures = [c for c in run.commands if c.status.value == "failed"] + if failures: + # Add failure context to history for LLM to help with + failure_summary = [] + for f in failures: + failure_summary.append({ + "command": f.command, + "error": f.error[:500] if f.error else "Unknown error", + "purpose": f.purpose, + }) + + history.append({ + "type": "execution_failures", + "failures": failure_summary, + "message": f"{len(failures)} command(s) failed during execution. Please analyze and suggest fixes.", + }) + + # Continue loop so LLM can suggest next steps + return None + + # All commands succeeded (automatically or manually) + successes = [c for c in run.commands if c.status.value == "success"] + if successes and not failures: + # Everything worked - return success message + summary = run.summary or f"Successfully executed {len(successes)} command(s)" + return f"✅ {summary}" + + # Return summary for now - LLM will provide final answer in next iteration + return None + else: + # User declined automatic execution - provide manual instructions with monitoring + run = self._do_handler.provide_manual_instructions(analyzed, question) + + # Check if any commands were completed manually + manual_completed = self._do_handler.get_completed_manual_commands() + + # Check success/failure status from the run + from cortex.do_runner.models import CommandStatus + successful_count = sum(1 for c in run.commands if c.status == CommandStatus.SUCCESS) + failed_count = sum(1 for c in run.commands if c.status == CommandStatus.FAILED) + total_expected = len(analyzed) + + if manual_completed and successful_count > 0: + # Commands were completed successfully - go to end + history.append({ + "type": "commands_completed_manually", + "commands": manual_completed, + "message": f"User manually executed {successful_count} commands successfully.", + }) + return f"✅ Commands completed manually. 
{successful_count} succeeded." + + # Commands were NOT all successful - ask user what they want to do + console.print() + from rich.panel import Panel + from rich.prompt import Prompt + + status_msg = [] + if successful_count > 0: + status_msg.append(f"[green]✓ {successful_count} succeeded[/green]") + if failed_count > 0: + status_msg.append(f"[red]✗ {failed_count} failed[/red]") + remaining = total_expected - successful_count - failed_count + if remaining > 0: + status_msg.append(f"[yellow]○ {remaining} not executed[/yellow]") + + console.print(Panel( + " | ".join(status_msg) if status_msg else "[yellow]No commands were executed[/yellow]", + title="[bold] Manual Intervention Result [/bold]", + border_style="yellow", + padding=(0, 1), + )) + + console.print() + console.print("[bold]What would you like to do?[/bold]") + console.print("[dim] • Type your request to retry or modify the approach[/dim]") + console.print("[dim] • Say 'done', 'no', or 'skip' to finish without retrying[/dim]") + console.print() + + try: + user_response = Prompt.ask("[cyan]Your response[/cyan]").strip() + except (EOFError, KeyboardInterrupt): + user_response = "done" + + # Check if user wants to end + end_keywords = ["done", "no", "skip", "exit", "quit", "stop", "cancel", "n", "finish", "end"] + if user_response.lower() in end_keywords or not user_response: + # User doesn't want to retry - go to end + history.append({ + "type": "manual_intervention_ended", + "message": f"User ended manual intervention. {successful_count} commands succeeded.", + }) + if successful_count > 0: + return f"✅ Session ended. {successful_count} command(s) completed successfully." + else: + return f"Session ended. No commands were executed." 
+ + # User wants to retry or modify - add their input to history + history.append({ + "type": "manual_intervention_feedback", + "user_input": user_response, + "previous_commands": [(cmd, purpose, []) for cmd, purpose, _ in analyzed], + "successful_count": successful_count, + "failed_count": failed_count, + "message": f"User requested: {user_response}. Previous attempt had {successful_count} successes and {failed_count} failures.", + }) + + console.print() + console.print(f"[cyan]🔄 Processing your request: {user_response[:50]}{'...' if len(user_response) > 50 else ''}[/cyan]") + + # Continue the loop with user's new input as additional context + # The LLM will see the history and the user's feedback + return None diff --git a/cortex/cli.py b/cortex/cli.py index 267228b0e..eac50e186 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -1,174 +1,42 @@ import argparse -import json import logging import os -import select +import subprocess import sys import time -import uuid -from collections.abc import Callable -from datetime import datetime, timezone -from pathlib import Path -from typing import TYPE_CHECKING, Any - -from rich.console import Console -from rich.markdown import Markdown -from rich.panel import Panel -from rich.table import Table - -from cortex.api_key_detector import auto_detect_api_key, setup_api_key +from datetime import datetime +from typing import Any + from cortex.ask import AskHandler from cortex.branding import VERSION, console, cx_header, cx_print, show_banner from cortex.coordinator import InstallationCoordinator, InstallationStep, StepStatus from cortex.demo import run_demo -from cortex.dependency_importer import DependencyImporter, PackageEcosystem, ParseResult +from cortex.dependency_importer import ( + DependencyImporter, + PackageEcosystem, + ParseResult, + format_package_list, +) from cortex.env_manager import EnvironmentManager, get_env_manager -from cortex.i18n import SUPPORTED_LANGUAGES, LanguageConfig, get_language, set_language, t from 
cortex.installation_history import InstallationHistory, InstallationStatus, InstallationType from cortex.llm.interpreter import CommandInterpreter from cortex.network_config import NetworkConfig from cortex.notification_manager import NotificationManager -from cortex.predictive_prevention import FailurePrediction, PredictiveErrorManager, RiskLevel -from cortex.role_manager import RoleManager from cortex.stack_manager import StackManager -from cortex.stdin_handler import StdinHandler -from cortex.uninstall_impact import ( - ImpactResult, - ImpactSeverity, - ServiceStatus, - UninstallImpactAnalyzer, -) -from cortex.update_checker import UpdateChannel, should_notify_update -from cortex.updater import Updater, UpdateStatus from cortex.validators import validate_api_key, validate_install_request -from cortex.version_manager import get_version_string - -# CLI Help Constants -HELP_SKIP_CONFIRM = "Skip confirmation prompt" - -if TYPE_CHECKING: - from cortex.daemon_client import DaemonClient, DaemonResponse - from cortex.shell_env_analyzer import ShellEnvironmentAnalyzer # Suppress noisy log messages in normal operation logging.getLogger("httpx").setLevel(logging.WARNING) logging.getLogger("cortex.installation_history").setLevel(logging.ERROR) - sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) class CortexCLI: - RISK_COLORS = { - RiskLevel.NONE: "green", - RiskLevel.LOW: "green", - RiskLevel.MEDIUM: "yellow", - RiskLevel.HIGH: "orange1", - RiskLevel.CRITICAL: "red", - } - # Installation messages - INSTALL_FAIL_MSG = "Installation failed" - def __init__(self, verbose: bool = False): self.spinner_chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"] self.spinner_idx = 0 self.verbose = verbose - self.predict_manager = None - - @property - def risk_labels(self) -> dict[RiskLevel, str]: - """ - Localized mapping from RiskLevel enum values to human-readable strings. 
- - Returns a dictionary mapping each tier (RiskLevel.NONE to CRITICAL) - to its corresponding localized label via the t() translation helper. - """ - return { - RiskLevel.NONE: t("predictive.no_risk"), - RiskLevel.LOW: t("predictive.low_risk"), - RiskLevel.MEDIUM: t("predictive.medium_risk"), - RiskLevel.HIGH: t("predictive.high_risk"), - RiskLevel.CRITICAL: t("predictive.critical_risk"), - } - - # Define a method to handle Docker-specific permission repairs - def docker_permissions(self, args: argparse.Namespace) -> int: - """Handle the diagnosis and repair of Docker file permissions. - - This method coordinates the environment-aware scanning of the project - directory and applies ownership reclamation logic. It ensures that - administrative actions (sudo) are never performed without user - acknowledgment unless the non-interactive flag is present. - - Args: - args: The parsed command-line arguments containing the execution - context and safety flags. - - Returns: - int: 0 if successful or the operation was gracefully cancelled, - 1 if a system or logic error occurred. - """ - from cortex.permission_manager import PermissionManager - - try: - manager = PermissionManager(os.getcwd()) - cx_print("🔍 Scanning for Docker-related permission issues...", "info") - - # Validate Docker Compose configurations for missing user mappings - # to help prevent future permission drift. - manager.check_compose_config() - - # Retrieve execution context from argparse. - execute_flag = getattr(args, "execute", False) - yes_flag = getattr(args, "yes", False) - - # SAFETY GUARD: If executing repairs, prompt for confirmation unless - # the --yes flag was provided. This follows the project safety - # standard: 'No silent sudo execution'. 
- if execute_flag and not yes_flag: - mismatches = manager.diagnose() - if mismatches: - cx_print( - f"⚠️ Found {len(mismatches)} paths requiring ownership reclamation.", - "warning", - ) - try: - # Interactive confirmation prompt for administrative repair. - console.print( - "[bold cyan]Reclaim ownership using sudo? (y/n): [/bold cyan]", end="" - ) - response = StdinHandler.get_input() - if response.lower() not in ("y", "yes"): - cx_print("Operation cancelled", "info") - return 0 - except (EOFError, KeyboardInterrupt): - # Graceful handling of terminal exit or manual interruption. - console.print() - cx_print("Operation cancelled", "info") - return 0 - - # Delegate repair logic to PermissionManager. If execute is False, - # a dry-run report is generated. If True, repairs are batched to - # avoid system ARG_MAX shell limits. - if manager.fix_permissions(execute=execute_flag): - if execute_flag: - cx_print("✨ Permissions fixed successfully!", "success") - return 0 - - return 1 - - except (PermissionError, FileNotFoundError, OSError) as e: - # Handle system-level access issues or missing project files. - cx_print(f"❌ Permission check failed: {e}", "error") - return 1 - except NotImplementedError as e: - # Report environment incompatibility (e.g., native Windows). - cx_print(f"❌ {e}", "error") - return 1 - except Exception as e: - # Safety net for unexpected runtime exceptions to prevent CLI crashes. - cx_print(f"❌ Unexpected error: {e}", "error") - return 1 def _debug(self, message: str): """Print debug info only in verbose mode""" @@ -176,50 +44,37 @@ def _debug(self, message: str): console.print(f"[dim][DEBUG] {message}[/dim]") def _get_api_key(self) -> str | None: - # 1. 
Check explicit provider override first (fake/ollama need no key) - explicit_provider = os.environ.get("CORTEX_PROVIDER", "").lower() - if explicit_provider == "fake": - self._debug("Using Fake provider for testing") - return "fake-key" - if explicit_provider == "ollama": + # Check if using Ollama or Fake provider (no API key needed) + provider = self._get_provider() + if provider == "ollama": self._debug("Using Ollama (no API key required)") - return "ollama-local" - - # 2. Try auto-detection + prompt to save (setup_api_key handles both) - success, key, detected_provider = setup_api_key() - if success: - self._debug(f"Using {detected_provider} API key") - # Store detected provider so _get_provider can use it - self._detected_provider = detected_provider - return key + return "ollama-local" # Placeholder for Ollama + if provider == "fake": + self._debug("Using Fake provider for testing") + return "fake-key" # Placeholder for Fake provider - # Still no key - self._print_error(t("api_key.not_found")) - cx_print(t("api_key.configure_prompt"), "info") - cx_print(t("api_key.ollama_hint"), "info") - return None + is_valid, detected_provider, error = validate_api_key() + if not is_valid: + self._print_error(error) + cx_print("Run [bold]cortex wizard[/bold] to configure your API key.", "info") + cx_print("Or use [bold]CORTEX_PROVIDER=ollama[/bold] for offline mode.", "info") + return None + api_key = os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + return api_key def _get_provider(self) -> str: - # 1. Check explicit provider override FIRST (highest priority) + # Check environment variable for explicit provider choice explicit_provider = os.environ.get("CORTEX_PROVIDER", "").lower() if explicit_provider in ["ollama", "openai", "claude", "fake"]: - self._debug(f"Using explicit CORTEX_PROVIDER={explicit_provider}") return explicit_provider - # 2. 
Use provider from auto-detection (set by _get_api_key) - detected = getattr(self, "_detected_provider", None) - if detected == "anthropic": - return "claude" - elif detected == "openai": - return "openai" - - # 3. Check env vars (may have been set by auto-detect) + # Auto-detect based on available API keys if os.environ.get("ANTHROPIC_API_KEY"): return "claude" elif os.environ.get("OPENAI_API_KEY"): return "openai" - # 4. Fallback to Ollama for offline mode + # Fallback to Ollama for offline mode return "ollama" def _print_status(self, emoji: str, message: str): @@ -234,7 +89,7 @@ def _print_status(self, emoji: str, message: str): cx_print(message, status) def _print_error(self, message: str): - cx_print(f"{t('ui.error_prefix')}: {message}", "error") + cx_print(f"Error: {message}", "error") def _print_success(self, message: str): cx_print(message, "success") @@ -319,169 +174,6 @@ def notify(self, args): return 1 # ------------------------------- - - def _ask_ai_and_render(self, question: str) -> int: - """Invoke AI with question and render response as Markdown.""" - api_key = self._get_api_key() - if not api_key: - self._print_error("No API key found. Please configure an API provider.") - return 1 - - provider = self._get_provider() - try: - handler = AskHandler(api_key=api_key, provider=provider) - answer = handler.ask(question) - console.print(Markdown(answer)) - return 0 - except ImportError as e: - self._print_error(str(e)) - cx_print("Install required SDK or use CORTEX_PROVIDER=ollama", "info") - return 1 - except (ValueError, RuntimeError) as e: - self._print_error(str(e)) - return 1 - - def role(self, args: argparse.Namespace) -> int: - """ - Handles system role detection and manual configuration via AI context sensing. - - This method supports two subcommands: - - 'detect': Analyzes the system and suggests appropriate roles based on - installed binaries, hardware, and activity patterns. 
- - 'set': Manually assigns a role slug and provides tailored package recommendations. - - Args: - args: The parsed command-line arguments containing the role_action - and optional role_slug. - - Returns: - int: Exit code - 0 on success, 1 on error. - """ - manager = RoleManager() - action = getattr(args, "role_action", None) - - # Step 1: Ensure a subcommand is provided to maintain a valid return state. - if not action: - self._print_error("Please specify a subcommand (detect/set)") - return 1 - - if action == "detect": - # Retrieve environmental facts including active persona and installation history. - context = manager.get_system_context() - - # Step 2: Extract the most recent patterns for AI analysis. - # Python handles list slicing gracefully even if the list has fewer than 10 items. - patterns = context.get("patterns", []) - limited_patterns = patterns[-10:] - patterns_str = ( - "\n".join([f" • {p}" for p in limited_patterns]) or " • No patterns sensed" - ) - - signals_str = ", ".join(context.get("binaries", [])) or "none detected" - gpu_status = ( - "GPU Acceleration available" if context.get("has_gpu") else "Standard CPU only" - ) - - # Generate a unique timestamp for cache-busting and session tracking. - timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M") - - # Construct the architectural analysis prompt for the LLM. - question = ( - f"### SYSTEM ARCHITECT ANALYSIS [TIME: {timestamp}] ###\n" - f"ENVIRONMENTAL CONTEXT:\n" - f"- CURRENTLY SET ROLE: {context.get('active_role')}\n" - f"- Detected Binaries: [{signals_str}]\n" - f"- Hardware Acceleration: {gpu_status}\n" - f"- Installation History: {'Present' if context.get('has_install_history') else 'None'}\n\n" - f"OPERATIONAL_HISTORY (Technical Intents & Installed Packages):\n{patterns_str}\n\n" - f"TASK: Acting as a Senior Systems Architect, analyze the existing role and signals. 
" - f"Suggest 3-5 professional roles that complement the system.\n\n" - f"--- STRICT RESPONSE FORMAT ---\n" - f"YOUR RESPONSE MUST START WITH THE NUMBER '1.' AND CONTAIN ONLY THE LIST. " - f"DO NOT PROVIDE INTRODUCTIONS. DO NOT PROVIDE REASONING. DO NOT PROVIDE A SUMMARY. " - f"FAILURE TO COMPLY WILL BREAK THE CLI PARSER.\n\n" - f"Detected roles:\n" - f"1." - ) - - cx_print("🧠 AI is sensing system context and activity patterns...", "thinking") - if self._ask_ai_and_render(question) != 0: - return 1 - console.print() - - # Record the detection event in the installation history database for audit purposes. - history = InstallationHistory() - history.record_installation( - InstallationType.CONFIG, - ["system-detection"], - ["cortex role detect"], - datetime.now(timezone.utc), - ) - - console.print( - "\n[dim italic]💡 To install any recommended packages, simply run:[/dim italic]" - ) - console.print("[bold cyan] cortex install [/bold cyan]\n") - return 0 - - elif action == "set": - if not args.role_slug: - self._print_error("Role slug is required for 'set' command.") - return 1 - - role_slug = args.role_slug - - # Step 3: Persist the role and handle both validation and persistence errors. - try: - manager.save_role(role_slug) - history = InstallationHistory() - history.record_installation( - InstallationType.CONFIG, - [role_slug], - [f"cortex role set {role_slug}"], - datetime.now(timezone.utc), - ) - except ValueError as e: - self._print_error(f"Invalid role slug: {e}") - return 1 - except RuntimeError as e: - self._print_error(f"Failed to persist role: {e}") - return 1 - - cx_print(f"✓ Role set to: [bold cyan]{role_slug}[/bold cyan]", "success") - - context = manager.get_system_context() - # Generate a unique request ID for cache-busting and tracking purposes. 
- req_id = f"{datetime.now().strftime('%H:%M:%S.%f')}-{uuid.uuid4().hex[:4]}" - - cx_print(f"🔍 Fetching tailored AI recommendations for {role_slug}...", "info") - - # Construct the recommendation prompt for the LLM. - rec_question = ( - f"### ARCHITECTURAL ADVISORY [ID: {req_id}] ###\n" - f"NEW_TARGET_PERSONA: {role_slug}\n" - f"OS: {sys.platform} | GPU: {'Enabled' if context.get('has_gpu') else 'None'}\n\n" - f"TASK: Generate 3-5 unique packages for '{role_slug}' ONLY.\n" - f"--- PREFERRED RESPONSE FORMAT ---\n" - f"Please start with '1.' and provide only the list of roles. " - f"Omit introductions, reasoning, and summaries.\n\n" - f"💡 Recommended packages for {role_slug}:\n" - f" - " - ) - - if self._ask_ai_and_render(rec_question) != 0: - return 1 - - console.print( - "\n[dim italic]💡 Ready to upgrade? Install any of these using:[/dim italic]" - ) - console.print("[bold cyan] cortex install [/bold cyan]\n") - return 0 - - else: - self._print_error("Unknown role command") - return 1 - def demo(self): """ Run the one-command investor demo @@ -521,21 +213,21 @@ def stack(self, args: argparse.Namespace) -> int: def _handle_stack_list(self, manager: StackManager) -> int: """List all available stacks.""" stacks = manager.list_stacks() - cx_print(f"\n📦 {t('stack.available')}:\n", "info") + cx_print("\n📦 Available Stacks:\n", "info") for stack in stacks: pkg_count = len(stack.get("packages", [])) console.print(f" [green]{stack.get('id', 'unknown')}[/green]") - console.print(f" {stack.get('name', t('stack.unnamed'))}") - console.print(f" {stack.get('description', t('stack.no_description'))}") + console.print(f" {stack.get('name', 'Unnamed Stack')}") + console.print(f" {stack.get('description', 'No description')}") console.print(f" [dim]({pkg_count} packages)[/dim]\n") - cx_print(t("stack.use_command"), "info") + cx_print("Use: cortex stack to install a stack", "info") return 0 def _handle_stack_describe(self, manager: StackManager, stack_id: str) -> int: """Describe a 
specific stack.""" stack = manager.find_stack(stack_id) if not stack: - self._print_error(t("stack.not_found", name=stack_id)) + self._print_error(f"Stack '{stack_id}' not found. Use --list to see available stacks.") return 1 description = manager.describe_stack(stack_id) console.print(description) @@ -548,18 +240,20 @@ def _handle_stack_install(self, manager: StackManager, args: argparse.Namespace) if suggested_name != original_name: cx_print( - f"💡 {t('stack.gpu_fallback', original=original_name, suggested=suggested_name)}", + f"💡 No GPU detected, using '{suggested_name}' instead of '{original_name}'", "info", ) stack = manager.find_stack(suggested_name) if not stack: - self._print_error(t("stack.not_found", name=suggested_name)) + self._print_error( + f"Stack '{suggested_name}' not found. Use --list to see available stacks." + ) return 1 packages = stack.get("packages", []) if not packages: - self._print_error(t("stack.no_packages", name=suggested_name)) + self._print_error(f"Stack '{suggested_name}' has no packages configured.") return 1 if args.dry_run: @@ -569,28 +263,28 @@ def _handle_stack_install(self, manager: StackManager, args: argparse.Namespace) def _handle_stack_dry_run(self, stack: dict[str, Any], packages: list[str]) -> int: """Preview packages that would be installed without executing.""" - cx_print(f"\n📋 {t('stack.installing', name=stack['name'])}", "info") - console.print(f"\n{t('stack.dry_run_preview')}:") + cx_print(f"\n📋 Stack: {stack['name']}", "info") + console.print("\nPackages that would be installed:") for pkg in packages: console.print(f" • {pkg}") - console.print(f"\n{t('stack.packages_total', count=len(packages))}") - cx_print(f"\n{t('stack.dry_run_note')}", "warning") + console.print(f"\nTotal: {len(packages)} packages") + cx_print("\nDry run only - no commands executed", "warning") return 0 def _handle_stack_real_install(self, stack: dict[str, Any], packages: list[str]) -> int: """Install all packages in the stack.""" - 
cx_print(f"\n🚀 {t('stack.installing', name=stack['name'])}\n", "success") + cx_print(f"\n🚀 Installing stack: {stack['name']}\n", "success") # Batch into a single LLM request packages_str = " ".join(packages) result = self.install(software=packages_str, execute=True, dry_run=False) if result != 0: - self._print_error(t("stack.failed", name=stack["name"])) + self._print_error(f"Failed to install stack '{stack['name']}'") return 1 - self._print_success(f"\n✅ {t('stack.installed', name=stack['name'])}") - console.print(t("stack.packages_installed", count=len(packages))) + self._print_success(f"\n✅ Stack '{stack['name']}' installed successfully!") + console.print(f"Installed {len(packages)} packages") return 0 # --- Sandbox Commands (Docker-based package testing) --- @@ -601,22 +295,23 @@ def sandbox(self, args: argparse.Namespace) -> int: DockerSandbox, SandboxAlreadyExistsError, SandboxNotFoundError, + SandboxTestStatus, ) action = getattr(args, "sandbox_action", None) if not action: - cx_print(f"\n🐳 {t('sandbox.header')}\n", "info") - console.print(t("sandbox.usage")) - console.print(f"\n{t('sandbox.commands_header')}:") - console.print(f" create {t('sandbox.cmd_create')}") - console.print(f" install {t('sandbox.cmd_install')}") - console.print(f" test [package] {t('sandbox.cmd_test')}") - console.print(f" promote {t('sandbox.cmd_promote')}") - console.print(f" cleanup {t('sandbox.cmd_cleanup')}") - console.print(f" list {t('sandbox.cmd_list')}") - console.print(f" exec {t('sandbox.cmd_exec')}") - console.print(f"\n{t('sandbox.example_workflow')}:") + cx_print("\n🐳 Docker Sandbox - Test packages safely before installing\n", "info") + console.print("Usage: cortex sandbox [options]") + console.print("\nCommands:") + console.print(" create Create a sandbox environment") + console.print(" install Install package in sandbox") + console.print(" test [package] Run tests in sandbox") + console.print(" promote Install tested package on main system") + console.print(" cleanup 
Remove sandbox environment") + console.print(" list List all sandboxes") + console.print(" exec Execute command in sandbox") + console.print("\nExample workflow:") console.print(" cortex sandbox create test-env") console.print(" cortex sandbox install test-env nginx") console.print(" cortex sandbox test test-env") @@ -739,8 +434,8 @@ def _sandbox_promote(self, sandbox, args: argparse.Namespace) -> int: if not skip_confirm: console.print(f"\nPromote '{package}' to main system? [Y/n]: ", end="") try: - response = StdinHandler.get_input() - if response and response.lower() not in ("y", "yes"): + response = input().strip().lower() + if response and response not in ("y", "yes"): cx_print("Promotion cancelled", "warning") return 0 except (EOFError, KeyboardInterrupt): @@ -799,7 +494,7 @@ def _sandbox_list(self, sandbox) -> int: def _sandbox_exec(self, sandbox, args: argparse.Namespace) -> int: """Execute command in sandbox.""" name = args.name - command = args.cmd + command = args.command result = sandbox.exec_command(name, command) @@ -810,50 +505,14 @@ def _sandbox_exec(self, sandbox, args: argparse.Namespace) -> int: return result.exit_code - def _display_prediction_warning(self, prediction: FailurePrediction) -> None: - """Display formatted prediction warning.""" - color = self.RISK_COLORS.get(prediction.risk_level, "white") - label = self.risk_labels.get(prediction.risk_level, "Unknown") - - console.print() - if prediction.risk_level >= RiskLevel.HIGH: - console.print(f"⚠️ [bold red]{t('predictive.risks_detected')}:[/bold red]") - else: - console.print(f"ℹ️ [bold {color}]{t('predictive.risks_detected')}:[/bold {color}]") - - if prediction.reasons: - console.print(f"\n[bold]{label}:[/bold]") - for reason in prediction.reasons: - console.print(f" - {reason}") - - if prediction.recommendations: - console.print(f"\n[bold]{t('predictive.recommendation')}:[/bold]") - for i, rec in enumerate(prediction.recommendations, 1): - console.print(f" {i}. 
{rec}") - - if prediction.predicted_errors: - console.print(f"\n[bold]{t('predictive.predicted_errors')}:[/bold]") - for err in prediction.predicted_errors: - msg = f"{err[:100]}..." if len(err) > 100 else err - console.print(f" ! [dim]{msg}[/dim]") - - def _confirm_risky_operation(self, prediction: FailurePrediction) -> bool: - """Prompt user for confirmation of a risky operation.""" - if prediction.risk_level == RiskLevel.HIGH or prediction.risk_level == RiskLevel.CRITICAL: - cx_print(f"\n{t('predictive.high_risk_warning')}", "warning") - - console.print(f"\n{t('predictive.continue_anyway')} [y/N]: ", end="", markup=False) - try: - response = StdinHandler.get_input().lower() - return response in ("y", "yes") - except (EOFError, KeyboardInterrupt): - console.print() - return False - # --- End Sandbox Commands --- - def ask(self, question: str) -> int: - """Answer a natural language question about the system.""" + def ask(self, question: str | None, debug: bool = False, do_mode: bool = False) -> int: + """Answer a natural language question about the system. + + In --do mode, Cortex can execute write and modify commands with user confirmation. + If no question is provided in --do mode, starts an interactive session. 
+ """ api_key = self._get_api_key() if not api_key: return 1 @@ -861,14 +520,37 @@ def ask(self, question: str) -> int: provider = self._get_provider() self._debug(f"Using provider: {provider}") + # Setup cortex user if in do mode + if do_mode: + try: + from cortex.do_runner import setup_cortex_user + cx_print("🔧 Do mode enabled - Cortex can execute commands to solve problems", "info") + # Don't fail if user creation fails - we have fallbacks + setup_cortex_user() + except Exception as e: + self._debug(f"Cortex user setup skipped: {e}") + try: handler = AskHandler( api_key=api_key, provider=provider, + debug=debug, + do_mode=do_mode, ) + + # If no question and in do mode, start interactive session + if question is None and do_mode: + return self._run_interactive_do_session(handler) + elif question is None: + self._print_error("Please provide a question or use --do for interactive mode") + return 1 + answer = handler.ask(question) - # Render as markdown for proper formatting in terminal - console.print(Markdown(answer)) + # Don't print raw JSON or processing messages + if answer and not (answer.strip().startswith('{') or + "I'm processing your request" in answer or + "I have a plan to execute" in answer): + console.print(answer) return 0 except ImportError as e: # Provide a helpful message if provider SDK is missing @@ -883,64 +565,330 @@ def ask(self, question: str) -> int: except RuntimeError as e: self._print_error(str(e)) return 1 - - def _ask_with_session_key(self, question: str, api_key: str, provider: str) -> int: - """Answer a question using provided session API key without re-prompting. - - This wrapper is used by continuous voice mode to avoid re-calling _get_api_key(). 
- """ - self._debug(f"Using provider: {provider}") - + + def _run_interactive_do_session(self, handler: AskHandler) -> int: + """Run an interactive --do session where user can type queries.""" + import signal + from rich.panel import Panel + from rich.prompt import Prompt + + # Create a session + from cortex.do_runner import DoRunDatabase + db = DoRunDatabase() + session_id = db.create_session() + + # Pass session_id to handler + if handler._do_handler: + handler._do_handler.current_session_id = session_id + + # Track if we're currently processing a request + processing_request = False + request_interrupted = False + + class SessionInterrupt(Exception): + """Exception raised to interrupt the current request and return to prompt.""" + pass + + class SessionExit(Exception): + """Exception raised to exit the session immediately (Ctrl+C).""" + pass + + def handle_ctrl_z(signum, frame): + """Handle Ctrl+Z - stop current operation, return to prompt.""" + nonlocal request_interrupted + + # Set interrupt flag on the handler - this will be checked in the loop + handler.interrupt() + + # If DoHandler has an active process, stop it + if handler._do_handler and handler._do_handler._current_process: + try: + handler._do_handler._current_process.terminate() + handler._do_handler._current_process.wait(timeout=1) + except: + try: + handler._do_handler._current_process.kill() + except: + pass + handler._do_handler._current_process = None + + # If we're processing a request, interrupt it immediately + if processing_request: + request_interrupted = True + console.print() + console.print(f"[yellow]⚠ Ctrl+Z - Stopping current operation...[/yellow]") + # Raise exception to break out and return to prompt + raise SessionInterrupt("Interrupted by Ctrl+Z") + else: + # Not processing anything, just inform the user + console.print() + console.print(f"[dim]Ctrl+Z - Type 'exit' to end the session[/dim]") + + def handle_ctrl_c(signum, frame): + """Handle Ctrl+C - exit the session immediately.""" 
+ # Stop any active process first + if handler._do_handler and handler._do_handler._current_process: + try: + handler._do_handler._current_process.terminate() + handler._do_handler._current_process.wait(timeout=1) + except: + try: + handler._do_handler._current_process.kill() + except: + pass + handler._do_handler._current_process = None + + console.print() + console.print("[cyan]👋 Session ended (Ctrl+C).[/cyan]") + raise SessionExit("Exited by Ctrl+C") + + # Set up signal handlers for the entire session + # Ctrl+Z (SIGTSTP) -> stop current operation, return to prompt + # Ctrl+C (SIGINT) -> exit session immediately + original_sigtstp = signal.signal(signal.SIGTSTP, handle_ctrl_z) + original_sigint = signal.signal(signal.SIGINT, handle_ctrl_c) + try: - handler = AskHandler( - api_key=api_key, - provider=provider, - ) - answer = handler.ask(question) - console.print(answer) - return 0 - except ImportError as e: - self._print_error(str(e)) - cx_print( - "Install the required SDK or set CORTEX_PROVIDER=ollama for local mode.", "info" - ) - return 1 - except ValueError as e: - self._print_error(str(e)) - return 1 - except RuntimeError as e: - self._print_error(str(e)) - return 1 + console.print() + console.print(Panel( + "[bold cyan]🚀 Cortex Interactive Session[/bold cyan]\n\n" + f"[dim]Session ID: {session_id[:30]}...[/dim]\n\n" + "Type what you want to do and Cortex will help you.\n" + "Commands will be shown for approval before execution.\n\n" + "[dim]Examples:[/dim]\n" + " • install docker and run nginx\n" + " • setup a postgresql database\n" + " • configure nginx to proxy port 3000\n" + " • check system resources\n\n" + "[dim]Type 'exit' or 'quit' to end the session.[/dim]\n" + "[dim]Press Ctrl+Z to stop current operation | Ctrl+C to exit immediately[/dim]", + title="[bold green]Welcome[/bold green]", + border_style="cyan", + )) + console.print() + + session_history = [] # Track what was done in this session + run_count = 0 + + while True: + try: + # Show compact 
session status (not the full history panel) + if session_history: + console.print(f"[dim]Session: {len(session_history)} task(s) | {run_count} run(s) | Type 'history' to see details[/dim]") + + # Get user input + query = Prompt.ask("[bold cyan]What would you like to do?[/bold cyan]") + + if not query.strip(): + continue + + # Check for exit + if query.lower().strip() in ["exit", "quit", "bye", "q"]: + db.end_session(session_id) + console.print() + console.print(f"[cyan]👋 Session ended ({run_count} runs). Run 'cortex do history' to see past runs.[/cyan]") + break + + # Check for help + if query.lower().strip() in ["help", "?"]: + console.print() + console.print("[bold]Available commands:[/bold]") + console.print(" [green]exit[/green], [green]quit[/green] - End the session") + console.print(" [green]history[/green] - Show session history") + console.print(" [green]clear[/green] - Clear session history") + console.print(" Or type any request in natural language!") + console.print() + continue + + # Check for history + if query.lower().strip() == "history": + if session_history: + from rich.table import Table + from rich.panel import Panel + + console.print() + table = Table( + show_header=True, + header_style="bold cyan", + title=f"[bold]Session History[/bold]", + title_style="bold", + ) + table.add_column("#", style="dim", width=3) + table.add_column("Query", style="white", max_width=45) + table.add_column("Status", justify="center", width=8) + table.add_column("Commands", justify="center", width=10) + table.add_column("Run ID", style="dim", max_width=20) + + for i, item in enumerate(session_history, 1): + status = "[green]✓ Success[/green]" if item.get("success") else "[red]✗ Failed[/red]" + query_short = item['query'][:42] + "..." if len(item['query']) > 42 else item['query'] + cmd_count = str(item.get('commands_count', 0)) if item.get('success') else "-" + run_id = item.get('run_id', '-')[:18] + "..." 
if item.get('run_id') and len(item.get('run_id', '')) > 18 else item.get('run_id', '-') + table.add_row(str(i), query_short, status, cmd_count, run_id) + + console.print(table) + console.print() + console.print(f"[dim]Total: {len(session_history)} tasks | {run_count} runs | Session: {session_id[:20]}...[/dim]") + console.print() + else: + console.print("[dim]No tasks completed yet.[/dim]") + continue + + # Check for clear + if query.lower().strip() == "clear": + session_history.clear() + console.print("[dim]Session history cleared.[/dim]") + continue + + # Update session with query + db.update_session(session_id, query=query) + + # Process the query + console.print() + processing_request = True + request_interrupted = False + handler.reset_interrupt() # Reset interrupt flag before new request + + try: + answer = handler.ask(query) + + # Check if request was interrupted + if request_interrupted: + console.print("[yellow]⚠ Request was interrupted[/yellow]") + session_history.append({ + "query": query, + "success": False, + "error": "Interrupted by user", + }) + continue + + # Get the run_id and command count if one was created + run_id = None + commands_count = 0 + if handler._do_handler and handler._do_handler.current_run: + run_id = handler._do_handler.current_run.run_id + # Count commands from the run + if handler._do_handler.current_run.commands: + commands_count = len(handler._do_handler.current_run.commands) + run_count += 1 + db.update_session(session_id, increment_runs=True) + + # Track in session history + session_history.append({ + "query": query, + "success": True, + "answer": answer[:100] if answer else "", + "run_id": run_id, + "commands_count": commands_count, + }) + + # Print response if it's informational (filter out JSON) + if answer and not answer.startswith("USER_DECLINED"): + # Don't print raw JSON or processing messages + if not (answer.strip().startswith('{') or + "I'm processing your request" in answer or + "I have a plan to execute" in 
answer): + console.print(answer) + + except SessionInterrupt: + # Ctrl+Z/Ctrl+C pressed - return to prompt immediately + console.print() + session_history.append({ + "query": query, + "success": False, + "error": "Interrupted by user", + }) + continue # Go back to "What would you like to do?" prompt + except Exception as e: + if request_interrupted: + console.print("[yellow]⚠ Request was interrupted[/yellow]") + else: + # Show user-friendly error without internal details + error_msg = str(e) + if isinstance(e, AttributeError): + console.print("[yellow]⚠ Something went wrong. Please try again.[/yellow]") + # Log the actual error for debugging + import logging + logging.debug(f"Internal error: {e}") + else: + console.print(f"[red]⚠ {error_msg}[/red]") + session_history.append({ + "query": query, + "success": False, + "error": "Interrupted" if request_interrupted else str(e), + }) + finally: + processing_request = False + request_interrupted = False + + console.print() + + except SessionInterrupt: + # Ctrl+Z - just return to prompt + console.print() + continue + except SessionExit: + # Ctrl+C - exit session immediately + db.end_session(session_id) + break + except (KeyboardInterrupt, EOFError): + # Fallback for any other interrupts + db.end_session(session_id) + console.print() + console.print("[cyan]👋 Session ended.[/cyan]") + break + + finally: + # Always restore signal handlers when session ends + signal.signal(signal.SIGTSTP, original_sigtstp) + signal.signal(signal.SIGINT, original_sigint) + + return 0 - def _install_with_session_key( + def install( self, software: str, - api_key: str, - provider: str, execute: bool = False, dry_run: bool = False, - ) -> int: - """Install software using provided session API key without re-prompting. - - This wrapper is used by continuous voice mode to avoid re-calling _get_api_key(). 
- """ - history = InstallationHistory() - install_id = None - start_time = datetime.now() - + parallel: bool = False, + ): # Validate input first is_valid, error = validate_install_request(software) if not is_valid: self._print_error(error) return 1 - software = self._normalize_software_name(software) + # Special-case the ml-cpu stack: + # The LLM sometimes generates outdated torch==1.8.1+cpu installs + # which fail on modern Python. For the "pytorch-cpu jupyter numpy pandas" + # combo, force a supported CPU-only PyTorch recipe instead. + normalized = " ".join(software.split()).lower() + + if normalized == "pytorch-cpu jupyter numpy pandas": + software = ( + "pip3 install torch torchvision torchaudio " + "--index-url https://download.pytorch.org/whl/cpu && " + "pip3 install jupyter numpy pandas" + ) + + api_key = self._get_api_key() + if not api_key: + return 1 + + provider = self._get_provider() self._debug(f"Using provider: {provider}") - self._debug("Using session API key: ") + self._debug(f"API key: {api_key[:10]}...{api_key[-4:]}") + + # Initialize installation history + history = InstallationHistory() + install_id = None + start_time = datetime.now() try: self._print_status("🧠", "Understanding request...") + interpreter = CommandInterpreter(api_key=api_key, provider=provider) + self._print_status("📦", "Planning installation...") for _ in range(10): @@ -950,11 +898,15 @@ def _install_with_session_key( commands = interpreter.parse(f"install {software}") if not commands: - self._print_error(t("install.no_commands")) + self._print_error( + "No commands generated. Please try again with a different request." + ) return 1 + # Extract packages from commands for tracking packages = history._extract_packages_from_commands(commands) + # Record installation start if execute or dry_run: install_id = history.record_installation( InstallationType.INSTALL, packages, commands, start_time @@ -966,627 +918,36 @@ def _install_with_session_key( print(f" {i}. 
{cmd}") if dry_run: - print(f"\n({t('install.dry_run_message')})") + print("\n(Dry run mode - commands not executed)") if install_id: history.update_installation(install_id, InstallationStatus.SUCCESS) return 0 if execute: - print(f"\n{t('install.executing')}") - coordinator = InstallationCoordinator(commands=commands) - result = coordinator.execute() - - if result.success: - if install_id: - history.update_installation(install_id, InstallationStatus.SUCCESS) - return 0 - else: - error_msg = result.message or "Installation failed" - if install_id: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - self._print_error(error_msg) - return 1 - else: - # Neither dry_run nor execute - just show commands - return 0 - - except Exception as e: - error_msg = str(e) - if install_id: - history.update_installation(install_id, InstallationStatus.FAILED, error_msg) - self._print_error(error_msg) - return 1 - - def voice(self, continuous: bool = False, model: str | None = None) -> int: - """Handle voice input mode. - - Args: - continuous: If True, stay in voice mode until Ctrl+C. - If False, record single input and exit. - model: Whisper model name (e.g., 'base.en', 'small.en'). - If None, uses CORTEX_WHISPER_MODEL env var or 'base.en'. 
- """ - import queue - import threading - - try: - from cortex.voice import ( - MicrophoneNotFoundError, - ModelNotFoundError, - VoiceInputError, - VoiceInputHandler, - ) - except ImportError: - self._print_error("Voice dependencies not installed.") - cx_print("Install with: pip install cortex-linux[voice]", "info") - return 1 - - api_key = self._get_api_key() - if not api_key: - return 1 - - # Capture provider once for session - provider = self._get_provider() - self._debug(f"Session using provider: {provider}") - - # Display model information if specified - if model: - model_info = { - "tiny.en": "(39 MB, fastest, good for clear speech)", - "base.en": "(140 MB, balanced speed/accuracy)", - "small.en": "(466 MB, better accuracy)", - "medium.en": "(1.5 GB, high accuracy)", - "tiny": "(39 MB, multilingual)", - "base": "(290 MB, multilingual)", - "small": "(968 MB, multilingual)", - "medium": "(3 GB, multilingual)", - "large": "(6 GB, best accuracy, multilingual)", - } - cx_print(f"Using Whisper model: {model} {model_info.get(model, '')}", "info") - - # Queue for thread-safe communication between worker and main thread - input_queue = queue.Queue() - response_queue = queue.Queue() - - def process_voice_command(text: str) -> None: - """Process transcribed voice command.""" - if not text: - return - - # Determine if this is an install command or a question - text_lower = text.lower().strip() - is_install = any( - text_lower.startswith(word) for word in ["install", "setup", "add", "get", "put"] - ) - if is_install: - # Remove the command verb for install - software = text - for verb in ["install", "setup", "add", "get", "put"]: - if text_lower.startswith(verb): - software = text[len(verb) :].strip() - break - - # Validate software name - if not software or len(software) > 200: - cx_print("Invalid software name", "error") - return + def progress_callback(current, total, step): + status_emoji = "⏳" + if step.status == StepStatus.SUCCESS: + status_emoji = "✅" + elif 
step.status == StepStatus.FAILED: + status_emoji = "❌" + print(f"\n[{current}/{total}] {status_emoji} {step.description}") + print(f" Command: {step.command}") - # Check for dangerous characters that shouldn't be in package names - dangerous_chars = [";", "&", "|", "`", "$", "(", ")"] - if any(char in software for char in dangerous_chars): - cx_print("Invalid characters detected in software name", "error") - return + print("\nExecuting commands...") - cx_print(f"Installing: {software}", "info") + if parallel: + import asyncio - # Handle prompt based on mode - def _drain_queues() -> None: - """Clear any stale prompt/response messages from previous interactions.""" + from cortex.install_parallel import run_parallel_install - try: - while not response_queue.empty(): - response_queue.get_nowait() - except Exception: - pass - - try: - while not input_queue.empty(): - input_queue.get_nowait() - except Exception: - pass - - def _flush_stdin() -> None: - """Flush any pending input from stdin.""" - try: - # Use select to check for pending input without blocking - while select.select([sys.stdin], [], [], 0.0)[0]: - sys.stdin.read(1) - except (OSError, ValueError, TypeError): - # OSError: fd not valid, ValueError: fd negative, TypeError: not selectable - pass - - def _resolve_choice() -> str: - """Prompt user until a valid choice is provided.""" - - def _prompt_inline() -> str: - console.print() - console.print("[bold cyan]Choose an action:[/bold cyan]") - console.print(" [1] Dry run (preview commands)") - console.print(" [2] Execute (run commands)") - console.print(" [3] Cancel") - console.print(" [dim](Ctrl+C to cancel)[/dim]") - console.print() - - try: - _flush_stdin() # Clear any buffered input - choice = input("Enter choice [1/2/3]: ").strip() - # Blank input defaults to dry-run (1) - return choice or "1" - except (KeyboardInterrupt, EOFError): - return "3" - - if input_handler_thread is None: - # Single-shot mode: inline prompt handling (no input handler thread 
running) - _flush_stdin() # Clear any buffered input before prompting - choice_local = _prompt_inline() - while choice_local not in {"1", "2", "3"}: - cx_print("Invalid choice. Please enter 1, 2, or 3.", "warning") - choice_local = _prompt_inline() - return choice_local - - # Continuous mode: use queue-based communication with input handler thread - _drain_queues() - while True: - input_queue.put({"type": "prompt", "software": software}) - - try: - response = response_queue.get(timeout=60) - choice_local = response.get("choice") - except queue.Empty: - cx_print("\nInput timeout - cancelled.", "warning") - return "3" - - if choice_local in {"1", "2", "3"}: - return choice_local - - # Invalid or malformed response — re-prompt - cx_print("Invalid choice. Please enter 1, 2, or 3.", "warning") - - def _prompt_execute_after_dry_run() -> str: - """Prompt user to execute or cancel after dry-run preview.""" - console.print() - console.print("[bold cyan]Dry-run complete. What next?[/bold cyan]") - console.print(" [1] Execute (run commands)") - console.print(" [2] Cancel") - console.print(" [dim](Ctrl+C to cancel)[/dim]") - console.print() - - try: - _flush_stdin() # Clear any buffered input - choice_input = input("Enter choice [1/2]: ").strip() - return choice_input or "2" # Default to cancel - except (KeyboardInterrupt, EOFError): - return "2" - - choice = _resolve_choice() - - # Process choice (unified for both modes) - if choice == "1": - self._install_with_session_key( - software, api_key, provider, execute=False, dry_run=True - ) - # After dry-run, ask if user wants to execute - follow_up = _prompt_execute_after_dry_run() - while follow_up not in {"1", "2"}: - cx_print("Invalid choice. 
Please enter 1 or 2.", "warning") - follow_up = _prompt_execute_after_dry_run() - if follow_up == "1": - cx_print("Executing installation...", "info") - self._install_with_session_key( - software, api_key, provider, execute=True, dry_run=False - ) - else: - cx_print("Cancelled.", "info") - elif choice == "2": - cx_print("Executing installation...", "info") - self._install_with_session_key( - software, api_key, provider, execute=True, dry_run=False - ) - else: - cx_print("Cancelled.", "info") - else: - # Treat as a question - cx_print(f"Question: {text}", "info") - self._ask_with_session_key(text, api_key, provider) - - handler = None - input_handler_thread = None - stop_input_handler = threading.Event() - - def input_handler_loop(): - """Main thread loop to handle user input requests from worker thread.""" - while not stop_input_handler.is_set(): - try: - request = input_queue.get(timeout=0.5) - if request.get("type") == "prompt": - console.print() - console.print("[bold cyan]Choose an action:[/bold cyan]") - console.print(" [1] Dry run (preview commands)") - console.print(" [2] Execute (run commands)") - console.print(" [3] Cancel") - console.print() - - while True: - try: - choice = input("Enter choice [1/2/3]: ").strip() - # Blank input defaults to dry-run (1) - choice = choice or "1" - except (KeyboardInterrupt, EOFError): - response_queue.put({"choice": "3"}) - cx_print("\nCancelled.", "info") - break - - if choice in {"1", "2", "3"}: - response_queue.put({"choice": choice}) - break - - cx_print("Invalid choice. 
Please enter 1, 2, or 3.", "warning") - except queue.Empty: - continue - except Exception as e: - logging.debug(f"Input handler error: {e}") - continue - - try: - handler = VoiceInputHandler(model_name=model) - - if continuous: - # Start input handler thread - input_handler_thread = threading.Thread(target=input_handler_loop, daemon=True) - input_handler_thread.start() - - # Continuous voice mode - handler.start_voice_mode(process_voice_command) - else: - # Single recording mode - text = handler.record_single() - if text: - process_voice_command(text) - else: - cx_print("No speech detected.", "warning") - - return 0 - - except (VoiceInputError, MicrophoneNotFoundError, ModelNotFoundError) as e: - self._print_error(str(e)) - return 1 - except KeyboardInterrupt: - cx_print("\nVoice mode exited.", "info") - return 0 - finally: - # Stop input handler thread - stop_input_handler.set() - if input_handler_thread is not None and input_handler_thread.is_alive(): - input_handler_thread.join(timeout=1.0) - - # Ensure cleanup even if exceptions occur - if handler is not None: - try: - handler.stop() - except Exception as e: - # Log cleanup errors but don't raise - logging.debug("Error during voice handler cleanup: %s", e) - - def _normalize_software_name(self, software: str) -> str: - """Normalize software name by cleaning whitespace. - - Returns a natural-language description suitable for LLM interpretation. - Does NOT return shell commands - all command generation must go through - the LLM and validation pipeline. 
- """ - # Just normalize whitespace - return natural language description - return " ".join(software.split()) - - def _record_history_error( - self, - history: InstallationHistory, - install_id: str | None, - error: str, - ) -> None: - """Record installation error to history.""" - if install_id: - history.update_installation(install_id, InstallationStatus.FAILED, error) - - def _handle_parallel_execution( - self, - commands: list[str], - software: str, - install_id: str | None, - history: InstallationHistory, - ) -> int: - """Handle parallel installation execution.""" - import asyncio - - from cortex.install_parallel import run_parallel_install - - def parallel_log_callback(message: str, level: str = "info"): - if level == "success": - cx_print(f" ✅ {message}", "success") - elif level == "error": - cx_print(f" ❌ {message}", "error") - else: - cx_print(f" ℹ {message}", "info") - - try: - success, parallel_tasks = asyncio.run( - run_parallel_install( - commands=commands, - descriptions=[f"Step {i + 1}" for i in range(len(commands))], - timeout=300, - stop_on_error=True, - log_callback=parallel_log_callback, - ) - ) - - if success: - total_duration = self._calculate_duration(parallel_tasks) - self._print_success(f"{software} installed successfully!") - print(f"\nCompleted in {total_duration:.2f} seconds (parallel mode)") - if install_id: - history.update_installation(install_id, InstallationStatus.SUCCESS) - print(f"\n📝 Installation recorded (ID: {install_id})") - print(f" To rollback: cortex rollback {install_id}") - return 0 - - error_msg = self._get_parallel_error_msg(parallel_tasks) - self._record_history_error(history, install_id, error_msg) - self._print_error(self.INSTALL_FAIL_MSG) - if error_msg: - print(f" Error: {error_msg}", file=sys.stderr) - if install_id: - print(f"\n📝 Installation recorded (ID: {install_id})") - print(f" View details: cortex history {install_id}") - return 1 - - except (ValueError, OSError) as e: - self._record_history_error(history, 
install_id, str(e)) - self._print_error(f"Parallel execution failed: {str(e)}") - return 1 - except Exception as e: - self._record_history_error(history, install_id, str(e)) - self._print_error(f"Unexpected parallel execution error: {str(e)}") - if self.verbose: - import traceback - - traceback.print_exc() - return 1 - - def _calculate_duration(self, parallel_tasks: list) -> float: - """Calculate total duration from parallel tasks.""" - if not parallel_tasks: - return 0.0 - - max_end = max( - (t.end_time for t in parallel_tasks if t.end_time is not None), - default=None, - ) - min_start = min( - (t.start_time for t in parallel_tasks if t.start_time is not None), - default=None, - ) - if max_end is not None and min_start is not None: - return max_end - min_start - return 0.0 - - def _get_parallel_error_msg(self, parallel_tasks: list) -> str: - """Extract error message from failed parallel tasks.""" - failed_tasks = [t for t in parallel_tasks if getattr(t.status, "value", "") == "failed"] - return failed_tasks[0].error if failed_tasks else self.INSTALL_FAIL_MSG - - def _handle_sequential_execution( - self, - commands: list[str], - software: str, - install_id: str | None, - history: InstallationHistory, - ) -> int: - """Handle sequential installation execution.""" - - def progress_callback(current, total, step): - status_emoji = "⏳" - if step.status == StepStatus.SUCCESS: - status_emoji = "✅" - elif step.status == StepStatus.FAILED: - status_emoji = "❌" - print(f"\n[{current}/{total}] {status_emoji} {step.description}") - print(f" Command: {step.command}") - - coordinator = InstallationCoordinator( - commands=commands, - descriptions=[f"Step {i + 1}" for i in range(len(commands))], - timeout=300, - stop_on_error=True, - progress_callback=progress_callback, - ) - - result = coordinator.execute() - - if result.success: - self._print_success(f"{software} installed successfully!") - print(f"\nCompleted in {result.total_duration:.2f} seconds") - if install_id: - 
history.update_installation(install_id, InstallationStatus.SUCCESS) - print(f"\n📝 Installation recorded (ID: {install_id})") - print(f" To rollback: cortex rollback {install_id}") - return 0 - - # Handle failure - self._record_history_error( - history, install_id, result.error_message or self.INSTALL_FAIL_MSG - ) - if result.failed_step is not None: - self._print_error(f"{self.INSTALL_FAIL_MSG} at step {result.failed_step + 1}") - else: - self._print_error(self.INSTALL_FAIL_MSG) - if result.error_message: - print(f" Error: {result.error_message}", file=sys.stderr) - if install_id: - print(f"\n📝 Installation recorded (ID: {install_id})") - print(f" View details: cortex history {install_id}") - return 1 - - def install( - self, - software: str, - execute: bool = False, - dry_run: bool = False, - parallel: bool = False, - json_output: bool = False, - ) -> int: - """Install software using the LLM-powered package manager.""" - # Initialize installation history - history = InstallationHistory() - install_id = None - start_time = datetime.now() - # Validate input first - is_valid, error = validate_install_request(software) - if not is_valid: - if json_output: - print(json.dumps({"success": False, "error": error, "error_type": "ValueError"})) - else: - self._print_error(error) - return 1 - - software = self._normalize_software_name(software) - - api_key = self._get_api_key() - if not api_key: - error_msg = "No API key found. Please configure an API provider." 
- # Record installation attempt before failing if we have packages - try: - packages = [software.split()[0]] # Basic package extraction - install_id = history.record_installation( - InstallationType.INSTALL, packages, [], start_time - ) - except Exception: - pass # If recording fails, continue with error reporting - - if install_id: - history.update_installation(install_id, InstallationStatus.FAILED, error_msg) - - if json_output: - print( - json.dumps({"success": False, "error": error_msg, "error_type": "RuntimeError"}) - ) - else: - self._print_error(error_msg) - return 1 - - provider = self._get_provider() - self._debug(f"Using provider: {provider}") - self._debug(f"API key: {api_key[:10]}...{api_key[-4:]}") - - try: - if not json_output: - self._print_status("🧠", "Understanding request...") - - interpreter = CommandInterpreter(api_key=api_key, provider=provider) - - if not json_output: - self._print_status("📦", "Planning installation...") - for _ in range(10): - self._animate_spinner("Analyzing system requirements...") - self._clear_line() - - commands = interpreter.parse(f"install {software}") - - if not commands: - self._print_error(t("install.no_commands")) - return 1 - - # Predictive Analysis - if not json_output: - self._print_status("🔮", t("predictive.analyzing")) - if not self.predict_manager: - self.predict_manager = PredictiveErrorManager(api_key=api_key, provider=provider) - prediction = self.predict_manager.analyze_installation(software, commands) - if not json_output: - self._clear_line() - - if not json_output: - if prediction.risk_level != RiskLevel.NONE: - self._display_prediction_warning(prediction) - if execute and not self._confirm_risky_operation(prediction): - cx_print(f"\n{t('ui.operation_cancelled')}", "warning") - return 0 - else: - cx_print(t("predictive.no_issues_detected"), "success") - - # Extract packages from commands for tracking - packages = history._extract_packages_from_commands(commands) - - # Record installation start - if 
execute or dry_run: - install_id = history.record_installation( - InstallationType.INSTALL, packages, commands, start_time - ) - - # If JSON output requested, return structured data and exit early - if json_output: - output = { - "success": True, - "commands": commands, - "packages": packages, - "install_id": install_id, - "prediction": { - "risk_level": prediction.risk_level.name, - "reasons": prediction.reasons, - "recommendations": prediction.recommendations, - "predicted_errors": prediction.predicted_errors, - }, - } - print(json.dumps(output, indent=2)) - return 0 - - self._print_status("⚙️", f"Installing {software}...") - print("\nGenerated commands:") - for i, cmd in enumerate(commands, 1): - print(f" {i}. {cmd}") - - if dry_run: - print(f"\n({t('install.dry_run_message')})") - if install_id: - history.update_installation(install_id, InstallationStatus.SUCCESS) - return 0 - - if execute: - - def progress_callback(current, total, step): - status_emoji = "⏳" - if step.status == StepStatus.SUCCESS: - status_emoji = "✅" - elif step.status == StepStatus.FAILED: - status_emoji = "❌" - print(f"\n[{current}/{total}] {status_emoji} {step.description}") - print(f" Command: {step.command}") - - print(f"\n{t('install.executing')}") - - if parallel: - import asyncio - - from cortex.install_parallel import run_parallel_install - - def parallel_log_callback(message: str, level: str = "info"): - if level == "success": - cx_print(f" ✅ {message}", "success") - elif level == "error": - cx_print(f" ❌ {message}", "error") - else: - cx_print(f" ℹ {message}", "info") + def parallel_log_callback(message: str, level: str = "info"): + if level == "success": + cx_print(f" ✅ {message}", "success") + elif level == "error": + cx_print(f" ❌ {message}", "error") + else: + cx_print(f" ℹ {message}", "info") try: success, parallel_tasks = asyncio.run( @@ -1613,10 +974,8 @@ def parallel_log_callback(message: str, level: str = "info"): total_duration = max_end - min_start if success: - 
self._print_success(t("install.package_installed", package=software)) - print( - f"\n{t('progress.completed_in', seconds=f'{total_duration:.2f}')}" - ) + self._print_success(f"{software} installed successfully!") + print(f"\nCompleted in {total_duration:.2f} seconds (parallel mode)") if install_id: history.update_installation(install_id, InstallationStatus.SUCCESS) @@ -1637,9 +996,9 @@ def parallel_log_callback(message: str, level: str = "info"): error_msg, ) - self._print_error(t("install.failed")) + self._print_error("Installation failed") if error_msg: - print(f" {t('common.error')}: {error_msg}", file=sys.stderr) + print(f" Error: {error_msg}", file=sys.stderr) if install_id: print(f"\n📝 Installation recorded (ID: {install_id})") print(f" View details: cortex history {install_id}") @@ -1675,8 +1034,8 @@ def parallel_log_callback(message: str, level: str = "info"): result = coordinator.execute() if result.success: - self._print_success(t("install.package_installed", package=software)) - print(f"\n{t('progress.completed_in', seconds=f'{result.total_duration:.2f}')}") + self._print_success(f"{software} installed successfully!") + print(f"\nCompleted in {result.total_duration:.2f} seconds") # Record successful installation if install_id: @@ -1706,43 +1065,27 @@ def parallel_log_callback(message: str, level: str = "info"): else: print("\nTo execute these commands, run with --execute flag") print("Example: cortex install docker --execute") - return 0 - print("\nExecuting commands...") - if parallel: - return self._handle_parallel_execution(commands, software, install_id, history) - - return self._handle_sequential_execution(commands, software, install_id, history) + return 0 except ValueError as e: if install_id: history.update_installation(install_id, InstallationStatus.FAILED, str(e)) - if json_output: - - print(json.dumps({"success": False, "error": str(e), "error_type": "ValueError"})) - else: - self._print_error(str(e)) + self._print_error(str(e)) return 1 
except RuntimeError as e: if install_id: history.update_installation(install_id, InstallationStatus.FAILED, str(e)) - if json_output: - - print(json.dumps({"success": False, "error": str(e), "error_type": "RuntimeError"})) - else: - self._print_error(f"API call failed: {str(e)}") + self._print_error(f"API call failed: {str(e)}") return 1 except OSError as e: if install_id: history.update_installation(install_id, InstallationStatus.FAILED, str(e)) - if json_output: - - print(json.dumps({"success": False, "error": str(e), "error_type": "OSError"})) - else: - self._print_error(f"System error: {str(e)}") + self._print_error(f"System error: {str(e)}") return 1 except Exception as e: - self._record_history_error(history, install_id, str(e)) + if install_id: + history.update_installation(install_id, InstallationStatus.FAILED, str(e)) self._print_error(f"Unexpected error: {str(e)}") if self.verbose: import traceback @@ -1750,562 +1093,39 @@ def parallel_log_callback(message: str, level: str = "info"): traceback.print_exc() return 1 - def remove(self, args: argparse.Namespace) -> int: - """Handle package removal with impact analysis""" - package = args.package - dry_run = getattr(args, "dry_run", True) # Default to dry-run for safety - purge = getattr(args, "purge", False) - force = getattr(args, "force", False) - json_output = getattr(args, "json", False) - - # Initialize and analyze - result = self._analyze_package_removal(package) - if result is None: - return 1 - - # Check if package doesn't exist at all (not in repos) - if self._check_package_not_found(result): - return 1 + def cache_stats(self) -> int: + try: + from cortex.semantic_cache import SemanticCache - # Output results - self._output_impact_result(result, json_output) + cache = SemanticCache() + stats = cache.stats() + hit_rate = f"{stats.hit_rate * 100:.1f}%" if stats.total else "0.0%" - # Dry-run mode - stop here - if dry_run: - console.print() - cx_print("Dry run mode - no changes made", "info") - 
cx_print(f"To proceed with removal: cortex remove {package} --execute", "info") + cx_header("Cache Stats") + cx_print(f"Hits: {stats.hits}", "info") + cx_print(f"Misses: {stats.misses}", "info") + cx_print(f"Hit rate: {hit_rate}", "info") + cx_print(f"Saved calls (approx): {stats.hits}", "info") return 0 - - # Safety check and confirmation - if not self._can_proceed_with_removal(result, force, args, package, purge): - return self._removal_blocked_or_cancelled(result, force) - - return self._execute_removal(package, purge) - - def _analyze_package_removal(self, package: str): - """Initialize analyzer and perform impact analysis. Returns None on failure.""" - try: - analyzer = UninstallImpactAnalyzer() - except Exception as e: - self._print_error(f"Failed to initialize impact analyzer: {e}") - return None - - cx_print(f"Analyzing impact of removing '{package}'...", "info") - try: - return analyzer.analyze(package) + except (ImportError, OSError) as e: + self._print_error(f"Unable to read cache stats: {e}") + return 1 except Exception as e: - self._print_error(f"Impact analysis failed: {e}") + self._print_error(f"Unexpected error reading cache stats: {e}") if self.verbose: import traceback traceback.print_exc() - return None - - def _check_package_not_found(self, result) -> bool: - """Check if package doesn't exist in repos and print warnings.""" - if result.warnings and "not found in repositories" in str(result.warnings): - for warning in result.warnings: - cx_print(warning, "warning") - for rec in result.recommendations: - cx_print(rec, "info") - return True - return False - - def _output_impact_result(self, result, json_output: bool) -> None: - """Output the impact result in JSON or rich format.""" - if json_output: - import json as json_module - - data = { - "target_package": result.target_package, - "direct_dependents": result.direct_dependents, - "transitive_dependents": result.transitive_dependents, - "affected_services": [ - { - "name": s.name, - "status": 
s.status.value, - "package": s.package, - "is_critical": s.is_critical, - } - for s in result.affected_services - ], - "orphaned_packages": result.orphaned_packages, - "cascade_packages": result.cascade_packages, - "severity": result.severity.value, - "total_affected": result.total_affected, - "cascade_depth": result.cascade_depth, - "recommendations": result.recommendations, - "warnings": result.warnings, - "safe_to_remove": result.safe_to_remove, - } - console.print(json_module.dumps(data, indent=2)) - else: - self._display_impact_report(result) - - def _can_proceed_with_removal( - self, result, force: bool, args, package: str, purge: bool - ) -> bool: - """Check safety and get user confirmation. Returns True if can proceed.""" - if not result.safe_to_remove and not force: - return False - - skip_confirm = getattr(args, "yes", False) - if skip_confirm: - return True + return 1 - return self._confirm_removal(package, purge) + def history(self, limit: int = 20, status: str | None = None, show_id: str | None = None): + """Show installation history""" + history = InstallationHistory() - def _confirm_removal(self, package: str, purge: bool) -> bool: - """Prompt user for removal confirmation.""" - console.print() - confirm_msg = f"Remove '{package}'" - if purge: - confirm_msg += " and purge configuration" - confirm_msg += "? [y/N]: " try: - response = StdinHandler.get_input(confirm_msg).lower() - return response in ("y", "yes") - except (EOFError, KeyboardInterrupt): - console.print() - return False - - def _removal_blocked_or_cancelled(self, result, force: bool) -> int: - """Handle blocked or cancelled removal.""" - if not result.safe_to_remove and not force: - console.print() - self._print_error( - "Package removal has high impact. Use --force to proceed anyway, " - "or address the recommendations first." 
- ) - return 1 - cx_print("Removal cancelled", "info") - return 0 - - def _display_impact_report(self, result: ImpactResult) -> None: - """Display formatted impact analysis report""" - - # Severity styling - severity_styles = { - ImpactSeverity.SAFE: ("green", "✅"), - ImpactSeverity.LOW: ("green", "💚"), - ImpactSeverity.MEDIUM: ("yellow", "🟡"), - ImpactSeverity.HIGH: ("orange1", "🟠"), - ImpactSeverity.CRITICAL: ("red", "🔴"), - } - style, icon = severity_styles.get(result.severity, ("white", "❓")) - - # Header - console.print() - console.print( - Panel(f"[bold]{icon} Impact Analysis: {result.target_package}[/bold]", style=style) - ) - - # Display sections - self._display_warnings(result.warnings) - self._display_package_list(result.direct_dependents, "cyan", "📦 Direct dependents", 10) - self._display_services(result.affected_services) - self._display_summary_table(result, style, Table) - self._display_package_list(result.cascade_packages, "yellow", "🗑️ Cascade removal", 5) - self._display_package_list(result.orphaned_packages, "white", "👻 Would become orphaned", 5) - self._display_recommendations(result.recommendations) - - # Final verdict - console.print() - if result.safe_to_remove: - console.print("[bold green]✅ Safe to remove[/bold green]") - else: - console.print("[bold yellow]⚠️ Review recommendations before proceeding[/bold yellow]") - - def _display_warnings(self, warnings: list) -> None: - """Display warnings with appropriate styling.""" - for warning in warnings: - if "not currently installed" in warning: - console.print(f"\n[bold yellow]ℹ️ {warning}[/bold yellow]") - console.print("[dim] Showing potential impact analysis for this package.[/dim]") - else: - console.print(f"\n[bold red]⚠️ {warning}[/bold red]") - - def _display_package_list(self, packages: list, color: str, title: str, limit: int) -> None: - """Display a list of packages with truncation.""" - if packages: - console.print(f"\n[bold {color}]{title} ({len(packages)}):[/bold {color}]") - for 
pkg in packages[:limit]: - console.print(f" • {pkg}") - if len(packages) > limit: - console.print(f" [dim]... and {len(packages) - limit} more[/dim]") - elif "dependents" in title: - console.print(f"\n[bold {color}]{title}:[/bold {color}] None") - - def _display_services(self, services: list) -> None: - """Display affected services.""" - if services: - console.print(f"\n[bold magenta]🔧 Affected services ({len(services)}):[/bold magenta]") - for service in services: - status_icon = "🟢" if service.status == ServiceStatus.RUNNING else "⚪" - critical_marker = " [red][CRITICAL][/red]" if service.is_critical else "" - console.print(f" {status_icon} {service.name}{critical_marker}") - else: - console.print("\n[bold magenta]🔧 Affected services:[/bold magenta] None") - - def _display_summary_table(self, result, style: str, table_class) -> None: - """Display the impact summary table.""" - summary_table = table_class(show_header=False, box=None, padding=(0, 2)) - summary_table.add_column("Metric", style="dim") - summary_table.add_column("Value") - summary_table.add_row("Total packages affected", str(result.total_affected)) - summary_table.add_row("Cascade depth", str(result.cascade_depth)) - summary_table.add_row("Services at risk", str(len(result.affected_services))) - summary_table.add_row("Severity", f"[{style}]{result.severity.value.upper()}[/{style}]") - console.print("\n[bold]📊 Impact Summary:[/bold]") - console.print(summary_table) - - def _display_recommendations(self, recommendations: list) -> None: - """Display recommendations.""" - if recommendations: - console.print("\n[bold green]💡 Recommendations:[/bold green]") - for rec in recommendations: - console.print(f" • {rec}") - - def _execute_removal(self, package: str, purge: bool = False) -> int: - """Execute the actual package removal with audit logging""" - import datetime - import subprocess - - cx_print(f"Removing '{package}'...", "info") - - # Initialize history for audit logging - history = 
InstallationHistory() - start_time = datetime.datetime.now() - operation_type = InstallationType.PURGE if purge else InstallationType.REMOVE - - # Build removal command (with -y since user already confirmed) - if purge: - cmd = ["sudo", "apt-get", "purge", "-y", package] - else: - cmd = ["sudo", "apt-get", "remove", "-y", package] - - # Record the operation start - try: - install_id = history.record_installation( - operation_type=operation_type, - packages=[package], - commands=[" ".join(cmd)], - start_time=start_time, - ) - except Exception as e: - self._debug(f"Failed to record installation start: {e}") - install_id = None - - try: - result = subprocess.run(cmd, capture_output=True, text=True, timeout=300) - - if result.returncode == 0: - self._print_success(f"'{package}' removed successfully") - - # Record successful removal - if install_id: - try: - history.update_installation(install_id, InstallationStatus.SUCCESS) - except Exception as e: - self._debug(f"Failed to update installation record: {e}") - - # Run autoremove to clean up orphaned packages - console.print() - cx_print("Running autoremove to clean up orphaned packages...", "info") - autoremove_cmd = ["sudo", "apt-get", "autoremove", "-y"] - autoremove_start = datetime.datetime.now() - - # Record autoremove operation start - autoremove_id = None - try: - autoremove_id = history.record_installation( - operation_type=InstallationType.REMOVE, - packages=[f"{package}-autoremove"], - commands=[" ".join(autoremove_cmd)], - start_time=autoremove_start, - ) - except Exception as e: - self._debug(f"Failed to record autoremove start: {e}") - - try: - autoremove_result = subprocess.run( - autoremove_cmd, - capture_output=True, - text=True, - timeout=300, - ) - - if autoremove_result.returncode == 0: - cx_print("Cleanup complete", "success") - if autoremove_id: - try: - history.update_installation( - autoremove_id, InstallationStatus.SUCCESS - ) - except Exception as e: - self._debug(f"Failed to update autoremove 
record: {e}") - else: - cx_print("Autoremove completed with warnings", "warning") - if autoremove_id: - try: - history.update_installation( - autoremove_id, - InstallationStatus.FAILED, - error_message=( - autoremove_result.stderr[:500] - if autoremove_result.stderr - else "Autoremove returned non-zero exit code" - ), - ) - except Exception as e: - self._debug(f"Failed to update autoremove record: {e}") - except subprocess.TimeoutExpired: - cx_print("Autoremove timed out", "warning") - if autoremove_id: - try: - history.update_installation( - autoremove_id, - InstallationStatus.FAILED, - error_message="Autoremove timed out after 300 seconds", - ) - except Exception: - pass - except Exception as e: - cx_print(f"Autoremove failed: {e}", "warning") - if autoremove_id: - try: - history.update_installation( - autoremove_id, - InstallationStatus.FAILED, - error_message=str(e)[:500], - ) - except Exception: - pass - - return 0 - else: - self._print_error(f"Removal failed: {result.stderr}") - # Record failed removal - if install_id: - try: - history.update_installation( - install_id, - InstallationStatus.FAILED, - error_message=result.stderr[:500], - ) - except Exception as e: - self._debug(f"Failed to update installation record: {e}") - return 1 - - except subprocess.TimeoutExpired: - self._print_error("Removal timed out") - # Record timeout failure - if install_id: - try: - history.update_installation( - install_id, - InstallationStatus.FAILED, - error_message="Operation timed out after 300 seconds", - ) - except Exception: - pass - return 1 - except Exception as e: - self._print_error(f"Removal failed: {e}") - # Record exception failure - if install_id: - try: - history.update_installation( - install_id, - InstallationStatus.FAILED, - error_message=str(e)[:500], - ) - except Exception: - pass - return 1 - - def cache_stats(self) -> int: - try: - from cortex.semantic_cache import SemanticCache - - cache = SemanticCache() - stats = cache.stats() - hit_rate_value = 
f"{stats.hit_rate * 100:.1f}" if stats.total else "0.0" - - cx_header(t("cache.stats_header")) - cx_print(f"{t('cache.hits')}: {stats.hits}", "info") - cx_print(f"{t('cache.misses')}: {stats.misses}", "info") - cx_print(t("cache.hit_rate", rate=hit_rate_value), "info") - cx_print(f"{t('cache.saved_calls')}: {stats.saved_calls}", "info") - return 0 - except (ImportError, OSError) as e: - self._print_error(t("cache.read_error", error=str(e))) - return 1 - except Exception as e: - self._print_error(t("cache.unexpected_error", error=str(e))) - if self.verbose: - import traceback - - traceback.print_exc() - return 1 - - def config(self, args: argparse.Namespace) -> int: - """Handle configuration commands including language settings.""" - action = getattr(args, "config_action", None) - - if not action: - cx_print(t("config.missing_subcommand"), "error") - return 1 - - if action == "language": - return self._config_language(args) - elif action == "show": - return self._config_show() - else: - self._print_error(t("config.unknown_action", action=action)) - return 1 - - def _config_language(self, args: argparse.Namespace) -> int: - """Handle language configuration.""" - lang_config = LanguageConfig() - - # List available languages - if getattr(args, "list", False): - cx_header(t("language.available")) - for code, info in SUPPORTED_LANGUAGES.items(): - current_marker = " ✓" if code == get_language() else "" - console.print( - f" [green]{code}[/green] - {info['name']} ({info['native']}){current_marker}" - ) - return 0 - - # Show language info - if getattr(args, "info", False): - info = lang_config.get_language_info() - cx_header(t("language.current")) - console.print(f" [bold]{info['name']}[/bold] ({info['native_name']})") - console.print(f" [dim]{t('config.code_label')}: {info['language']}[/dim]") - # Translate the source value using proper key mapping - source_translation_keys = { - "environment": "language.set_from_env", - "config": "language.set_from_config", - 
"auto-detected": "language.auto_detected", - "default": "language.default", - } - source = info.get("source", "") - source_key = source_translation_keys.get(source) - source_display = t(source_key) if source_key else source - console.print(f" [dim]{t('config.source_label')}: {source_display}[/dim]") - - if info.get("env_override"): - console.print(f" [dim]{t('language.set_from_env')}: {info['env_override']}[/dim]") - if info.get("detected_language"): - console.print( - f" [dim]{t('language.auto_detected')}: {info['detected_language']}[/dim]" - ) - return 0 - - # Set language - code = getattr(args, "code", None) - if not code: - # No code provided, show current language and list - current = get_language() - current_info = SUPPORTED_LANGUAGES.get(current, {}) - cx_print( - f"{t('language.current')}: {current_info.get('name', current)} " - f"({current_info.get('native', '')})", - "info", - ) - console.print() - console.print( - f"[dim]{t('language.supported_codes')}: {', '.join(SUPPORTED_LANGUAGES.keys())}[/dim]" - ) - console.print(f"[dim]{t('config.use_command_hint')}[/dim]") - console.print(f"[dim]{t('config.list_hint')}[/dim]") - return 0 - - # Handle 'auto' to clear saved preference - if code.lower() == "auto": - lang_config.clear_language() - from cortex.i18n.translator import reset_translator - - reset_translator() - new_lang = get_language() - new_info = SUPPORTED_LANGUAGES.get(new_lang, {}) - cx_print(t("language.changed", language=new_info.get("native", new_lang)), "success") - console.print(f"[dim]({t('language.auto_detected')})[/dim]") - return 0 - - # Validate and set language - code = code.lower() - if code not in SUPPORTED_LANGUAGES: - self._print_error(t("language.invalid_code", code=code)) - console.print( - f"[dim]{t('language.supported_codes')}: {', '.join(SUPPORTED_LANGUAGES.keys())}[/dim]" - ) - return 1 - - try: - lang_config.set_language(code) - # Reset the global translator to pick up the new language - from cortex.i18n.translator import 
reset_translator - - reset_translator() - set_language(code) - - lang_info = SUPPORTED_LANGUAGES[code] - cx_print(t("language.changed", language=lang_info["native"]), "success") - return 0 - except (ValueError, RuntimeError) as e: - self._print_error(t("language.set_failed", error=str(e))) - return 1 - - def _config_show(self) -> int: - """Show all current configuration.""" - cx_header(t("config.header")) - - # Language - lang_config = LanguageConfig() - lang_info = lang_config.get_language_info() - console.print(f"[bold]{t('config.language_label')}:[/bold]") - console.print( - f" {lang_info['name']} ({lang_info['native_name']}) " - f"[dim][{lang_info['language']}][/dim]" - ) - # Translate the source identifier to user-friendly text - source_translations = { - "environment": t("language.set_from_env"), - "config": t("language.set_from_config"), - "auto-detected": t("language.auto_detected"), - "default": t("language.default"), - } - source_display = source_translations.get(lang_info["source"], lang_info["source"]) - console.print(f" [dim]{t('config.source_label')}: {source_display}[/dim]") - console.print() - - # API Provider - provider = self._get_provider() - console.print(f"[bold]{t('config.llm_provider_label')}:[/bold]") - console.print(f" {provider}") - console.print() - - # Config paths - console.print(f"[bold]{t('config.config_paths_label')}:[/bold]") - console.print(f" {t('config.preferences_path')}: ~/.cortex/preferences.yaml") - console.print(f" {t('config.history_path')}: ~/.cortex/history.db") - console.print() - - return 0 - - def history(self, limit: int = 20, status: str | None = None, show_id: str | None = None): - """Show installation history""" - history = InstallationHistory() - - try: - if show_id: - # Show specific installation - record = history.get_installation(show_id) + if show_id: + # Show specific installation + record = history.get_installation(show_id) if not record: self._print_error(f"Installation {show_id} not found") @@ -2385,1010 
+1205,26 @@ def rollback(self, install_id: str, dry_run: bool = False): else: self._print_error(message) return 1 - except (ValueError, OSError) as e: - self._print_error(f"Rollback failed: {str(e)}") - return 1 - except Exception as e: - self._print_error(f"Unexpected rollback error: {str(e)}") - if self.verbose: - import traceback - - traceback.print_exc() - return 1 - - def status(self): - """Show comprehensive system status and run health checks""" - from cortex.doctor import SystemDoctor - - # Run the comprehensive system health checks - # This now includes all functionality from the old status command - # plus all the detailed health checks from doctor - doctor = SystemDoctor() - return doctor.run_checks() - - def update(self, args: argparse.Namespace) -> int: - """Handle the update command for self-updating Cortex.""" - from rich.progress import Progress, SpinnerColumn, TextColumn - - # Parse channel - channel_str = getattr(args, "channel", "stable") - try: - channel = UpdateChannel(channel_str) - except ValueError: - channel = UpdateChannel.STABLE - - updater = Updater(channel=channel) - - # Handle subcommands - action = getattr(args, "update_action", None) - - if action == "check" or (not action and getattr(args, "check", False)): - # Check for updates only - cx_print("Checking for updates...", "thinking") - result = updater.check_update_available(force=True) - - if result.error: - self._print_error(f"Update check failed: {result.error}") - return 1 - - console.print() - cx_print(f"Current version: [cyan]{result.current_version}[/cyan]", "info") - - if result.update_available and result.latest_release: - cx_print( - f"Update available: [green]{result.latest_version}[/green]", - "success", - ) - console.print() - console.print("[bold]Release notes:[/bold]") - console.print(result.latest_release.release_notes_summary) - console.print() - cx_print( - "Run [bold]cortex update install[/bold] to upgrade", - "info", - ) - else: - cx_print("Cortex is up to date!", 
"success") - - return 0 - - elif action == "install": - # Install update - target = getattr(args, "version", None) - dry_run = getattr(args, "dry_run", False) - - if dry_run: - cx_print("Dry run mode - no changes will be made", "warning") - - cx_header("Cortex Self-Update") - - def progress_callback(message: str, percent: float) -> None: - if percent >= 0: - cx_print(f"{message} ({percent:.0f}%)", "info") - else: - cx_print(message, "info") - - updater.progress_callback = progress_callback - - result = updater.update(target_version=target, dry_run=dry_run) - - console.print() - - if result.success: - if result.status == UpdateStatus.SUCCESS: - if result.new_version == result.previous_version: - cx_print("Already up to date!", "success") - else: - cx_print( - f"Updated: {result.previous_version} → {result.new_version}", - "success", - ) - if result.duration_seconds: - console.print(f"[dim]Completed in {result.duration_seconds:.1f}s[/dim]") - elif result.status == UpdateStatus.PENDING: - # Dry run - cx_print( - f"Would update: {result.previous_version} → {result.new_version}", - "info", - ) - return 0 - else: - if result.status == UpdateStatus.ROLLED_BACK: - cx_print("Update failed - rolled back to previous version", "warning") - else: - self._print_error(f"Update failed: {result.error}") - return 1 - - elif action == "rollback": - # Rollback to previous version - backup_id = getattr(args, "backup_id", None) - - backups = updater.list_backups() - - if not backups: - self._print_error("No backups available for rollback") - return 1 - - if backup_id: - # Find specific backup - target_backup = None - for b in backups: - if b.version == backup_id or str(b.path).endswith(backup_id): - target_backup = b - break - - if not target_backup: - self._print_error(f"Backup '{backup_id}' not found") - return 1 - - backup_path = target_backup.path - else: - # Use most recent backup - backup_path = backups[0].path - - cx_print(f"Rolling back to backup: {backup_path.name}", "info") - 
result = updater.rollback_to_backup(backup_path) - - if result.success: - cx_print( - f"Rolled back: {result.previous_version} → {result.new_version}", - "success", - ) - return 0 - else: - self._print_error(f"Rollback failed: {result.error}") - return 1 - - elif action == "list" or getattr(args, "list_releases", False): - # List available versions - from cortex.update_checker import UpdateChecker - - checker = UpdateChecker(channel=channel) - releases = checker.get_all_releases(limit=10) - - if not releases: - cx_print("No releases found", "warning") - return 1 - - cx_header(f"Available Releases ({channel.value} channel)") - - table = Table(show_header=True, header_style="bold cyan", box=None) - table.add_column("Version", style="green") - table.add_column("Date") - table.add_column("Channel") - table.add_column("Notes") - - current = get_version_string() - - for release in releases: - version_str = str(release.version) - if version_str == current: - version_str = f"{version_str} [dim](current)[/dim]" - - # Truncate notes - notes = release.name or release.body[:50] if release.body else "" - if len(notes) > 50: - notes = notes[:47] + "..." 
- - table.add_row( - version_str, - release.formatted_date, - release.version.channel.value, - notes, - ) - - console.print(table) - return 0 - - elif action == "backups": - # List backups - backups = updater.list_backups() - - if not backups: - cx_print("No backups available", "info") - return 0 - - cx_header("Available Backups") - - table = Table(show_header=True, header_style="bold cyan", box=None) - table.add_column("Version", style="green") - table.add_column("Date") - table.add_column("Size") - table.add_column("Path") - - for backup in backups: - # Format size - size_mb = backup.size_bytes / (1024 * 1024) - size_str = f"{size_mb:.1f} MB" - - # Format date - try: - dt = datetime.fromisoformat(backup.timestamp) - date_str = dt.strftime("%Y-%m-%d %H:%M") - except ValueError: - date_str = backup.timestamp[:16] - - table.add_row( - backup.version, - date_str, - size_str, - str(backup.path.name), - ) - - console.print(table) - console.print() - cx_print( - "Use [bold]cortex update rollback [/bold] to restore", - "info", - ) - return 0 - - else: - # Default: show current version and check for updates - cx_print(f"Current version: [cyan]{get_version_string()}[/cyan]", "info") - cx_print("Checking for updates...", "thinking") - - result = updater.check_update_available() - - if result.update_available and result.latest_release: - console.print() - cx_print( - f"Update available: [green]{result.latest_version}[/green]", - "success", - ) - console.print() - console.print("[bold]What's new:[/bold]") - console.print(result.latest_release.release_notes_summary) - console.print() - cx_print( - "Run [bold]cortex update install[/bold] to upgrade", - "info", - ) - else: - cx_print("Cortex is up to date!", "success") - - return 0 - - # Daemon Commands - # -------------------------- - - def daemon(self, args: argparse.Namespace) -> int: - """Handle daemon commands: install, uninstall, config, reload-config, version, ping, shutdown. 
- - Available commands: - - install/uninstall: Manage systemd service files (Python-side) - - config: Get daemon configuration via IPC - - reload-config: Reload daemon configuration via IPC - - version: Get daemon version via IPC - - ping: Test daemon connectivity via IPC - - shutdown: Request daemon shutdown via IPC - - run-tests: Run daemon test suite - """ - action = getattr(args, "daemon_action", None) - - if action == "install": - return self._daemon_install(args) - elif action == "uninstall": - return self._daemon_uninstall(args) - elif action == "config": - return self._daemon_config() - elif action == "reload-config": - return self._daemon_reload_config() - elif action == "version": - return self._daemon_version() - elif action == "ping": - return self._daemon_ping() - elif action == "shutdown": - return self._daemon_shutdown() - elif action == "run-tests": - return self._daemon_run_tests(args) - else: - cx_print("Usage: cortex daemon ", "info") - cx_print("", "info") - cx_print("Available commands:", "info") - cx_print(" install Install and enable the daemon service", "info") - cx_print(" uninstall Remove the daemon service", "info") - cx_print(" config Show daemon configuration", "info") - cx_print(" reload-config Reload daemon configuration", "info") - cx_print(" version Show daemon version", "info") - cx_print(" ping Test daemon connectivity", "info") - cx_print(" shutdown Request daemon shutdown", "info") - cx_print(" run-tests Run daemon test suite", "info") - return 0 - - def _update_history_on_failure( - self, history: InstallationHistory, install_id: str | None, error_msg: str - ) -> None: - """ - Helper method to update installation history on failure. - - Args: - history: InstallationHistory instance. - install_id: Installation ID to update, or None if not available. - error_msg: Error message to record. 
- """ - if install_id: - try: - history.update_installation(install_id, InstallationStatus.FAILED, error_msg) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - - def _daemon_ipc_call( - self, - operation_name: str, - ipc_func: "Callable[[DaemonClient], DaemonResponse]", - ) -> tuple[bool, "DaemonResponse | None"]: - """ - Helper method for daemon IPC calls with centralized error handling. - - Args: - operation_name: Human-readable name of the operation for error messages. - ipc_func: A callable that takes a DaemonClient and returns a DaemonResponse. - - Returns: - Tuple of (success: bool, response: DaemonResponse | None) - On error, response is None and an error message is printed. - """ - # Initialize audit logging - history = InstallationHistory() - start_time = datetime.now(timezone.utc) - install_id = None - - try: - # Record operation start - install_id = history.record_installation( - InstallationType.CONFIG, - ["cortexd"], - [f"daemon.{operation_name}"], - start_time, - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - - try: - from cortex.daemon_client import ( - DaemonClient, - DaemonConnectionError, - DaemonNotInstalledError, - DaemonResponse, - ) - - client = DaemonClient() - response = ipc_func(client) - - # Update history with success/failure - if install_id: - try: - if response and response.success: - history.update_installation(install_id, InstallationStatus.SUCCESS) - else: - error_msg = ( - response.error if response and response.error else "IPC call failed" - ) - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - - return True, response - - except DaemonNotInstalledError as e: - error_msg = str(e) - cx_print(f"{error_msg}", "error") - self._update_history_on_failure(history, install_id, error_msg) - return False, None - 
except DaemonConnectionError as e: - error_msg = str(e) - cx_print(f"{error_msg}", "error") - self._update_history_on_failure(history, install_id, error_msg) - return False, None - except ImportError: - error_msg = "Daemon client not available." - cx_print(error_msg, "error") - self._update_history_on_failure(history, install_id, error_msg) - return False, None - except Exception as e: - error_msg = f"Unexpected error during {operation_name}: {e}" - cx_print(error_msg, "error") - self._update_history_on_failure(history, install_id, error_msg) - return False, None - - def _daemon_install(self, args: argparse.Namespace) -> int: - """Install the cortexd daemon using setup_daemon.py.""" - import subprocess - from pathlib import Path - - cx_header("Installing Cortex Daemon") - - # Find setup_daemon.py - daemon_dir = Path(__file__).parent.parent / "daemon" - setup_script = daemon_dir / "scripts" / "setup_daemon.py" - - if not setup_script.exists(): - error_msg = f"Setup script not found at {setup_script}" - cx_print(error_msg, "error") - cx_print("Please ensure the daemon directory is present.", "error") - return 1 - - execute = getattr(args, "execute", False) - - if not execute: - cx_print("This will build and install the cortexd daemon.", "info") - cx_print("", "info") - cx_print("The setup wizard will:", "info") - cx_print(" 1. Check and install build dependencies", "info") - cx_print(" 2. Build the daemon from source", "info") - cx_print(" 3. Install systemd service files", "info") - cx_print(" 4. 
Enable and start the service", "info") - cx_print("", "info") - cx_print("Run with --execute to proceed:", "info") - cx_print(" cortex daemon install --execute", "dim") - # Don't record dry-runs in audit history - return 0 - - # Initialize audit logging only when execution will actually run - history = InstallationHistory() - start_time = datetime.now(timezone.utc) - install_id = None - - try: - # Record operation start - install_id = history.record_installation( - InstallationType.CONFIG, - ["cortexd"], - ["cortex daemon install"], - start_time, - ) - except Exception as e: - cx_print(f"Warning: Could not initialize audit logging: {e}", "warning") - - # Run setup_daemon.py - cx_print("Running daemon setup wizard...", "info") - try: - result = subprocess.run( - [sys.executable, str(setup_script)], - check=False, - ) - - # Record completion - if install_id: - try: - if result.returncode == 0: - history.update_installation(install_id, InstallationStatus.SUCCESS) - else: - error_msg = f"Setup script returned exit code {result.returncode}" - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - - return result.returncode - except subprocess.SubprocessError as e: - error_msg = f"Subprocess error during daemon install: {str(e)}" - cx_print(error_msg, "error") - if install_id: - try: - history.update_installation(install_id, InstallationStatus.FAILED, error_msg) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - except Exception as e: - error_msg = f"Unexpected error during daemon install: {str(e)}" - cx_print(error_msg, "error") - if install_id: - try: - history.update_installation(install_id, InstallationStatus.FAILED, error_msg) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - - def _daemon_uninstall(self, args: 
argparse.Namespace) -> int: - """Uninstall the cortexd daemon.""" - import subprocess - from pathlib import Path - - cx_header("Uninstalling Cortex Daemon") - - execute = getattr(args, "execute", False) - - if not execute: - cx_print("This will stop and remove the cortexd daemon.", "warning") - cx_print("", "info") - cx_print("This will:", "info") - cx_print(" 1. Stop the cortexd service", "info") - cx_print(" 2. Disable the service", "info") - cx_print(" 3. Remove systemd unit files", "info") - cx_print(" 4. Remove the daemon binary", "info") - cx_print("", "info") - cx_print("Run with --execute to proceed:", "info") - cx_print(" cortex daemon uninstall --execute", "dim") - # Don't record dry-runs in audit history - return 0 - - # Initialize audit logging only when execution will actually run - history = InstallationHistory() - start_time = datetime.now(timezone.utc) - install_id = None - - try: - # Record operation start - install_id = history.record_installation( - InstallationType.CONFIG, - ["cortexd"], - ["cortex daemon uninstall"], - start_time, - ) - except Exception as e: - cx_print(f"Warning: Could not initialize audit logging: {e}", "warning") - - # Find uninstall script - daemon_dir = Path(__file__).parent.parent / "daemon" - uninstall_script = daemon_dir / "scripts" / "uninstall.sh" - - if uninstall_script.exists(): - cx_print("Running uninstall script...", "info") - try: - # Security: Lock down script permissions before execution - # Set read-only permissions for non-root users to prevent tampering - import stat - - script_stat = uninstall_script.stat() - # Remove write permissions for group and others, keep owner read/execute - uninstall_script.chmod(stat.S_IRUSR | stat.S_IXUSR) - - result = subprocess.run( - ["sudo", "bash", str(uninstall_script)], - check=False, - capture_output=True, - text=True, - ) - - # Record completion - if install_id: - try: - if result.returncode == 0: - history.update_installation(install_id, InstallationStatus.SUCCESS) - 
else: - error_msg = f"Uninstall script returned exit code {result.returncode}" - if result.stderr: - error_msg += f": {result.stderr[:500]}" - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - - return result.returncode - except subprocess.SubprocessError as e: - error_msg = f"Subprocess error during daemon uninstall: {str(e)}" - cx_print(error_msg, "error") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - except Exception as e: - error_msg = f"Unexpected error during daemon uninstall: {str(e)}" - cx_print(error_msg, "error") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - else: - # Manual uninstall - cx_print("Running manual uninstall...", "info") - commands = [ - ["sudo", "systemctl", "stop", "cortexd"], - ["sudo", "systemctl", "disable", "cortexd"], - ["sudo", "rm", "-f", "/etc/systemd/system/cortexd.service"], - ["sudo", "rm", "-f", "/etc/systemd/system/cortexd.socket"], - ["sudo", "rm", "-f", "/usr/local/bin/cortexd"], - ["sudo", "systemctl", "daemon-reload"], - ] - - try: - any_failed = False - error_messages = [] - - for cmd in commands: - cmd_str = " ".join(cmd) - cx_print(f" Running: {cmd_str}", "dim") - - # Update installation history with command info (append to existing record) - if install_id: - try: - # Append command info to existing installation record - # instead of creating orphan records - history.update_installation( - install_id, - InstallationStatus.IN_PROGRESS, - f"Executing: {cmd_str}", - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow 
- pass - - result = subprocess.run(cmd, check=False, capture_output=True, text=True) - - # Track failures - if result.returncode != 0: - any_failed = True - error_msg = ( - f"Command '{cmd_str}' failed with return code {result.returncode}" - ) - if result.stderr: - error_msg += f": {result.stderr[:500]}" - error_messages.append(error_msg) - cx_print(f" Failed: {error_msg}", "error") - - # Update history and return based on overall success - if any_failed: - combined_error = "; ".join(error_messages) - cx_print("Daemon uninstall failed.", "error") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, combined_error - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - else: - cx_print("Daemon uninstalled.", "success") - # Record success - if install_id: - try: - history.update_installation(install_id, InstallationStatus.SUCCESS) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 0 - except subprocess.SubprocessError as e: - error_msg = f"Subprocess error during manual uninstall: {str(e)}" - cx_print(error_msg, "error") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - except Exception as e: - error_msg = f"Unexpected error during manual uninstall: {str(e)}" - cx_print(error_msg, "error") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - - def _daemon_config(self) -> int: - """Get daemon configuration via IPC.""" - from rich.table import Table - - cx_header("Daemon Configuration") - - success, response = self._daemon_ipc_call("config.get", lambda c: c.config_get()) - if not success: 
- return 1 - - if response.success and response.result: - table = Table(title="Current Configuration", show_header=True) - table.add_column("Setting", style="cyan") - table.add_column("Value", style="green") - - for key, value in response.result.items(): - table.add_row(key, str(value)) - - console.print(table) - return 0 - else: - cx_print(f"Failed to get config: {response.error}", "error") - return 1 - - def _daemon_reload_config(self) -> int: - """Reload daemon configuration via IPC.""" - cx_header("Reloading Daemon Configuration") - - success, response = self._daemon_ipc_call("config.reload", lambda c: c.config_reload()) - if not success: - return 1 - - if response.success: - cx_print("Configuration reloaded successfully!", "success") - return 0 - else: - cx_print(f"Failed to reload config: {response.error}", "error") - return 1 - - def _daemon_version(self) -> int: - """Get daemon version via IPC.""" - cx_header("Daemon Version") - - success, response = self._daemon_ipc_call("version", lambda c: c.version()) - if not success: - return 1 - - if response.success and response.result: - name = response.result.get("name", "cortexd") - version = response.result.get("version", "unknown") - cx_print(f"{name} version {version}", "success") - return 0 - else: - cx_print(f"Failed to get version: {response.error}", "error") - return 1 - - def _daemon_ping(self) -> int: - """Test daemon connectivity via IPC.""" - import time - - cx_header("Daemon Ping") - - start = time.time() - success, response = self._daemon_ipc_call("ping", lambda c: c.ping()) - elapsed = (time.time() - start) * 1000 # ms - - if not success: - return 1 - - if response.success: - cx_print(f"Pong! 
Response time: {elapsed:.1f}ms", "success") - return 0 - else: - cx_print(f"Ping failed: {response.error}", "error") - return 1 - - def _daemon_shutdown(self) -> int: - """Request daemon shutdown via IPC.""" - cx_header("Requesting Daemon Shutdown") - - success, response = self._daemon_ipc_call("shutdown", lambda c: c.shutdown()) - if not success: - return 1 - - if response.success: - cx_print("Daemon shutdown requested successfully!", "success") - return 0 - cx_print(f"Failed to request shutdown: {response.error}", "error") - return 1 - - def _daemon_run_tests(self, args: argparse.Namespace) -> int: - """Run the daemon test suite.""" - import subprocess - - cx_header("Daemon Tests") - - # Initialize audit logging - history = InstallationHistory() - start_time = datetime.now(timezone.utc) - install_id = None - - try: - # Record operation start - install_id = history.record_installation( - InstallationType.CONFIG, - ["cortexd"], - ["daemon.run-tests"], - start_time, - ) - except Exception: - # Continue even if audit logging fails - pass - - # Find daemon directory - daemon_dir = Path(__file__).parent.parent / "daemon" - build_dir = daemon_dir / "build" - tests_dir = build_dir / "tests" # Test binaries are in build/tests/ - - # Define test binaries - unit_tests = [ - "test_config", - "test_protocol", - "test_rate_limiter", - "test_logger", - "test_common", - ] - integration_tests = ["test_ipc_server", "test_handlers", "test_daemon"] - all_tests = unit_tests + integration_tests - - # Check if tests are built - def check_tests_built() -> tuple[bool, list[str]]: - """Check which test binaries exist.""" - existing = [] - for test in all_tests: - if (tests_dir / test).exists(): - existing.append(test) - return len(existing) > 0, existing - - tests_built, existing_tests = check_tests_built() - - if not tests_built: - error_msg = "Tests are not built." 
- cx_print(error_msg, "warning") - cx_print("", "info") - cx_print("To build tests, run the setup wizard with test building enabled:", "info") - cx_print("", "info") - cx_print(" [bold]python daemon/scripts/setup_daemon.py[/bold]", "info") - cx_print("", "info") - cx_print("When prompted, answer 'yes' to build the test suite.", "info") - cx_print("", "info") - cx_print("Or build manually:", "info") - cx_print(" cd daemon && ./scripts/build.sh Release --with-tests", "dim") - if install_id: - try: - history.update_installation(install_id, InstallationStatus.FAILED, error_msg) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - - # Determine which tests to run - test_filter = getattr(args, "test", None) - run_unit = getattr(args, "unit", False) - run_integration = getattr(args, "integration", False) - verbose = getattr(args, "verbose", False) - - tests_to_run = [] - - if test_filter: - # Run a specific test - # Allow partial matching (e.g., "config" matches "test_config") - test_name = test_filter if test_filter.startswith("test_") else f"test_{test_filter}" - if test_name in existing_tests: - tests_to_run = [test_name] - else: - error_msg = f"Test '{test_filter}' not found or not built." - cx_print(error_msg, "error") - cx_print("", "info") - cx_print("Available tests:", "info") - for t in existing_tests: - cx_print(f" • {t}", "info") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - elif run_unit and not run_integration: - tests_to_run = [t for t in unit_tests if t in existing_tests] - if not tests_to_run: - error_msg = "No unit tests built." 
- cx_print(error_msg, "warning") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - elif run_integration and not run_unit: - tests_to_run = [t for t in integration_tests if t in existing_tests] - if not tests_to_run: - error_msg = "No integration tests built." - cx_print(error_msg, "warning") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - else: - # Run all available tests - tests_to_run = existing_tests - - # Show what we're running - cx_print(f"Running {len(tests_to_run)} test(s)...", "info") - cx_print("", "info") - - # Use ctest for running tests - ctest_args = ["ctest", "--output-on-failure"] - - if verbose: - ctest_args.append("-V") - - # Filter specific tests if not running all - if test_filter or run_unit or run_integration: - # ctest uses -R for regex filtering - test_regex = "|".join(tests_to_run) - ctest_args.extend(["-R", test_regex]) - - try: - result = subprocess.run( - ctest_args, - cwd=str(build_dir), - check=False, - ) - - if result.returncode == 0: - cx_print("", "info") - cx_print("All tests passed!", "success") - if install_id: - try: - history.update_installation(install_id, InstallationStatus.SUCCESS) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 0 - else: - error_msg = f"Test execution failed with return code {result.returncode}" - cx_print("", "info") - cx_print("Some tests failed.", "error") - if install_id: - try: - history.update_installation( - install_id, InstallationStatus.FAILED, error_msg - ) - except Exception: - # Continue even if audit logging fails - don't break the main flow - pass - return 1 - except subprocess.SubprocessError as e: - 
error_msg = f"Subprocess error during test execution: {str(e)}" - cx_print(error_msg, "error") - self._update_history_on_failure(history, install_id, error_msg) - return 1 - except Exception as e: - error_msg = f"Unexpected error during test execution: {str(e)}" - cx_print(error_msg, "error") - self._update_history_on_failure(history, install_id, error_msg) - return 1 - - def benchmark(self, verbose: bool = False): - """Run AI performance benchmark and display scores""" - from cortex.benchmark import run_benchmark - - return run_benchmark(verbose=verbose) - - def systemd(self, service: str, action: str = "status", verbose: bool = False): - """Systemd service helper with plain English explanations""" - from cortex.systemd_helper import run_systemd_helper - - return run_systemd_helper(service, action, verbose) - - def gpu(self, action: str = "status", mode: str = None, verbose: bool = False): - """Hybrid GPU (Optimus) manager""" - from cortex.gpu_manager import run_gpu_manager + except (ValueError, OSError) as e: + self._print_error(f"Rollback failed: {str(e)}") + return 1 + except Exception as e: + self._print_error(f"Unexpected rollback error: {str(e)}") + if self.verbose: + import traceback - return run_gpu_manager(action, mode, verbose) + traceback.print_exc() + return 1 - def printer(self, action: str = "status", verbose: bool = False): - """Printer/Scanner auto-setup""" - from cortex.printer_setup import run_printer_setup + def status(self): + """Show comprehensive system status and run health checks""" + from cortex.doctor import SystemDoctor - return run_printer_setup(action, verbose) + # Run the comprehensive system health checks + # This now includes all functionality from the old status command + # plus all the detailed health checks from doctor + doctor = SystemDoctor() + return doctor.run_checks() def wizard(self): """Interactive setup wizard for API key configuration""" @@ -3409,7 +1245,7 @@ def env(self, args: argparse.Namespace) -> int: if not action: 
self._print_error( - "Please specify a subcommand (set/get/list/delete/export/import/clear/template/audit/check/path)" + "Please specify a subcommand (set/get/list/delete/export/import/clear/template)" ) return 1 @@ -3434,13 +1270,6 @@ def env(self, args: argparse.Namespace) -> int: return self._env_list_apps(env_mgr, args) elif action == "load": return self._env_load(env_mgr, args) - # Shell environment analyzer commands - elif action == "audit": - return self._env_audit(args) - elif action == "check": - return self._env_check(args) - elif action == "path": - return self._env_path(args) else: self._print_error(f"Unknown env subcommand: {action}") return 1 @@ -3634,9 +1463,7 @@ def _env_clear(self, env_mgr: EnvironmentManager, args: argparse.Namespace) -> i # Confirm unless --force is used if not force: - confirm = StdinHandler.get_input( - f"⚠️ Clear ALL environment variables for '{app}'? (y/n): " - ) + confirm = input(f"⚠️ Clear ALL environment variables for '{app}'? (y/n): ") if confirm.lower() != "y": cx_print("Operation cancelled", "info") return 0 @@ -3767,382 +1594,612 @@ def _env_load(self, env_mgr: EnvironmentManager, args: argparse.Namespace) -> in return 0 - # --- Shell Environment Analyzer Commands --- - def _env_audit(self, args: argparse.Namespace) -> int: - """Audit shell environment variables and show their sources.""" - from cortex.shell_env_analyzer import Shell, ShellEnvironmentAnalyzer - - shell = None - if hasattr(args, "shell") and args.shell: - shell = Shell(args.shell) - - analyzer = ShellEnvironmentAnalyzer(shell=shell) - include_system = not getattr(args, "no_system", False) - as_json = getattr(args, "json", False) - - audit = analyzer.audit(include_system=include_system) - - if as_json: - import json - - print(json.dumps(audit.to_dict(), indent=2)) - return 0 - - # Display audit results - cx_header(f"Environment Audit ({audit.shell.value} shell)") - - console.print("\n[bold]Config Files Scanned:[/bold]") - for f in 
audit.config_files_scanned: - console.print(f" • {f}") - - if audit.variables: - console.print("\n[bold]Variables with Definitions:[/bold]") - # Sort by number of sources (most definitions first) - sorted_vars = sorted(audit.variables.items(), key=lambda x: len(x[1]), reverse=True) - for var_name, sources in sorted_vars[:20]: # Limit to top 20 - console.print(f"\n [cyan]{var_name}[/cyan] ({len(sources)} definition(s))") - for src in sources: - console.print(f" [dim]{src.file}:{src.line_number}[/dim]") - # Show truncated value - val_preview = src.value[:50] + "..." if len(src.value) > 50 else src.value - console.print(f" → {val_preview}") - - if len(audit.variables) > 20: - console.print(f"\n [dim]... and {len(audit.variables) - 20} more variables[/dim]") - - if audit.conflicts: - console.print("\n[bold]⚠️ Conflicts Detected:[/bold]") - for conflict in audit.conflicts: - severity_color = { - "info": "blue", - "warning": "yellow", - "error": "red", - }.get(conflict.severity.value, "white") - console.print( - f" [{severity_color}]{conflict.severity.value.upper()}[/{severity_color}]: {conflict.description}" - ) - - console.print(f"\n[dim]Total: {len(audit.variables)} variable(s) found[/dim]") + # --- Do Command (manage do-mode runs) --- + def do_cmd(self, args: argparse.Namespace) -> int: + """Handle `cortex do` commands for managing do-mode runs.""" + from cortex.do_runner import DoHandler, ProtectedPathsManager, CortexUserManager + + action = getattr(args, "do_action", None) + + if not action: + cx_print("\n🔧 Do Mode - Execute commands to solve problems\n", "info") + console.print("Usage: cortex ask --do ") + console.print(" cortex do [options]") + console.print("\nCommands:") + console.print(" history [run_id] View do-mode run history") + console.print(" setup Setup cortex user for privilege management") + console.print(" protected Manage protected paths") + console.print("\nExample:") + console.print(" cortex ask --do 'Fix my nginx configuration'") + 
console.print(" cortex do history") + return 0 + + if action == "history": + return self._do_history(args) + elif action == "setup": + return self._do_setup() + elif action == "protected": + return self._do_protected(args) + else: + self._print_error(f"Unknown do action: {action}") + return 1 + + def _do_history(self, args: argparse.Namespace) -> int: + """Show do-mode run history.""" + from cortex.do_runner import DoHandler + + handler = DoHandler() + run_id = getattr(args, "run_id", None) + + if run_id: + # Show specific run details + run = handler.get_run(run_id) + if not run: + self._print_error(f"Run {run_id} not found") + return 1 + + # Get statistics from database + stats = handler.db.get_run_stats(run_id) + + console.print(f"\n[bold]Do Run: {run.run_id}[/bold]") + console.print("=" * 70) + + # Show session ID if available + session_id = getattr(run, "session_id", None) + if session_id: + console.print(f"[bold]Session:[/bold] [magenta]{session_id}[/magenta]") + + console.print(f"[bold]Query:[/bold] {run.user_query}") + console.print(f"[bold]Mode:[/bold] {run.mode.value}") + console.print(f"[bold]Started:[/bold] {run.started_at}") + console.print(f"[bold]Completed:[/bold] {run.completed_at}") + console.print(f"\n[bold]Summary:[/bold] {run.summary}") + + # Show statistics + if stats: + console.print(f"\n[bold cyan]📊 Command Statistics:[/bold cyan]") + total = stats.get("total_commands", 0) + success = stats.get("successful_commands", 0) + failed = stats.get("failed_commands", 0) + skipped = stats.get("skipped_commands", 0) + console.print(f" Total: {total} | [green]✓ Success: {success}[/green] | [red]✗ Failed: {failed}[/red] | [yellow]○ Skipped: {skipped}[/yellow]") + + if run.files_accessed: + console.print(f"\n[bold]Files Accessed:[/bold] {', '.join(run.files_accessed)}") + + # Get detailed commands from database + commands_detail = handler.db.get_run_commands(run_id) + + console.print(f"\n[bold cyan]📋 Commands Executed:[/bold cyan]") + console.print("-" * 
70) + + if commands_detail: + for cmd in commands_detail: + status = cmd["status"] + if status == "success": + status_icon = "[green]✓[/green]" + elif status == "failed": + status_icon = "[red]✗[/red]" + elif status == "skipped": + status_icon = "[yellow]○[/yellow]" + else: + status_icon = "[dim]?[/dim]" + + console.print(f"\n{status_icon} [bold]Command {cmd['index'] + 1}:[/bold] {cmd['command']}") + console.print(f" [dim]Purpose:[/dim] {cmd['purpose']}") + console.print(f" [dim]Status:[/dim] {status} | [dim]Duration:[/dim] {cmd['duration']:.2f}s") + + if cmd["output"]: + console.print(f" [dim]Output:[/dim] {cmd['output']}") + if cmd["error"]: + console.print(f" [red]Error:[/red] {cmd['error']}") + else: + # Fallback to run.commands if database commands not available + for i, cmd in enumerate(run.commands): + status_icon = "[green]✓[/green]" if cmd.status.value == "success" else "[red]✗[/red]" + console.print(f"\n{status_icon} [bold]Command {i + 1}:[/bold] {cmd.command}") + console.print(f" [dim]Purpose:[/dim] {cmd.purpose}") + console.print(f" [dim]Status:[/dim] {cmd.status.value} | [dim]Duration:[/dim] {cmd.duration_seconds:.2f}s") + if cmd.output: + output_truncated = cmd.output[:250] + "..." 
if len(cmd.output) > 250 else cmd.output + console.print(f" [dim]Output:[/dim] {output_truncated}") + if cmd.error: + console.print(f" [red]Error:[/red] {cmd.error}") + + return 0 + + # List recent runs + limit = getattr(args, "limit", 20) + runs = handler.get_run_history(limit) + + if not runs: + cx_print("No do-mode runs found", "info") + return 0 + + # Group runs by session + sessions = {} + standalone_runs = [] + + for run in runs: + session_id = getattr(run, "session_id", None) + if session_id: + if session_id not in sessions: + sessions[session_id] = [] + sessions[session_id].append(run) + else: + standalone_runs.append(run) + + console.print(f"\n[bold]📜 Recent Do Runs:[/bold]") + console.print(f"[dim]Sessions: {len(sessions)} | Standalone runs: {len(standalone_runs)}[/dim]\n") + + import json as json_module + + # Show sessions first + for session_id, session_runs in sessions.items(): + console.print(f"[bold magenta]╭{'─' * 68}╮[/bold magenta]") + console.print(f"[bold magenta]│ 📂 Session: {session_id[:40]}...{' ' * 15}│[/bold magenta]") + console.print(f"[bold magenta]│ Runs: {len(session_runs)}{' ' * 57}│[/bold magenta]") + console.print(f"[bold magenta]╰{'─' * 68}╯[/bold magenta]") + + for run in session_runs: + self._display_run_summary(handler, run, indent=" ") + console.print() + + # Show standalone runs + if standalone_runs: + if sessions: + console.print(f"[bold cyan]{'─' * 70}[/bold cyan]") + console.print("[bold]📋 Standalone Runs (no session):[/bold]") + + for run in standalone_runs: + self._display_run_summary(handler, run) + + console.print(f"[dim]Use 'cortex do history ' for full details[/dim]") return 0 - - def _env_check(self, args: argparse.Namespace) -> int: - """Check for environment variable conflicts and issues.""" - from cortex.shell_env_analyzer import Shell, ShellEnvironmentAnalyzer - - shell = None - if hasattr(args, "shell") and args.shell: - shell = Shell(args.shell) - - analyzer = ShellEnvironmentAnalyzer(shell=shell) - audit = 
analyzer.audit() - - cx_header(f"Environment Health Check ({audit.shell.value})") - - issues_found = 0 - - # Check for conflicts - if audit.conflicts: - console.print("\n[bold]Variable Conflicts:[/bold]") - for conflict in audit.conflicts: - issues_found += 1 - severity_color = { - "info": "blue", - "warning": "yellow", - "error": "red", - }.get(conflict.severity.value, "white") - console.print( - f" [{severity_color}]●[/{severity_color}] {conflict.variable_name}: {conflict.description}" - ) - for src in conflict.sources: - console.print(f" [dim]• {src.file}:{src.line_number}[/dim]") - - # Check PATH - duplicates = analyzer.get_path_duplicates() - missing = analyzer.get_missing_paths() - - if duplicates: - console.print("\n[bold]PATH Duplicates:[/bold]") - for dup in duplicates: - issues_found += 1 - console.print(f" [yellow]●[/yellow] {dup}") - - if missing: - console.print("\n[bold]Missing PATH Entries:[/bold]") - for m in missing: - issues_found += 1 - console.print(f" [red]●[/red] {m}") - - if issues_found == 0: - cx_print("\n✓ No issues found! 
Environment looks healthy.", "success") + + def _display_run_summary(self, handler, run, indent: str = "") -> None: + """Display a single run summary.""" + stats = handler.db.get_run_stats(run.run_id) + if stats: + total = stats.get("total_commands", 0) + success = stats.get("successful_commands", 0) + failed = stats.get("failed_commands", 0) + status_str = f"[green]✓{success}[/green]/[red]✗{failed}[/red]/{total}" + else: + cmd_count = len(run.commands) + success_count = sum(1 for c in run.commands if c.status.value == "success") + failed_count = sum(1 for c in run.commands if c.status.value == "failed") + status_str = f"[green]✓{success_count}[/green]/[red]✗{failed_count}[/red]/{cmd_count}" + + commands_list = handler.db.get_commands_list(run.run_id) + + console.print(f"{indent}[bold cyan]{'─' * 60}[/bold cyan]") + console.print(f"{indent}[bold]Run ID:[/bold] {run.run_id}") + console.print(f"{indent}[bold]Query:[/bold] {run.user_query[:60]}{'...' if len(run.user_query) > 60 else ''}") + console.print(f"{indent}[bold]Status:[/bold] {status_str} | [bold]Started:[/bold] {run.started_at[:19] if run.started_at else '-'}") + + if commands_list and len(commands_list) <= 3: + console.print(f"{indent}[bold]Commands:[/bold] {', '.join(cmd[:30] for cmd in commands_list)}") + elif commands_list: + console.print(f"{indent}[bold]Commands:[/bold] {len(commands_list)} commands") + + def _do_setup(self) -> int: + """Setup cortex user for privilege management.""" + from cortex.do_runner import CortexUserManager + + cx_print("Setting up Cortex user for privilege management...", "info") + + if CortexUserManager.user_exists(): + cx_print("✓ Cortex user already exists", "success") + return 0 + + success, message = CortexUserManager.create_user() + if success: + cx_print(f"✓ {message}", "success") return 0 else: - console.print(f"\n[yellow]Found {issues_found} issue(s)[/yellow]") - cx_print("Run 'cortex env path dedupe' to fix PATH duplicates", "info") - return 1 - - def _env_path(self, 
args: argparse.Namespace) -> int: - """Handle PATH management subcommands.""" - from cortex.shell_env_analyzer import Shell, ShellEnvironmentAnalyzer - - path_action = getattr(args, "path_action", None) - - if not path_action: - self._print_error("Please specify a path action (list/add/remove/dedupe/clean)") - return 1 + self._print_error(message) + return 1 + + def _do_protected(self, args: argparse.Namespace) -> int: + """Manage protected paths.""" + from cortex.do_runner import ProtectedPathsManager + + manager = ProtectedPathsManager() + + add_path = getattr(args, "add", None) + remove_path = getattr(args, "remove", None) + list_paths = getattr(args, "list", False) + + if add_path: + manager.add_protected_path(add_path) + cx_print(f"✓ Added '{add_path}' to protected paths", "success") + return 0 + + if remove_path: + if manager.remove_protected_path(remove_path): + cx_print(f"✓ Removed '{remove_path}' from protected paths", "success") + else: + self._print_error(f"Path '{remove_path}' not found in user-defined protected paths") + return 0 + + # Default: list all protected paths + paths = manager.get_all_protected() + console.print("\n[bold]Protected Paths:[/bold]") + console.print("[dim](These paths require user confirmation for access)[/dim]\n") + + for path in paths: + is_system = path in manager.SYSTEM_PROTECTED_PATHS + tag = "[system]" if is_system else "[user]" + console.print(f" {path} [dim]{tag}[/dim]") + + console.print(f"\n[dim]Total: {len(paths)} paths[/dim]") + console.print("[dim]Use --add to add custom paths[/dim]") + return 0 - shell = None - if hasattr(args, "shell") and args.shell: - shell = Shell(args.shell) - - analyzer = ShellEnvironmentAnalyzer(shell=shell) - - if path_action == "list": - return self._env_path_list(analyzer, args) - elif path_action == "add": - return self._env_path_add(analyzer, args) - elif path_action == "remove": - return self._env_path_remove(analyzer, args) - elif path_action == "dedupe": - return 
self._env_path_dedupe(analyzer, args) - elif path_action == "clean": - return self._env_path_clean(analyzer, args) - else: - self._print_error(f"Unknown path action: {path_action}") + # --- Info Command --- + def info_cmd(self, args: argparse.Namespace) -> int: + """Get system and application information using read-only commands.""" + from rich.panel import Panel + from rich.table import Table + + try: + from cortex.system_info_generator import ( + SystemInfoGenerator, + get_system_info_generator, + COMMON_INFO_COMMANDS, + APP_INFO_TEMPLATES, + ) + except ImportError as e: + self._print_error(f"System info generator not available: {e}") return 1 - - def _env_path_list(self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace) -> int: - """List PATH entries with status.""" - as_json = getattr(args, "json", False) - - current_path = os.environ.get("PATH", "") - entries = current_path.split(os.pathsep) - - # Get analysis - audit = analyzer.audit() - - if as_json: - import json - - print(json.dumps([e.to_dict() for e in audit.path_entries], indent=2)) + + debug = getattr(args, "debug", False) + + # Handle --list + if getattr(args, "list", False): + console.print("\n[bold]📊 Available Information Types[/bold]\n") + + console.print("[bold cyan]Quick Info Types (--quick):[/bold cyan]") + for name in sorted(COMMON_INFO_COMMANDS.keys()): + console.print(f" • {name}") + + console.print("\n[bold cyan]Application Templates (--app):[/bold cyan]") + for name in sorted(APP_INFO_TEMPLATES.keys()): + aspects = ", ".join(APP_INFO_TEMPLATES[name].keys()) + console.print(f" • {name}: [dim]{aspects}[/dim]") + + console.print("\n[bold cyan]Categories (--category):[/bold cyan]") + console.print(" hardware, software, network, services, security, storage, performance, configuration") + + console.print("\n[dim]Examples:[/dim]") + console.print(" cortex info --quick cpu") + console.print(" cortex info --app nginx") + console.print(" cortex info --category hardware") + 
console.print(" cortex info What version of Python is installed?") + return 0 + + # Handle --quick + quick_type = getattr(args, "quick", None) + if quick_type: + console.print(f"\n[bold]🔍 Quick Info: {quick_type.upper()}[/bold]\n") + + if quick_type in COMMON_INFO_COMMANDS: + for cmd_info in COMMON_INFO_COMMANDS[quick_type]: + from cortex.ask import CommandValidator + success, stdout, stderr = CommandValidator.execute_command(cmd_info.command) + + if success and stdout: + console.print(Panel( + stdout[:1000] + ("..." if len(stdout) > 1000 else ""), + title=f"[cyan]{cmd_info.purpose}[/cyan]", + subtitle=f"[dim]{cmd_info.command[:60]}...[/dim]" if len(cmd_info.command) > 60 else f"[dim]{cmd_info.command}[/dim]", + )) + elif stderr: + console.print(f"[yellow]⚠ {cmd_info.purpose}: {stderr[:100]}[/yellow]") + else: + self._print_error(f"Unknown quick info type: {quick_type}") + return 1 return 0 - - cx_header("PATH Entries") - - seen: set = set() - for i, entry in enumerate(entries, 1): - if not entry: - continue - - status_icons = [] - - # Check if exists - if not Path(entry).exists(): - status_icons.append("[red]✗ missing[/red]") - - # Check if duplicate - if entry in seen: - status_icons.append("[yellow]⚠ duplicate[/yellow]") - seen.add(entry) - - status = " ".join(status_icons) if status_icons else "[green]✓[/green]" - console.print(f" {i:2d}. 
{entry} {status}") - - duplicates = analyzer.get_path_duplicates() - missing = analyzer.get_missing_paths() - - console.print() - console.print( - f"[dim]Total: {len(entries)} entries, {len(duplicates)} duplicates, {len(missing)} missing[/dim]" - ) - - return 0 - - def _env_path_add(self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace) -> int: - """Add a path entry.""" - import os - from pathlib import Path - - new_path = args.path - prepend = not getattr(args, "append", False) - persist = getattr(args, "persist", False) - - # Resolve to absolute path - new_path = str(Path(new_path).expanduser().resolve()) - - if persist: - # When persisting, check the config file, not current PATH + + # Handle --app + app_name = getattr(args, "app", None) + if app_name: + console.print(f"\n[bold]📦 Application Info: {app_name.upper()}[/bold]\n") + + if app_name.lower() in APP_INFO_TEMPLATES: + templates = APP_INFO_TEMPLATES[app_name.lower()] + for aspect, commands in templates.items(): + console.print(f"[bold cyan]─── {aspect.upper()} ───[/bold cyan]") + for cmd_info in commands: + from cortex.ask import CommandValidator + success, stdout, stderr = CommandValidator.execute_command(cmd_info.command, timeout=15) + + if success and stdout: + output = stdout[:500] + ("..." 
if len(stdout) > 500 else "") + console.print(f"[dim]{cmd_info.purpose}:[/dim]") + console.print(output) + elif stderr: + console.print(f"[yellow]{cmd_info.purpose}: {stderr[:100]}[/yellow]") + console.print() + else: + # Try using LLM for unknown apps + api_key = self._get_api_key() + if api_key: + try: + generator = SystemInfoGenerator( + api_key=api_key, + provider=self._get_provider(), + debug=debug, + ) + result = generator.get_app_info(app_name) + console.print(result.answer) + except Exception as e: + self._print_error(f"Could not get info for {app_name}: {e}") + return 1 + else: + self._print_error(f"Unknown app '{app_name}' and no API key for LLM lookup") + return 1 + return 0 + + # Handle --category + category = getattr(args, "category", None) + if category: + console.print(f"\n[bold]📊 Category Info: {category.upper()}[/bold]\n") + + api_key = self._get_api_key() + if not api_key: + # Fall back to running common commands without LLM + category_mapping = { + "hardware": ["cpu", "memory", "disk", "gpu"], + "software": ["os", "kernel"], + "network": ["network", "dns"], + "services": ["services"], + "security": ["security"], + "storage": ["disk"], + "performance": ["cpu", "memory", "processes"], + "configuration": ["environment"], + } + aspects = category_mapping.get(category, []) + for aspect in aspects: + if aspect in COMMON_INFO_COMMANDS: + console.print(f"[bold cyan]─── {aspect.upper()} ───[/bold cyan]") + for cmd_info in COMMON_INFO_COMMANDS[aspect]: + from cortex.ask import CommandValidator + success, stdout, _ = CommandValidator.execute_command(cmd_info.command) + if success and stdout: + console.print(stdout[:400]) + console.print() + return 0 + try: - config_path = analyzer.get_shell_config_path() - # Check if already in config - config_content = "" - if os.path.exists(config_path): - with open(config_path) as f: - config_content = f.read() - - # Check if path is in a cortex-managed block - if ( - f'export PATH="{new_path}:$PATH"' in config_content - 
or f'export PATH="$PATH:{new_path}"' in config_content - ): - cx_print(f"'{new_path}' is already in {config_path}", "info") - return 0 - - analyzer.add_path_to_config(new_path, prepend=prepend) - cx_print(f"✓ Added '{new_path}' to {config_path}", "success") - console.print(f"[dim]To use in current shell: source {config_path}[/dim]") + generator = SystemInfoGenerator( + api_key=api_key, + provider=self._get_provider(), + debug=debug, + ) + result = generator.get_structured_info(category) + console.print(result.answer) except Exception as e: - self._print_error(f"Failed to persist: {e}") + self._print_error(f"Could not get category info: {e}") return 1 - else: - # Check if already in current PATH (for non-persist mode) - current_path = os.environ.get("PATH", "") - if new_path in current_path.split(os.pathsep): - cx_print(f"'{new_path}' is already in PATH", "info") - return 0 - - # Only modify current process env (won't persist across commands) - updated = analyzer.safe_add_path(new_path, prepend=prepend) - os.environ["PATH"] = updated - position = "prepended to" if prepend else "appended to" - cx_print(f"✓ '{new_path}' {position} PATH (this process only)", "success") - cx_print("Note: Add --persist to make this permanent", "info") - - return 0 - - def _env_path_remove( - self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace - ) -> int: - """Remove a path entry.""" - import os - - target_path = args.path - persist = getattr(args, "persist", False) - - if persist: - # When persisting, remove from config file + return 0 + + # Handle natural language query + query_parts = getattr(args, "query", []) + if query_parts: + query = " ".join(query_parts) + console.print(f"\n[bold]🔍 System Info Query[/bold]\n") + console.print(f"[dim]Query: {query}[/dim]\n") + + api_key = self._get_api_key() + if not api_key: + self._print_error("Natural language queries require an API key. 
Use --quick or --app instead.") + return 1 + try: - config_path = analyzer.get_shell_config_path() - result = analyzer.remove_path_from_config(target_path) - if result: - cx_print(f"✓ Removed '{target_path}' from {config_path}", "success") - console.print(f"[dim]To update current shell: source {config_path}[/dim]") - else: - cx_print(f"'{target_path}' was not in cortex-managed config block", "info") + generator = SystemInfoGenerator( + api_key=api_key, + provider=self._get_provider(), + debug=debug, + ) + result = generator.get_info(query) + + console.print(Panel(result.answer, title="[bold green]Answer[/bold green]")) + + if debug and result.commands_executed: + table = Table(title="Commands Executed") + table.add_column("Command", style="cyan", max_width=50) + table.add_column("Status", style="green") + table.add_column("Time", style="dim") + for cmd in result.commands_executed: + status = "✓" if cmd.success else "✗" + table.add_row( + cmd.command[:50] + "..." if len(cmd.command) > 50 else cmd.command, + status, + f"{cmd.execution_time:.2f}s" + ) + console.print(table) + except Exception as e: - self._print_error(f"Failed to persist removal: {e}") + self._print_error(f"Query failed: {e}") + if debug: + import traceback + traceback.print_exc() return 1 - else: - # Only modify current process env (won't persist across commands) - current_path = os.environ.get("PATH", "") - if target_path not in current_path.split(os.pathsep): - cx_print(f"'{target_path}' is not in current PATH", "info") - return 0 - - updated = analyzer.safe_remove_path(target_path) - os.environ["PATH"] = updated - cx_print(f"✓ Removed '{target_path}' from PATH (this process only)", "success") - cx_print("Note: Add --persist to make this permanent", "info") - - return 0 - - def _env_path_dedupe( - self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace - ) -> int: - """Remove duplicate PATH entries.""" - import os - - dry_run = getattr(args, "dry_run", False) - persist = getattr(args, 
"persist", False) - - duplicates = analyzer.get_path_duplicates() - - if not duplicates: - cx_print("✓ No duplicate PATH entries found", "success") - return 0 - - cx_header("PATH Deduplication") - console.print(f"[yellow]Found {len(duplicates)} duplicate(s):[/yellow]") - for dup in duplicates: - console.print(f" • {dup}") - - if dry_run: - console.print("\n[dim]Dry run - no changes made[/dim]") - clean_path = analyzer.dedupe_path() - console.print("\n[bold]Cleaned PATH would be:[/bold]") - for entry in clean_path.split(os.pathsep)[:10]: - console.print(f" {entry}") - if len(clean_path.split(os.pathsep)) > 10: - console.print(" [dim]... and more[/dim]") return 0 - - # Apply deduplication - clean_path = analyzer.dedupe_path() - os.environ["PATH"] = clean_path - cx_print(f"✓ Removed {len(duplicates)} duplicate(s) from PATH (current session)", "success") - - if persist: - script = analyzer.generate_path_fix_script() - console.print("\n[bold]Add this to your shell config for persistence:[/bold]") - console.print(f"[dim]{script}[/dim]") - + + # No arguments - show help + console.print("\n[bold]📊 Cortex Info - System Information Generator[/bold]\n") + console.print("Get system and application information using read-only commands.\n") + console.print("[bold cyan]Usage:[/bold cyan]") + console.print(" cortex info --list List available info types") + console.print(" cortex info --quick Quick lookup (cpu, memory, etc.)") + console.print(" cortex info --app Application info (nginx, docker, etc.)") + console.print(" cortex info --category Category info (hardware, network, etc.)") + console.print(" cortex info Natural language query (requires API key)") + console.print("\n[bold cyan]Examples:[/bold cyan]") + console.print(" cortex info --quick memory") + console.print(" cortex info --app nginx") + console.print(" cortex info --category hardware") + console.print(" cortex info What Python packages are installed?") return 0 - def _env_path_clean( - self, analyzer: 
"ShellEnvironmentAnalyzer", args: argparse.Namespace - ) -> int: - """Clean PATH by removing duplicates and optionally missing paths.""" - import os - - remove_missing = getattr(args, "remove_missing", False) - dry_run = getattr(args, "dry_run", False) - - duplicates = analyzer.get_path_duplicates() - missing = analyzer.get_missing_paths() if remove_missing else [] - - total_issues = len(duplicates) + len(missing) - - if total_issues == 0: - cx_print("✓ PATH is already clean", "success") + # --- Watch Command --- + def watch_cmd(self, args: argparse.Namespace) -> int: + """Manage terminal watching for manual intervention mode.""" + from rich.panel import Panel + from cortex.do_runner.terminal import TerminalMonitor + + monitor = TerminalMonitor(use_llm=False) + system_wide = getattr(args, "system", False) + as_service = getattr(args, "service", False) + + if getattr(args, "install", False): + if as_service: + # Install as systemd service + console.print("\n[bold cyan]🔧 Installing Cortex Watch Service[/bold cyan]") + console.print("[dim]This will create a systemd user service that runs automatically[/dim]\n") + + from cortex.watch_service import install_service + success, msg = install_service() + console.print(msg) + return 0 if success else 1 + elif system_wide: + console.print("\n[bold cyan]🔧 Installing System-Wide Terminal Watch Hook[/bold cyan]") + console.print("[dim]This will install to /etc/profile.d/ (requires sudo)[/dim]\n") + success, msg = monitor.setup_system_wide_watch() + if success: + console.print(f"[green]{msg}[/green]") + console.print("\n[bold green]✓ All new terminals will automatically have Cortex watching![/bold green]") + else: + console.print(f"[red]✗ {msg}[/red]") + return 1 + else: + console.print("\n[bold cyan]🔧 Installing Terminal Watch Hook[/bold cyan]\n") + success, msg = monitor.setup_auto_watch(permanent=True) + if success: + console.print(f"[green]✓ {msg}[/green]") + console.print("\n[yellow]Note: New terminals will have the hook 
automatically.[/yellow]") + console.print("[yellow]For existing terminals, run:[/yellow]") + console.print(f"[green]source ~/.cortex/watch_hook.sh[/green]") + console.print("\n[dim]Tip: For automatic activation in ALL terminals, run:[/dim]") + console.print("[cyan]cortex watch --install --system[/cyan]") + else: + console.print(f"[red]✗ {msg}[/red]") + return 1 return 0 - - cx_header("PATH Cleanup") - - if duplicates: - console.print(f"[yellow]Duplicates ({len(duplicates)}):[/yellow]") - for d in duplicates[:5]: - console.print(f" • {d}") - if len(duplicates) > 5: - console.print(f" [dim]... and {len(duplicates) - 5} more[/dim]") - - if missing: - console.print(f"\n[red]Missing paths ({len(missing)}):[/red]") - for m in missing[:5]: - console.print(f" • {m}") - if len(missing) > 5: - console.print(f" [dim]... and {len(missing) - 5} more[/dim]") - - if dry_run: - clean_path = analyzer.clean_path(remove_missing=remove_missing) - console.print("\n[dim]Dry run - no changes made[/dim]") - console.print( - f"[bold]Would reduce PATH from {len(os.environ.get('PATH', '').split(os.pathsep))} to {len(clean_path.split(os.pathsep))} entries[/bold]" - ) + + if getattr(args, "uninstall", False): + if as_service: + console.print("\n[bold cyan]🔧 Removing Cortex Watch Service[/bold cyan]\n") + from cortex.watch_service import uninstall_service + success, msg = uninstall_service() + elif system_wide: + console.print("\n[bold cyan]🔧 Removing System-Wide Terminal Watch Hook[/bold cyan]\n") + success, msg = monitor.uninstall_system_wide_watch() + else: + console.print("\n[bold cyan]🔧 Removing Terminal Watch Hook[/bold cyan]\n") + success, msg = monitor.remove_auto_watch() + if success: + console.print(f"[green]{msg}[/green]") + else: + console.print(f"[red]✗ {msg}[/red]") + return 1 return 0 - - # Apply cleanup - clean_path = analyzer.clean_path(remove_missing=remove_missing) - old_count = len(os.environ.get("PATH", "").split(os.pathsep)) - new_count = len(clean_path.split(os.pathsep)) 
- os.environ["PATH"] = clean_path - - cx_print(f"✓ Cleaned PATH: {old_count} → {new_count} entries", "success") - - # Show fix script - script = analyzer.generate_path_fix_script() - if "no fixes needed" not in script: - console.print("\n[bold]To make permanent, add to your shell config:[/bold]") - console.print(f"[dim]{script}[/dim]") - + + if getattr(args, "test", False): + console.print("\n[bold cyan]🧪 Testing Terminal Monitoring[/bold cyan]\n") + monitor.test_monitoring() + return 0 + + if getattr(args, "status", False): + console.print("\n[bold cyan]📊 Terminal Watch Status[/bold cyan]\n") + + from pathlib import Path + bashrc = Path.home() / ".bashrc" + zshrc = Path.home() / ".zshrc" + source_file = Path.home() / ".cortex" / "watch_hook.sh" + watch_log = Path.home() / ".cortex" / "terminal_watch.log" + system_hook = Path("/etc/profile.d/cortex-watch.sh") + service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" + + console.print("[bold]Service Status:[/bold]") + + # Check systemd service + if service_file.exists(): + try: + result = subprocess.run( + ["systemctl", "--user", "is-active", "cortex-watch.service"], + capture_output=True, text=True, timeout=5 + ) + is_active = result.stdout.strip() == "active" + if is_active: + console.print(" [bold green]✓ SYSTEMD SERVICE RUNNING[/bold green]") + console.print(" [dim]Automatic terminal monitoring active[/dim]") + else: + console.print(" [yellow]○ Systemd service installed but not running[/yellow]") + console.print(" [dim]Run: systemctl --user start cortex-watch[/dim]") + except Exception: + console.print(" [yellow]○ Systemd service installed (status unknown)[/yellow]") + else: + console.print(" [dim]○ Systemd service not installed[/dim]") + console.print(" [dim]Run: cortex watch --install --service (recommended)[/dim]") + + console.print() + console.print("[bold]Hook Status:[/bold]") + + # System-wide check + if system_hook.exists(): + console.print(" [green]✓ System-wide hook 
installed[/green]") + else: + console.print(" [dim]○ System-wide hook not installed[/dim]") + + # User-level checks + if bashrc.exists() and "Cortex Terminal Watch Hook" in bashrc.read_text(): + console.print(" [green]✓ Hook installed in .bashrc[/green]") + else: + console.print(" [dim]○ Not installed in .bashrc[/dim]") + + if zshrc.exists() and "Cortex Terminal Watch Hook" in zshrc.read_text(): + console.print(" [green]✓ Hook installed in .zshrc[/green]") + else: + console.print(" [dim]○ Not installed in .zshrc[/dim]") + + console.print("\n[bold]Watch Log:[/bold]") + if watch_log.exists(): + size = watch_log.stat().st_size + lines = len(watch_log.read_text().strip().split('\n')) if size > 0 else 0 + console.print(f" [green]✓ Log file exists: {watch_log}[/green]") + console.print(f" [dim] Size: {size} bytes, {lines} commands logged[/dim]") + else: + console.print(f" [dim]○ No log file yet (created when commands are run)[/dim]") + + return 0 + + # Default: show help + console.print() + console.print(Panel( + "[bold cyan]Terminal Watch[/bold cyan] - Real-time monitoring for manual intervention mode\n\n" + "When Cortex enters manual intervention mode, it watches your other terminals\n" + "to provide real-time feedback and AI-powered suggestions.\n\n" + "[bold]Commands:[/bold]\n" + " [cyan]cortex watch --install --service[/cyan] Install as systemd service (RECOMMENDED)\n" + " [cyan]cortex watch --install --system[/cyan] Install system-wide hook (requires sudo)\n" + " [cyan]cortex watch --install[/cyan] Install hook to .bashrc/.zshrc\n" + " [cyan]cortex watch --uninstall --service[/cyan] Remove systemd service\n" + " [cyan]cortex watch --status[/cyan] Show installation status\n" + " [cyan]cortex watch --test[/cyan] Test monitoring setup\n\n" + "[bold green]Recommended Setup:[/bold green]\n" + " Run [green]cortex watch --install --service[/green]\n\n" + " This creates a background service that:\n" + " • Starts automatically on login\n" + " • Restarts if it crashes\n" + " 
• Monitors ALL terminal activity\n" + " • No manual setup in each terminal!", + title="[green]🔍 Cortex Watch[/green]", + border_style="cyan", + )) return 0 # --- Import Dependencies Command --- @@ -4257,7 +2314,7 @@ def _import_all(self, importer: DependencyImporter, execute: bool, include_dev: # Execute mode - confirm before installing total = total_packages + total_dev_packages - confirm = StdinHandler.get_input(f"\nInstall all {total} packages? [Y/n]: ") + confirm = input(f"\nInstall all {total} packages? [Y/n]: ") if confirm.lower() not in ["", "y", "yes"]: cx_print("Installation cancelled", "info") return 0 @@ -4276,6 +2333,7 @@ def _display_parse_result(self, result: ParseResult, include_dev: bool) -> None: } ecosystem_name = ecosystem_names.get(result.ecosystem, "Unknown") + filename = os.path.basename(result.file_path) cx_print(f"\n📋 Found {result.prod_count} {ecosystem_name} packages", "info") @@ -4319,251 +2377,72 @@ def progress_callback(current: int, total: int, step: InstallationStep) -> None: status_emoji = "✅" elif step.status == StepStatus.FAILED: status_emoji = "❌" - console.print(f"[{current}/{total}] {status_emoji} {step.description}") - - coordinator = InstallationCoordinator( - commands=[command], - descriptions=[f"Install {ecosystem_name} packages"], - timeout=600, # 10 minutes for package installation - stop_on_error=True, - progress_callback=progress_callback, - ) - - result = coordinator.execute() - - if result.success: - self._print_success(f"{ecosystem_name} packages installed successfully!") - console.print(f"Completed in {result.total_duration:.2f} seconds") - return 0 - else: - self._print_error(self.INSTALL_FAIL_MSG) - if result.error_message: - console.print(f"Error: {result.error_message}", style="red") - return 1 - - def _execute_multi_install(self, commands: list[dict[str, str]]) -> int: - """Execute multiple install commands.""" - all_commands = [cmd["command"] for cmd in commands] - all_descriptions = [cmd["description"] for cmd 
in commands] - - def progress_callback(current: int, total: int, step: InstallationStep) -> None: - status_emoji = "⏳" - if step.status == StepStatus.SUCCESS: - status_emoji = "✅" - elif step.status == StepStatus.FAILED: - status_emoji = "❌" - console.print(f"\n[{current}/{total}] {status_emoji} {step.description}") - console.print(f" Command: {step.command}") - - coordinator = InstallationCoordinator( - commands=all_commands, - descriptions=all_descriptions, - timeout=600, - stop_on_error=True, - progress_callback=progress_callback, - ) - - console.print("\n[bold]Installing packages...[/bold]") - result = coordinator.execute() - - if result.success: - self._print_success("\nAll packages installed successfully!") - console.print(f"Completed in {result.total_duration:.2f} seconds") - return 0 - else: - if result.failed_step is not None: - self._print_error(f"\n{self.INSTALL_FAIL_MSG} at step {result.failed_step + 1}") - else: - self._print_error(f"\n{self.INSTALL_FAIL_MSG}") - if result.error_message: - console.print(f"Error: {result.error_message}", style="red") - return 1 - - def doctor(self) -> int: - """Run system health checks.""" - from cortex.doctor import SystemDoctor - - doc = SystemDoctor() - return doc.run_checks() - - def troubleshoot(self, no_execute: bool = False) -> int: - """Run interactive troubleshooter.""" - from cortex.troubleshoot import Troubleshooter - - troubleshooter = Troubleshooter(no_execute=no_execute) - return troubleshooter.start() - - # -------------------------- - - -def _is_ascii(s: str) -> bool: - """Check if a string contains only ASCII characters.""" - try: - s.encode("ascii") - return True - except UnicodeEncodeError: - return False - - -def _normalize_for_lookup(s: str) -> str: - """ - Normalize a string for lookup, handling Latin and non-Latin scripts differently. 
- - For ASCII/Latin text: casefold for case-insensitive matching (handles accented chars) - For non-Latin text (e.g., 中文): keep unchanged to preserve meaning - - Uses casefold() instead of lower() because: - - casefold() handles accented Latin characters better (e.g., "Español", "Français") - - casefold() is more aggressive and handles edge cases like German ß -> ss - - This prevents issues like: - - "中文".lower() producing the same string but creating duplicate keys - - Meaningless normalization of non-Latin scripts - """ - if _is_ascii(s): - return s.casefold() - # For non-ASCII Latin scripts (accented chars like é, ñ, ü), use casefold - # Only keep unchanged for truly non-Latin scripts (CJK, Arabic, etc.) - try: - # Check if string contains any Latin characters (a-z, A-Z, or accented) - # If it does, it's likely a Latin-based language name - import unicodedata - - has_latin = any(unicodedata.category(c).startswith("L") and ord(c) < 0x3000 for c in s) - if has_latin: - return s.casefold() - except Exception: - pass - return s - - -def _resolve_language_name(name: str) -> str | None: - """ - Resolve a language name or code to a supported language code. - - Accepts: - - Language codes: en, es, fr, de, zh - - English names: English, Spanish, French, German, Chinese - - Native names: Español, Français, Deutsch, 中文 - - Args: - name: Language name or code (case-insensitive for Latin scripts) - - Returns: - Language code if found, None otherwise - - Note: - Non-Latin scripts (e.g., Chinese 中文) are matched exactly without - case normalization, since .lower() is meaningless for these scripts - and could create key collisions. 
- """ - name = name.strip() - name_normalized = _normalize_for_lookup(name) - - # Direct code match (codes are always ASCII/lowercase) - if name_normalized in SUPPORTED_LANGUAGES: - return name_normalized - - # Build lookup tables for names - # Using a list of tuples to handle potential key collisions properly - name_to_code: dict[str, str] = {} - - for code, info in SUPPORTED_LANGUAGES.items(): - english_name = info["name"] - native_name = info["native"] - - # English names are always ASCII, use casefold for case-insensitive matching - name_to_code[english_name.casefold()] = code - - # Native names: normalize using _normalize_for_lookup - # - Latin scripts (Español, Français): casefold for case-insensitive matching - # - Non-Latin scripts (中文): store as-is only - native_normalized = _normalize_for_lookup(native_name) - name_to_code[native_normalized] = code - - # Also store original native name for exact match - # (handles case where user types exactly "Español" with correct accent) - if native_name != native_normalized: - name_to_code[native_name] = code - - # Try to find a match using normalized input - if name_normalized in name_to_code: - return name_to_code[name_normalized] - - # Try exact match for non-ASCII input - if name in name_to_code: - return name_to_code[name] - - return None - - -def _handle_set_language(language_input: str) -> int: - """ - Handle the --set-language global flag. 
- - Args: - language_input: Language name or code from user + console.print(f"[{current}/{total}] {status_emoji} {step.description}") - Returns: - Exit code (0 for success, 1 for error) - """ - # Resolve the language name to a code - lang_code = _resolve_language_name(language_input) + coordinator = InstallationCoordinator( + commands=[command], + descriptions=[f"Install {ecosystem_name} packages"], + timeout=600, # 10 minutes for package installation + stop_on_error=True, + progress_callback=progress_callback, + ) - if not lang_code: - # Show error with available options - cx_print(t("language.invalid_code", code=language_input), "error") - console.print() - console.print(f"[bold]{t('language.supported_languages_header')}[/bold]") - for code, info in SUPPORTED_LANGUAGES.items(): - console.print(f" • {info['name']} ({info['native']}) - code: [green]{code}[/green]") - return 1 + result = coordinator.execute() - # Set the language - try: - lang_config = LanguageConfig() - lang_config.set_language(lang_code) + if result.success: + self._print_success(f"{ecosystem_name} packages installed successfully!") + console.print(f"Completed in {result.total_duration:.2f} seconds") + return 0 + else: + self._print_error("Installation failed") + if result.error_message: + console.print(f"Error: {result.error_message}", style="red") + return 1 - # Reset and update global translator - from cortex.i18n.translator import reset_translator + def _execute_multi_install(self, commands: list[dict[str, str]]) -> int: + """Execute multiple install commands.""" + all_commands = [cmd["command"] for cmd in commands] + all_descriptions = [cmd["description"] for cmd in commands] - reset_translator() - set_language(lang_code) + def progress_callback(current: int, total: int, step: InstallationStep) -> None: + status_emoji = "⏳" + if step.status == StepStatus.SUCCESS: + status_emoji = "✅" + elif step.status == StepStatus.FAILED: + status_emoji = "❌" + console.print(f"\n[{current}/{total}] 
{status_emoji} {step.description}") + console.print(f" Command: {step.command}") - lang_info = SUPPORTED_LANGUAGES[lang_code] - cx_print(t("language.changed", language=lang_info["native"]), "success") - return 0 - except Exception as e: - cx_print(t("language.set_failed", error=str(e)), "error") - return 1 + coordinator = InstallationCoordinator( + commands=all_commands, + descriptions=all_descriptions, + timeout=600, + stop_on_error=True, + progress_callback=progress_callback, + ) - def dashboard(self) -> int: - """Launch the real-time system monitoring dashboard""" - try: - from cortex.dashboard import DashboardApp + console.print("\n[bold]Installing packages...[/bold]") + result = coordinator.execute() - app = DashboardApp() - rc = app.run() - return rc if isinstance(rc, int) else 0 - except ImportError as e: - self._print_error(f"Dashboard dependencies not available: {e}") - cx_print("Install required packages with:", "info") - cx_print(" pip install psutil>=5.9.0 nvidia-ml-py>=12.0.0", "info") - return 1 - except KeyboardInterrupt: + if result.success: + self._print_success("\nAll packages installed successfully!") + console.print(f"Completed in {result.total_duration:.2f} seconds") return 0 - except Exception as e: - self._print_error(f"Dashboard error: {e}") + else: + if result.failed_step is not None: + self._print_error(f"\nInstallation failed at step {result.failed_step + 1}") + else: + self._print_error("\nInstallation failed") + if result.error_message: + console.print(f"Error: {result.error_message}", style="red") return 1 + # -------------------------- -def show_rich_help(): - """Display a beautifully formatted help table using the Rich library. - This function outputs the primary command menu, providing descriptions - for all core Cortex utilities including installation, environment - management, and container tools. 
- """ +def show_rich_help(): + """Display beautifully formatted help using Rich""" + from rich.table import Table show_banner(show_version=True) console.print() @@ -4572,35 +2451,27 @@ def show_rich_help(): console.print("[dim]Just tell Cortex what you want to install.[/dim]") console.print() - # Initialize a table to display commands with specific column styling + # Commands table table = Table(show_header=True, header_style="bold cyan", box=None) table.add_column("Command", style="green") table.add_column("Description") - # Command Rows table.add_row("ask ", "Ask about your system") - table.add_row("voice", "Voice input mode (F9 to speak)") + table.add_row("ask --do ", "Solve problems (can write/execute)") + table.add_row("do history", "View do-mode run history") table.add_row("demo", "See Cortex in action") table.add_row("wizard", "Configure API key") table.add_row("status", "System status") table.add_row("install ", "Install software") - table.add_row("remove ", "Remove packages with impact analysis") - table.add_row("install --mic", "Install via voice input") table.add_row("import ", "Import deps from package files") table.add_row("history", "View history") table.add_row("rollback ", "Undo installation") - table.add_row("role", "AI-driven system role detection") - table.add_row("stack ", "Install the stack") - table.add_row("dashboard", "Real-time system monitoring dashboard") table.add_row("notify", "Manage desktop notifications") table.add_row("env", "Manage environment variables") table.add_row("cache stats", "Show LLM cache statistics") - table.add_row("docker permissions", "Fix Docker bind-mount permissions") + table.add_row("stack ", "Install the stack") table.add_row("sandbox ", "Test packages in Docker sandbox") - table.add_row("update", "Check for and install updates") - table.add_row("daemon ", "Manage the cortexd background daemon") table.add_row("doctor", "System health check") - table.add_row("troubleshoot", "Interactive system troubleshooter") 
console.print(table) console.print() @@ -4653,21 +2524,6 @@ def main(): # Network config is optional - don't block execution if it fails console.print(f"[yellow]⚠️ Network auto-config failed: {e}[/yellow]") - # Check for updates on startup (cached, non-blocking) - # Only show notification for commands that aren't 'update' itself - try: - if temp_args.command not in ["update", None] and "--json" not in sys.argv: - update_release = should_notify_update() - if update_release: - console.print( - f"[cyan]🔔 Cortex update available:[/cyan] " - f"[green]{update_release.version}[/green]" - ) - console.print(" [dim]Run 'cortex update' to upgrade[/dim]") - console.print() - except Exception: - pass # Don't block CLI on update check failures - parser = argparse.ArgumentParser( prog="cortex", description="AI-powered Linux command interpreter", @@ -4677,131 +2533,31 @@ def main(): # Global flags parser.add_argument("--version", "-V", action="version", version=f"cortex {VERSION}") parser.add_argument("--verbose", "-v", action="store_true", help="Show detailed output") - parser.add_argument( - "--set-language", - "--language", - dest="set_language", - metavar="LANG", - help="Set display language (e.g., English, Spanish, Español, es, zh)", - ) subparsers = parser.add_subparsers(dest="command", help="Available commands") - # Define the docker command and its associated sub-actions - docker_parser = subparsers.add_parser("docker", help="Docker and container utilities") - docker_subs = docker_parser.add_subparsers(dest="docker_action", help="Docker actions") - - # Add the permissions action to allow fixing file ownership issues - perm_parser = docker_subs.add_parser( - "permissions", help="Fix file permissions from bind mounts" - ) - - # Provide an option to skip the manual confirmation prompt - perm_parser.add_argument("--yes", "-y", action="store_true", help=HELP_SKIP_CONFIRM) - - perm_parser.add_argument( - "--execute", "-e", action="store_true", help="Apply ownership changes 
(default: dry-run)" - ) - # Demo command - subparsers.add_parser("demo", help="See Cortex in action") - - # Dashboard command - dashboard_parser = subparsers.add_parser( - "dashboard", help="Real-time system monitoring dashboard" - ) + demo_parser = subparsers.add_parser("demo", help="See Cortex in action") # Wizard command - subparsers.add_parser("wizard", help="Configure API key interactively") + wizard_parser = subparsers.add_parser("wizard", help="Configure API key interactively") # Status command (includes comprehensive health checks) subparsers.add_parser("status", help="Show comprehensive system status and health checks") - # Benchmark command - benchmark_parser = subparsers.add_parser("benchmark", help="Run AI performance benchmark") - benchmark_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") - - # Systemd helper command - systemd_parser = subparsers.add_parser("systemd", help="Systemd service helper (plain English)") - systemd_parser.add_argument("service", help="Service name") - systemd_parser.add_argument( - "action", - nargs="?", - default="status", - choices=["status", "diagnose", "deps"], - help="Action: status (default), diagnose, deps", - ) - systemd_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") - - # GPU manager command - gpu_parser = subparsers.add_parser("gpu", help="Hybrid GPU (Optimus) manager") - gpu_parser.add_argument( - "action", - nargs="?", - default="status", - choices=["status", "modes", "switch", "apps"], - help="Action: status (default), modes, switch, apps", - ) - gpu_parser.add_argument( - "mode", nargs="?", help="Mode for switch action (integrated/hybrid/nvidia)" - ) - gpu_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") - - # Printer/Scanner setup command - printer_parser = subparsers.add_parser("printer", help="Printer/Scanner auto-setup") - printer_parser.add_argument( - "action", - nargs="?", - default="status", - 
choices=["status", "detect"], - help="Action: status (default), detect", - ) - printer_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") - # Ask command ask_parser = subparsers.add_parser("ask", help="Ask a question about your system") - ask_parser.add_argument("question", nargs="?", type=str, help="Natural language question") + ask_parser.add_argument("question", type=str, nargs="?", default=None, help="Natural language question (optional with --do)") + ask_parser.add_argument("--debug", action="store_true", help="Show debug output for agentic loop") ask_parser.add_argument( - "--mic", - action="store_true", - help="Use voice input (press F9 to record)", - ) - - # Voice command - continuous voice mode - voice_parser = subparsers.add_parser( - "voice", help="Voice input mode (F9 to speak, Ctrl+C to exit)" - ) - voice_parser.add_argument( - "--single", - "-s", - action="store_true", - help="Record single input and exit (default: continuous mode)", - ) - voice_parser.add_argument( - "--model", - "-m", - type=str, - default=None, - metavar="MODEL", - choices=[ - "tiny.en", - "base.en", - "small.en", - "medium.en", - "tiny", - "base", - "small", - "medium", - "large", - ], - help="Whisper model to use (default: base.en or CORTEX_WHISPER_MODEL env var). " - "Available models: tiny.en (39MB), base.en (140MB), small.en (466MB), " - "medium.en (1.5GB), tiny/base/small/medium (multilingual), large (6GB).", + "--do", + action="store_true", + help="Enable do mode - Cortex can write, read, and execute commands to solve problems. If no question is provided, starts interactive session." 
) # Install command install_parser = subparsers.add_parser("install", help="Install software") - install_parser.add_argument("software", nargs="?", type=str, help="Software to install") + install_parser.add_argument("software", type=str, help="Software to install") install_parser.add_argument("--execute", action="store_true", help="Execute commands") install_parser.add_argument("--dry-run", action="store_true", help="Show commands only") install_parser.add_argument( @@ -4809,57 +2565,6 @@ def main(): action="store_true", help="Enable parallel execution for multi-step installs", ) - install_parser.add_argument( - "--json", - action="store_true", - help="Output as JSON", - ) - install_parser.add_argument( - "--mic", - action="store_true", - help="Use voice input for software name (press F9 to record)", - ) - - # Remove command - uninstall with impact analysis - remove_parser = subparsers.add_parser( - "remove", - help="Remove packages with impact analysis", - description="Analyze and remove packages safely with dependency impact analysis.", - ) - remove_parser.add_argument("package", type=str, help="Package to remove") - remove_parser.add_argument( - "--dry-run", - action="store_true", - default=True, - help="Show impact analysis without removing (default)", - ) - remove_parser.add_argument( - "--execute", - action="store_true", - help="Actually remove the package after analysis", - ) - remove_parser.add_argument( - "--purge", - action="store_true", - help="Also remove configuration files", - ) - remove_parser.add_argument( - "--force", - "-f", - action="store_true", - help="Force removal even if impact is high", - ) - remove_parser.add_argument( - "-y", - "--yes", - action="store_true", - help=HELP_SKIP_CONFIRM, - ) - remove_parser.add_argument( - "--json", - action="store_true", - help="Output impact analysis as JSON", - ) # Import command - import dependencies from package manager files import_parser = subparsers.add_parser( @@ -4920,29 +2625,6 @@ def main(): 
send_parser.add_argument("--actions", nargs="*", help="Action buttons") # -------------------------- - # Role Management Commands - # This parser defines the primary interface for system personality and contextual sensing. - role_parser = subparsers.add_parser( - "role", help="AI-driven system personality and context management" - ) - role_subs = role_parser.add_subparsers(dest="role_action", help="Role actions") - - # Subcommand: role detect - # Dynamically triggers the sensing layer to analyze system context and suggest roles. - role_subs.add_parser( - "detect", help="Dynamically sense system context and shell patterns to suggest an AI role" - ) - - # Subcommand: role set - # Allows manual override for role persistence and provides tailored recommendations. - role_set_parser = role_subs.add_parser( - "set", help="Manually override the system role and receive tailored recommendations" - ) - role_set_parser.add_argument( - "role_slug", - help="The role identifier (e.g., 'data-scientist', 'web-server', 'ml-workstation')", - ) - # Stack command stack_parser = subparsers.add_parser("stack", help="Manage pre-built package stacks") stack_parser.add_argument( @@ -4959,83 +2641,6 @@ def main(): cache_subs = cache_parser.add_subparsers(dest="cache_action", help="Cache actions") cache_subs.add_parser("stats", help="Show cache statistics") - # --- Config commands (including language settings) --- - config_parser = subparsers.add_parser("config", help="Configure Cortex settings") - config_subs = config_parser.add_subparsers(dest="config_action", help="Configuration actions") - - # config language - set language - config_lang_parser = config_subs.add_parser("language", help="Set display language") - config_lang_parser.add_argument( - "code", - nargs="?", - help="Language code (en, es, fr, de, zh) or 'auto' for auto-detection", - ) - config_lang_parser.add_argument( - "--list", "-l", action="store_true", help="List available languages" - ) - config_lang_parser.add_argument( - 
"--info", "-i", action="store_true", help="Show current language configuration" - ) - - # config show - show all configuration - config_subs.add_parser("show", help="Show all current configuration") - - # --- Daemon Commands --- - daemon_parser = subparsers.add_parser("daemon", help="Manage the cortexd background daemon") - daemon_subs = daemon_parser.add_subparsers(dest="daemon_action", help="Daemon actions") - - # daemon install [--execute] - daemon_install_parser = daemon_subs.add_parser( - "install", help="Install and enable the daemon service" - ) - daemon_install_parser.add_argument( - "--execute", action="store_true", help="Actually run the installation" - ) - - # daemon uninstall [--execute] - daemon_uninstall_parser = daemon_subs.add_parser( - "uninstall", help="Stop and remove the daemon service" - ) - daemon_uninstall_parser.add_argument( - "--execute", action="store_true", help="Actually run the uninstallation" - ) - - # daemon config - uses config.get IPC handler - daemon_subs.add_parser("config", help="Show current daemon configuration") - - # daemon reload-config - uses config.reload IPC handler - daemon_subs.add_parser("reload-config", help="Reload daemon configuration from disk") - - # daemon version - uses version IPC handler - daemon_subs.add_parser("version", help="Show daemon version") - - # daemon ping - uses ping IPC handler - daemon_subs.add_parser("ping", help="Test daemon connectivity") - - # daemon shutdown - uses shutdown IPC handler - daemon_subs.add_parser("shutdown", help="Request daemon shutdown") - - # daemon run-tests - run daemon test suite - daemon_run_tests_parser = daemon_subs.add_parser( - "run-tests", - help="Run daemon test suite (runs all tests by default when no filters are provided)", - ) - daemon_run_tests_parser.add_argument("--unit", action="store_true", help="Run only unit tests") - daemon_run_tests_parser.add_argument( - "--integration", action="store_true", help="Run only integration tests" - ) - 
daemon_run_tests_parser.add_argument( - "--test", - "-t", - type=str, - metavar="NAME", - help="Run a specific test (e.g., test_config, test_daemon)", - ) - daemon_run_tests_parser.add_argument( - "--verbose", "-v", action="store_true", help="Show verbose test output" - ) - # -------------------------- - # --- Sandbox Commands (Docker-based package testing) --- sandbox_parser = subparsers.add_parser( "sandbox", help="Test packages in isolated Docker sandbox" @@ -5068,7 +2673,9 @@ def main(): sandbox_promote_parser.add_argument( "--dry-run", action="store_true", help="Show command without executing" ) - sandbox_promote_parser.add_argument("-y", "--yes", action="store_true", help=HELP_SKIP_CONFIRM) + sandbox_promote_parser.add_argument( + "-y", "--yes", action="store_true", help="Skip confirmation prompt" + ) # sandbox cleanup [--force] sandbox_cleanup_parser = sandbox_subs.add_parser("cleanup", help="Remove a sandbox environment") @@ -5081,7 +2688,7 @@ def main(): # sandbox exec sandbox_exec_parser = sandbox_subs.add_parser("exec", help="Execute command in sandbox") sandbox_exec_parser.add_argument("name", help="Sandbox name") - sandbox_exec_parser.add_argument("cmd", nargs="+", help="Command to execute") + sandbox_exec_parser.add_argument("command", nargs="+", help="Command to execute") # -------------------------- # --- Environment Variable Management Commands --- @@ -5174,408 +2781,103 @@ def main(): env_template_apply_parser.add_argument( "--encrypt-keys", help="Comma-separated list of keys to encrypt" ) - - # --- Shell Environment Analyzer Commands --- - # env audit - show all shell variables with sources - env_audit_parser = env_subs.add_parser( - "audit", help="Audit shell environment variables and show their sources" + # --- Info Command (system information queries) --- + info_parser = subparsers.add_parser("info", help="Get system and application information") + info_parser.add_argument("query", nargs="*", help="Information query (natural language)") + 
info_parser.add_argument( + "--app", "-a", + type=str, + help="Get info about a specific application (nginx, docker, etc.)" ) - env_audit_parser.add_argument( - "--shell", - choices=["bash", "zsh", "fish"], - help="Shell to analyze (default: auto-detect)", + info_parser.add_argument( + "--quick", "-q", + type=str, + choices=["cpu", "memory", "disk", "gpu", "os", "kernel", "network", "dns", + "services", "security", "processes", "environment"], + help="Quick lookup for common info types" ) - env_audit_parser.add_argument( - "--no-system", - action="store_true", - help="Exclude system-wide config files", + info_parser.add_argument( + "--category", "-c", + type=str, + choices=["hardware", "software", "network", "services", "security", + "storage", "performance", "configuration"], + help="Get structured info for a category" ) - env_audit_parser.add_argument( - "--json", + info_parser.add_argument( + "--list", "-l", action="store_true", - help="Output as JSON", + help="List available info types and applications" ) - - # env check - detect conflicts and issues - env_check_parser = env_subs.add_parser( - "check", help="Check for environment variable conflicts and issues" - ) - env_check_parser.add_argument( - "--shell", - choices=["bash", "zsh", "fish"], - help="Shell to check (default: auto-detect)", - ) - - # env path subcommands - env_path_parser = env_subs.add_parser("path", help="Manage PATH entries") - env_path_subs = env_path_parser.add_subparsers(dest="path_action", help="PATH actions") - - # env path list - env_path_list_parser = env_path_subs.add_parser("list", help="List PATH entries with status") - env_path_list_parser.add_argument( - "--json", + info_parser.add_argument( + "--debug", action="store_true", - help="Output as JSON", + help="Show debug output" ) - # env path add [--prepend|--append] [--persist] - env_path_add_parser = env_path_subs.add_parser("add", help="Add a path entry (idempotent)") - env_path_add_parser.add_argument("path", help="Path to 
add") - env_path_add_parser.add_argument( - "--append", - action="store_true", - help="Append to end of PATH (default: prepend)", - ) - env_path_add_parser.add_argument( - "--persist", - action="store_true", - help="Add to shell config file for persistence", - ) - env_path_add_parser.add_argument( - "--shell", - choices=["bash", "zsh", "fish"], - help="Shell config to modify (default: auto-detect)", - ) + # --- Do Command (manage do-mode runs) --- + do_parser = subparsers.add_parser("do", help="Manage do-mode execution runs") + do_subs = do_parser.add_subparsers(dest="do_action", help="Do actions") - # env path remove [--persist] - env_path_remove_parser = env_path_subs.add_parser("remove", help="Remove a path entry") - env_path_remove_parser.add_argument("path", help="Path to remove") - env_path_remove_parser.add_argument( - "--persist", - action="store_true", - help="Remove from shell config file", - ) - env_path_remove_parser.add_argument( - "--shell", - choices=["bash", "zsh", "fish"], - help="Shell config to modify (default: auto-detect)", - ) + # do history [--limit N] + do_history_parser = do_subs.add_parser("history", help="View do-mode run history") + do_history_parser.add_argument("--limit", "-n", type=int, default=20, help="Number of runs to show") + do_history_parser.add_argument("run_id", nargs="?", help="Show details for specific run ID") - # env path dedupe [--dry-run] [--persist] - env_path_dedupe_parser = env_path_subs.add_parser( - "dedupe", help="Remove duplicate PATH entries" - ) - env_path_dedupe_parser.add_argument( - "--dry-run", - action="store_true", - help="Show what would be removed without making changes", - ) - env_path_dedupe_parser.add_argument( - "--persist", - action="store_true", - help="Generate shell config to persist deduplication", - ) - env_path_dedupe_parser.add_argument( - "--shell", - choices=["bash", "zsh", "fish"], - help="Shell for generated config (default: auto-detect)", - ) + # do setup - setup cortex user + 
do_subs.add_parser("setup", help="Setup cortex user for privilege management") - # env path clean [--remove-missing] [--dry-run] - env_path_clean_parser = env_path_subs.add_parser( - "clean", help="Clean PATH (remove duplicates and optionally missing paths)" - ) - env_path_clean_parser.add_argument( - "--remove-missing", - action="store_true", - help="Also remove paths that don't exist", - ) - env_path_clean_parser.add_argument( - "--dry-run", - action="store_true", - help="Show what would be cleaned without making changes", - ) - env_path_clean_parser.add_argument( - "--shell", - choices=["bash", "zsh", "fish"], - help="Shell for generated fix script (default: auto-detect)", - ) + # do protected - manage protected paths + do_protected_parser = do_subs.add_parser("protected", help="Manage protected paths") + do_protected_parser.add_argument("--add", help="Add a path to protected list") + do_protected_parser.add_argument("--remove", help="Remove a path from protected list") + do_protected_parser.add_argument("--list", action="store_true", help="List all protected paths") # -------------------------- - # Doctor command - doctor_parser = subparsers.add_parser("doctor", help="System health check") - - # Troubleshoot command - troubleshoot_parser = subparsers.add_parser( - "troubleshoot", help="Interactive system troubleshooter" - ) - troubleshoot_parser.add_argument( - "--no-execute", - action="store_true", - help="Disable automatic command execution (read-only mode)", - ) - # License and upgrade commands - subparsers.add_parser("upgrade", help="Upgrade to Cortex Pro") - subparsers.add_parser("license", help="Show license status") - - activate_parser = subparsers.add_parser("activate", help="Activate a license key") - activate_parser.add_argument("license_key", help="Your license key") - - # --- Update Command --- - update_parser = subparsers.add_parser("update", help="Check for and install Cortex updates") - update_parser.add_argument( - "--channel", - "-c", - 
choices=["stable", "beta", "dev"], - default="stable", - help="Update channel (default: stable)", - ) - update_subs = update_parser.add_subparsers(dest="update_action", help="Update actions") - - # update check - update_check_parser = update_subs.add_parser("check", help="Check for available updates") - - # update install [version] [--dry-run] - update_install_parser = update_subs.add_parser("install", help="Install available update") - update_install_parser.add_argument( - "version", nargs="?", help="Specific version to install (default: latest)" - ) - update_install_parser.add_argument( - "--dry-run", action="store_true", help="Show what would be updated without installing" - ) - - # update rollback [backup_id] - update_rollback_parser = update_subs.add_parser("rollback", help="Rollback to previous version") - update_rollback_parser.add_argument( - "backup_id", nargs="?", help="Backup ID or version to restore (default: most recent)" - ) - - # update list - update_subs.add_parser("list", help="List available versions") - - # update backups - update_subs.add_parser("backups", help="List available backups for rollback") + # --- Watch Command (terminal monitoring setup) --- + watch_parser = subparsers.add_parser("watch", help="Manage terminal watching for manual intervention mode") + watch_parser.add_argument("--install", action="store_true", help="Install terminal watch hook to .bashrc/.zshrc") + watch_parser.add_argument("--uninstall", action="store_true", help="Remove terminal watch hook from shell configs") + watch_parser.add_argument("--system", action="store_true", help="Install/uninstall system-wide (requires sudo)") + watch_parser.add_argument("--service", action="store_true", help="Install/uninstall as systemd service (recommended)") + watch_parser.add_argument("--status", action="store_true", help="Show terminal watch status") + watch_parser.add_argument("--test", action="store_true", help="Test terminal monitoring") # -------------------------- - # 
WiFi/Bluetooth Driver Matcher - wifi_parser = subparsers.add_parser("wifi", help="WiFi/Bluetooth driver auto-matcher") - wifi_parser.add_argument( - "action", - nargs="?", - default="status", - choices=["status", "detect", "recommend", "install", "connectivity"], - help="Action to perform (default: status)", - ) - wifi_parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output", - ) - - # Stdin Piping Support - stdin_parser = subparsers.add_parser("stdin", help="Process piped stdin data") - stdin_parser.add_argument( - "action", - nargs="?", - default="info", - choices=["info", "analyze", "passthrough", "stats"], - help="Action to perform (default: info)", - ) - stdin_parser.add_argument( - "--max-lines", - type=int, - default=1000, - help="Maximum lines to process (default: 1000)", - ) - stdin_parser.add_argument( - "--truncation", - choices=["head", "tail", "middle", "sample"], - default="middle", - help="Truncation mode for large input (default: middle)", - ) - stdin_parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output", - ) - - # Semantic Version Resolver - deps_parser = subparsers.add_parser("deps", help="Dependency version resolver") - deps_parser.add_argument( - "action", - nargs="?", - default="analyze", - choices=["analyze", "parse", "check", "compare"], - help="Action to perform (default: analyze)", - ) - deps_parser.add_argument( - "packages", - nargs="*", - help="Package constraints (format: pkg:constraint:source)", - ) - deps_parser.add_argument( - "-v", - "--verbose", - action="store_true", - help="Enable verbose output", - ) - - # System Health Score - health_parser = subparsers.add_parser("health", help="System health score and recommendations") - health_parser.add_argument( - "action", - nargs="?", - default="check", - choices=["check", "history", "factors", "quick"], - help="Action to perform (default: check)", - ) - health_parser.add_argument( - "-v", - "--verbose", 
- action="store_true", - help="Enable verbose output", - ) - args = parser.parse_args() - # Configure logging based on parsed arguments - if getattr(args, "json", False): - logging.getLogger("cortex").setLevel(logging.ERROR) - # Also suppress common SDK loggers - logging.getLogger("anthropic").setLevel(logging.ERROR) - logging.getLogger("openai").setLevel(logging.ERROR) - logging.getLogger("httpcore").setLevel(logging.ERROR) - - # Handle --set-language global flag first (before any command) - if getattr(args, "set_language", None): - result = _handle_set_language(args.set_language) - # Only return early if no command is specified - # This allows: cortex --set-language es install nginx - if not args.command: - return result - # If language setting failed, still return the error - if result != 0: - return result - # Otherwise continue with the command execution - - # The Guard: Check for empty commands before starting the CLI if not args.command: show_rich_help() return 0 - # Initialize the CLI handler cli = CortexCLI(verbose=args.verbose) try: - # Route the command to the appropriate method inside the cli object - if args.command == "docker": - if args.docker_action == "permissions": - return cli.docker_permissions(args) - parser.print_help() - return 1 - if args.command == "demo": return cli.demo() - elif args.command == "dashboard": - return cli.dashboard() elif args.command == "wizard": return cli.wizard() elif args.command == "status": return cli.status() - elif args.command == "benchmark": - return cli.benchmark(verbose=getattr(args, "verbose", False)) - elif args.command == "systemd": - return cli.systemd( - args.service, - action=getattr(args, "action", "status"), - verbose=getattr(args, "verbose", False), - ) - elif args.command == "gpu": - return cli.gpu( - action=getattr(args, "action", "status"), - mode=getattr(args, "mode", None), - verbose=getattr(args, "verbose", False), - ) - elif args.command == "printer": - return cli.printer( - action=getattr(args, 
"action", "status"), verbose=getattr(args, "verbose", False) - ) - elif args.command == "voice": - model = getattr(args, "model", None) - return cli.voice(continuous=not getattr(args, "single", False), model=model) elif args.command == "ask": - # Handle --mic flag for voice input - if getattr(args, "mic", False): - try: - from cortex.voice import VoiceInputError, VoiceInputHandler - - handler = VoiceInputHandler() - cx_print("Press F9 to speak your question...", "info") - transcript = handler.record_single() - - if not transcript: - cli._print_error("No speech detected") - return 1 - - cx_print(f"Question: {transcript}", "info") - return cli.ask(transcript) - except ImportError: - cli._print_error("Voice dependencies not installed.") - cx_print("Install with: pip install cortex-linux[voice]", "info") - return 1 - except VoiceInputError as e: - cli._print_error(f"Voice input error: {e}") - return 1 - if not args.question: - cli._print_error("Please provide a question or use --mic for voice input") - return 1 - return cli.ask(args.question) + return cli.ask( + getattr(args, "question", None), + debug=args.debug, + do_mode=getattr(args, "do", False) + ) elif args.command == "install": - # Handle --mic flag for voice input - if getattr(args, "mic", False): - handler = None - try: - from cortex.voice import VoiceInputError, VoiceInputHandler - - handler = VoiceInputHandler() - cx_print("Press F9 to speak what you want to install...", "info") - software = handler.record_single() - if not software: - cx_print("No speech detected.", "warning") - return 1 - cx_print(f"Installing: {software}", "info") - except ImportError: - cli._print_error("Voice dependencies not installed.") - cx_print("Install with: pip install cortex-linux[voice]", "info") - return 1 - except VoiceInputError as e: - cli._print_error(f"Voice input error: {e}") - return 1 - finally: - # Always clean up resources - if handler is not None: - try: - handler.stop() - except Exception as e: - # Log cleanup 
errors but don't raise - logging.debug("Error during voice handler cleanup: %s", e) - else: - software = args.software - if not software: - cli._print_error("Please provide software name or use --mic for voice input") - return 1 return cli.install( - software, + args.software, execute=args.execute, dry_run=args.dry_run, parallel=args.parallel, - json_output=args.json, ) - elif args.command == "remove": - # Handle --execute flag to override default dry-run - if args.execute: - args.dry_run = False - return cli.remove(args) elif args.command == "import": return cli.import_deps(args) elif args.command == "history": return cli.history(limit=args.limit, status=args.status, show_id=args.show_id) elif args.command == "rollback": return cli.rollback(args.id, dry_run=args.dry_run) - elif args.command == "role": - return cli.role(args) + # Handle the new notify command elif args.command == "notify": return cli.notify(args) elif args.command == "stack": @@ -5589,63 +2891,12 @@ def main(): return 1 elif args.command == "env": return cli.env(args) - elif args.command == "doctor": - return cli.doctor() - elif args.command == "troubleshoot": - return cli.troubleshoot( - no_execute=getattr(args, "no_execute", False), - ) - elif args.command == "config": - return cli.config(args) - elif args.command == "upgrade": - from cortex.licensing import open_upgrade_page - - open_upgrade_page() - return 0 - elif args.command == "license": - from cortex.licensing import show_license_status - - show_license_status() - return 0 - elif args.command == "activate": - from cortex.licensing import activate_license - - return 0 if activate_license(args.license_key) else 1 - elif args.command == "update": - return cli.update(args) - elif args.command == "daemon": - return cli.daemon(args) - elif args.command == "wifi": - from cortex.wifi_driver import run_wifi_driver - - return run_wifi_driver( - action=getattr(args, "action", "status"), - verbose=getattr(args, "verbose", False), - ) - elif 
args.command == "stdin": - from cortex.stdin_handler import run_stdin_handler - - return run_stdin_handler( - action=getattr(args, "action", "info"), - max_lines=getattr(args, "max_lines", 1000), - truncation=getattr(args, "truncation", "middle"), - verbose=getattr(args, "verbose", False), - ) - elif args.command == "deps": - from cortex.semver_resolver import run_semver_resolver - - return run_semver_resolver( - action=getattr(args, "action", "analyze"), - packages=getattr(args, "packages", None), - verbose=getattr(args, "verbose", False), - ) - elif args.command == "health": - from cortex.health_score import run_health_check - - return run_health_check( - action=getattr(args, "action", "check"), - verbose=getattr(args, "verbose", False), - ) + elif args.command == "do": + return cli.do_cmd(args) + elif args.command == "info": + return cli.info_cmd(args) + elif args.command == "watch": + return cli.watch_cmd(args) else: parser.print_help() return 1 @@ -5655,6 +2906,13 @@ def main(): except (ValueError, ImportError, OSError) as e: print(f"❌ Error: {e}", file=sys.stderr) return 1 + except AttributeError as e: + # Internal errors - show friendly message + print("❌ Something went wrong. 
Please try again.", file=sys.stderr) + if "--verbose" in sys.argv or "-v" in sys.argv: + import traceback + traceback.print_exc() + return 1 except Exception as e: print(f"❌ Unexpected error: {e}", file=sys.stderr) # Print traceback if verbose mode was requested diff --git a/cortex/demo.py b/cortex/demo.py index 730d4805e..6fd487eeb 100644 --- a/cortex/demo.py +++ b/cortex/demo.py @@ -1,601 +1,148 @@ -""" -Cortex Interactive Demo -Interactive 5-minute tutorial showcasing all major Cortex features -""" +"""Interactive demo for Cortex Linux.""" -import secrets +import subprocess import sys -import time -from datetime import datetime, timedelta from rich.console import Console from rich.panel import Panel -from rich.table import Table +from rich.prompt import Prompt, Confirm +from rich.markdown import Markdown from cortex.branding import show_banner -from cortex.hardware_detection import SystemInfo, detect_hardware - - -class CortexDemo: - """Interactive Cortex demonstration""" - - def __init__(self) -> None: - self.console = Console() - self.hw: SystemInfo | None = None - self.is_interactive = sys.stdin.isatty() - self.installation_id = self._generate_id() - - def clear_screen(self) -> None: - """Clears the terminal screen""" - self.console.clear() - - def _generate_id(self) -> str: - """Generate a fake installation ID for demo""" - return secrets.token_hex(8) - - def _generate_past_date(self, days_ago: int, hours: int = 13, minutes: int = 11) -> str: - """Generate a date string for N days ago""" - past = datetime.now() - timedelta(days=days_ago) - past = past.replace(hour=hours, minute=minutes, second=51) - return past.strftime("%Y-%m-%d %H:%M:%S") - - def _is_gpu_vendor(self, model: str, keywords: list[str]) -> bool: - """Check if GPU model matches any vendor keywords.""" - model_upper = str(model).upper() - return any(kw in model_upper for kw in keywords) - - def run(self) -> int: - """Main demo entry point""" - try: - self.clear_screen() - show_banner() - - 
self.console.print("\n[bold cyan]🎬 Cortex Interactive Demo[/bold cyan]") - self.console.print("[dim]Learn Cortex by typing real commands (~5 minutes)[/dim]\n") - - intro_text = """ -Cortex is an AI-powered universal package manager that: - - • 🧠 [cyan]Understands natural language[/cyan] - No exact syntax needed - • 🔍 [cyan]Plans before installing[/cyan] - Shows you what it will do first - • 🔒 [cyan]Checks hardware compatibility[/cyan] - Prevents bad installs - • 📦 [cyan]Works with all package managers[/cyan] - apt, brew, npm, pip... - • 🎯 [cyan]Smart stacks[/cyan] - Pre-configured tool bundles - • 🔄 [cyan]Safe rollback[/cyan] - Undo any installation - -[bold]This is interactive - you'll type real commands![/bold] -[dim](Just type commands as shown - any input works for learning!)[/dim] - """ - - self.console.print(Panel(intro_text, border_style="cyan")) - - if not self._wait_for_user("\nPress Enter to start..."): - return 0 - - # Detect hardware for smart demos - self.hw = detect_hardware() - - # Run all sections (now consolidated to 3) - sections = [ - ("AI Intelligence & Understanding", self._section_ai_intelligence), - ("Smart Stacks & Workflows", self._section_smart_stacks), - ("History & Safety Features", self._section_history_safety), - ] - - for i, (name, section_func) in enumerate(sections, 1): - self.clear_screen() - self.console.print(f"\n[dim]━━━ Section {i} of {len(sections)}: {name} ━━━[/dim]\n") - - if not section_func(): - self.console.print( - "\n[yellow]Demo interrupted. Thanks for trying Cortex![/yellow]" - ) - return 1 - - # Show finale - self.clear_screen() - self._show_finale() - - return 0 - - except (KeyboardInterrupt, EOFError): - self.console.print( - "\n\n[yellow]Demo interrupted. 
Thank you for trying Cortex![/yellow]" - ) - return 1 - - def _wait_for_user(self, message: str = "\nPress Enter to continue...") -> bool: - """Wait for user input""" - try: - if self.is_interactive: - self.console.print(f"[dim]{message}[/dim]") - input() - else: - time.sleep(2) # Auto-advance in non-interactive mode - return True - except (KeyboardInterrupt, EOFError): - return False - - def _prompt_command(self, command: str) -> bool: - """ - Prompt user to type a command. - Re-prompts on empty input to ensure user provides something. - """ - try: - if self.is_interactive: - while True: - self.console.print(f"\n[yellow]Try:[/yellow] [bold]{command}[/bold]") - self.console.print("\n[bold green]$[/bold green] ", end="") - user_input = input() - - # If empty, re-prompt and give hint - if not user_input.strip(): - self.console.print( - "[dim]Type the command above or anything else to continue[/dim]" - ) - continue - - break - - self.console.print("[green]✓[/green] [dim]Let's see what Cortex does...[/dim]\n") - else: - self.console.print(f"\n[yellow]Command:[/yellow] [bold]{command}[/bold]\n") - time.sleep(1) - - return True - except (KeyboardInterrupt, EOFError): - return False - - def _simulate_cortex_output(self, packages: list[str], show_execution: bool = False) -> None: - """Simulate real Cortex output with CX branding""" - - # Understanding phase - with self.console.status("[cyan]CX[/cyan] Understanding request...", spinner="dots"): - time.sleep(0.8) - - # Planning phase - with self.console.status("[cyan]CX[/cyan] Planning installation...", spinner="dots"): - time.sleep(1.0) - - pkg_str = " ".join(packages) - self.console.print(f" [cyan]CX[/cyan] │ Installing {pkg_str}...\n") - time.sleep(0.5) - - # Show generated commands - self.console.print("[bold]Generated commands:[/bold]") - self.console.print(" 1. [dim]sudo apt update[/dim]") - - for i, pkg in enumerate(packages, 2): - self.console.print(f" {i}. 
[dim]sudo apt install -y {pkg}[/dim]") - - if not show_execution: - self.console.print( - "\n[yellow]To execute these commands, run with --execute flag[/yellow]" - ) - self.console.print("[dim]Example: cortex install docker --execute[/dim]\n") - else: - # Simulate execution - self.console.print("\n[cyan]Executing commands...[/cyan]\n") - time.sleep(0.5) - - total_steps = len(packages) + 1 - for step in range(1, total_steps + 1): - self.console.print(f"[{step}/{total_steps}] ⏳ Step {step}") - if step == 1: - self.console.print(" Command: [dim]sudo apt update[/dim]") - else: - self.console.print( - f" Command: [dim]sudo apt install -y {packages[step - 2]}[/dim]" - ) - time.sleep(0.8) - self.console.print() - - self.console.print( - f" [cyan]CX[/cyan] [green]✓[/green] {pkg_str} installed successfully!\n" - ) - - # Show installation ID - self.console.print(f"📝 Installation recorded (ID: {self.installation_id})") - self.console.print( - f" To rollback: [cyan]cortex rollback {self.installation_id}[/cyan]\n" - ) - - def _section_ai_intelligence(self) -> bool: - """Section 1: AI Intelligence - NLP, Planning, and Hardware Awareness""" - self.console.print("[bold cyan]🧠 AI Intelligence & Understanding[/bold cyan]\n") - - # Part 1: Natural Language Understanding - self.console.print("[bold]Part 1: Natural Language Understanding[/bold]") - self.console.print( - "Cortex understands what you [italic]mean[/italic], not just exact syntax." 
- ) - self.console.print("Ask questions in plain English:\n") - - if not self._prompt_command('cortex ask "I need tools for Python web development"'): - return False - - # Simulate AI response - with self.console.status("[cyan]CX[/cyan] Understanding your request...", spinner="dots"): - time.sleep(1.0) - with self.console.status("[cyan]CX[/cyan] Analyzing requirements...", spinner="dots"): - time.sleep(1.2) - - self.console.print(" [cyan]CX[/cyan] [green]✓[/green] [dim]Recommendations ready[/dim]\n") - time.sleep(0.5) - - # Show AI response - response = """For Python web development on your system, here are the essential tools: - -[bold]Web Frameworks:[/bold] - • [cyan]FastAPI[/cyan] - Modern, fast framework with automatic API documentation - • [cyan]Flask[/cyan] - Lightweight, flexible microframework - • [cyan]Django[/cyan] - Full-featured framework with ORM and admin interface - -[bold]Development Tools:[/bold] - • [cyan]uvicorn[/cyan] - ASGI server for FastAPI - • [cyan]gunicorn[/cyan] - WSGI server for production - • [cyan]python3-venv[/cyan] - Virtual environments - -Install a complete stack with: [cyan]cortex stack webdev[/cyan] - """ - - self.console.print(Panel(response, border_style="cyan", title="AI Response")) - self.console.print() - - self.console.print("[bold green]💡 Key Feature:[/bold green]") - self.console.print( - "Cortex's AI [bold]understands intent[/bold] and provides smart recommendations.\n" - ) - - if not self._wait_for_user(): - return False - - # Part 2: Smart Planning - self.console.print("\n[bold]Part 2: Transparent Planning[/bold]") - self.console.print("Let's install Docker and Node.js together.") - self.console.print("[dim]Cortex will show you the plan before executing anything.[/dim]") - - if not self._prompt_command('cortex install "docker nodejs"'): - return False - - # Simulate the actual output - self._simulate_cortex_output(["docker.io", "nodejs"], show_execution=False) - - self.console.print("[bold green]🔒 Transparency & 
Safety:[/bold green]") - self.console.print( - "Cortex [bold]shows you exactly what it will do[/bold] before making any changes." - ) - self.console.print("[dim]No surprises, no unwanted modifications to your system.[/dim]\n") - - if not self._wait_for_user(): - return False - - # Part 3: Hardware-Aware Intelligence - self.console.print("\n[bold]Part 3: Hardware-Aware Intelligence[/bold]") - self.console.print( - "Cortex detects your hardware and prevents incompatible installations.\n" - ) - - # Detect GPU (check both dedicated and integrated) - gpu = getattr(self.hw, "gpu", None) if self.hw else None - gpu_info = gpu[0] if (gpu and len(gpu) > 0) else None - # Check for NVIDIA - nvidia_keywords = ["NVIDIA", "GTX", "RTX", "GEFORCE", "QUADRO", "TESLA"] - has_nvidia = gpu_info and self._is_gpu_vendor(gpu_info.model, nvidia_keywords) +console = Console() - # Check for AMD (dedicated or integrated Radeon) - amd_keywords = ["AMD", "RADEON", "RENOIR", "VEGA", "NAVI", "RX "] - has_amd = gpu_info and self._is_gpu_vendor(gpu_info.model, amd_keywords) - if has_nvidia: - # NVIDIA GPU - show successful CUDA install - self.console.print(f"[cyan]Detected GPU:[/cyan] {gpu_info.model}") - self.console.print("Let's install CUDA for GPU acceleration:") +def _run_cortex_command(args: list[str], capture: bool = False) -> tuple[int, str]: + """Run a cortex command and return exit code and output.""" + cmd = ["cortex"] + args + if capture: + result = subprocess.run(cmd, capture_output=True, text=True) + return result.returncode, result.stdout + result.stderr + else: + result = subprocess.run(cmd) + return result.returncode, "" - if not self._prompt_command("cortex install cuda"): - return False - with self.console.status("[cyan]CX[/cyan] Understanding request...", spinner="dots"): - time.sleep(0.8) - with self.console.status( - "[cyan]CX[/cyan] Checking hardware compatibility...", spinner="dots" - ): - time.sleep(1.0) +def _wait_for_enter(): + """Wait for user to press enter.""" + 
console.print("\n[dim]Press Enter to continue...[/dim]") + input() - self.console.print( - " [cyan]CX[/cyan] [green]✓[/green] NVIDIA GPU detected - CUDA compatible!\n" - ) - time.sleep(0.5) - self.console.print("[bold]Generated commands:[/bold]") - self.console.print(" 1. [dim]sudo apt update[/dim]") - self.console.print(" 2. [dim]sudo apt install -y nvidia-cuda-toolkit[/dim]\n") - - self.console.print( - "[green]✅ Perfect! CUDA will work great on your NVIDIA GPU.[/green]\n" - ) - - elif has_amd: - # AMD GPU - show Cortex catching the mistake - self.console.print(f"[cyan]Detected GPU:[/cyan] {gpu_info.model}") - self.console.print("Let's try to install CUDA...") - - if not self._prompt_command("cortex install cuda"): - return False - - with self.console.status("[cyan]CX[/cyan] Understanding request...", spinner="dots"): - time.sleep(0.8) - with self.console.status( - "[cyan]CX[/cyan] Checking hardware compatibility...", spinner="dots" - ): - time.sleep(1.2) - - self.console.print("\n[yellow]⚠️ Hardware Compatibility Warning:[/yellow]") - time.sleep(0.8) - self.console.print(f"[cyan]Your GPU:[/cyan] {gpu_info.model}") - self.console.print("[red]NVIDIA CUDA will not work on AMD hardware![/red]\n") - time.sleep(1.0) - - self.console.print( - "[cyan]🤖 Cortex suggests:[/cyan] Install ROCm instead (AMD's GPU framework)" - ) - time.sleep(0.8) - self.console.print("\n[bold]Recommended alternative:[/bold]") - self.console.print(" [cyan]cortex install rocm[/cyan]\n") - - self.console.print("[green]✅ Cortex prevented an incompatible installation![/green]\n") - - else: - # No GPU - show Python dev tools - self.console.print("[cyan]No dedicated GPU detected - CPU mode[/cyan]") - self.console.print("Let's install Python development tools:") - - if not self._prompt_command("cortex install python-dev"): - return False - - with self.console.status("[cyan]CX[/cyan] Understanding request...", spinner="dots"): - time.sleep(0.8) - with self.console.status("[cyan]CX[/cyan] Planning 
installation...", spinner="dots"): - time.sleep(1.0) - - self.console.print("[bold]Generated commands:[/bold]") - self.console.print(" 1. [dim]sudo apt update[/dim]") - self.console.print(" 2. [dim]sudo apt install -y python3-dev[/dim]") - self.console.print(" 3. [dim]sudo apt install -y python3-pip[/dim]") - self.console.print(" 4. [dim]sudo apt install -y python3-venv[/dim]\n") - - self.console.print("[bold green]💡 The Difference:[/bold green]") - self.console.print("Traditional package managers install whatever you ask for.") - self.console.print( - "Cortex [bold]checks compatibility FIRST[/bold] and prevents problems!\n" - ) - - return self._wait_for_user() - - def _section_smart_stacks(self) -> bool: - """Section 2: Smart Stacks & Complete Workflows""" - self.console.print("[bold cyan]📚 Smart Stacks - Complete Workflows[/bold cyan]\n") - - self.console.print("Stacks are pre-configured bundles of tools for common workflows.") - self.console.print("Install everything you need with one command.\n") - - # List stacks - if not self._prompt_command("cortex stack --list"): - return False - - self.console.print() # Visual spacing before stacks table - - # Show stacks table - stacks_table = Table(title="📦 Available Stacks", show_header=True) - stacks_table.add_column("Stack", style="cyan", width=12) - stacks_table.add_column("Description", style="white", width=22) - stacks_table.add_column("Packages", style="dim", width=35) - - stacks_table.add_row("ml", "Machine Learning (GPU)", "PyTorch, CUDA, Jupyter, pandas...") - stacks_table.add_row("ml-cpu", "Machine Learning (CPU)", "PyTorch CPU-only version") - stacks_table.add_row("webdev", "Web Development", "Node, npm, nginx, postgres") - stacks_table.add_row("devops", "DevOps Tools", "Docker, kubectl, terraform, ansible") - stacks_table.add_row("data", "Data Science", "Python, pandas, jupyter, postgres") - - self.console.print(stacks_table) - self.console.print( - "\n [cyan]CX[/cyan] │ Use: [cyan]cortex stack [/cyan] to 
install a stack\n" - ) - - if not self._wait_for_user(): - return False - - # Install webdev stack - self.console.print("\nLet's install the Web Development stack:") - - if not self._prompt_command("cortex stack webdev"): - return False - - self.console.print(" [cyan]CX[/cyan] [green]✓[/green] ") - self.console.print("🚀 Installing stack: [bold]Web Development[/bold]\n") - - # Simulate full stack installation - self._simulate_cortex_output(["nodejs", "npm", "nginx", "postgresql"], show_execution=True) - - self.console.print(" [cyan]CX[/cyan] [green]✓[/green] ") - self.console.print("[green]✅ Stack 'Web Development' installed successfully![/green]") - self.console.print("[green]Installed 4 packages[/green]\n") - - self.console.print("[bold green]💡 Benefit:[/bold green]") - self.console.print( - "One command sets up your [bold]entire development environment[/bold].\n" - ) - - self.console.print("\n[cyan]💡 Tip:[/cyan] Create custom stacks for your team's workflow!") - self.console.print(' [dim]cortex stack create "mystack" package1 package2...[/dim]\n') - - return self._wait_for_user() - - def _section_history_safety(self) -> bool: - """Section 3: History Tracking & Safety Features""" - self.console.print("[bold cyan]🔒 History & Safety Features[/bold cyan]\n") - - # Part 1: Installation History - self.console.print("[bold]Part 1: Installation History[/bold]") - self.console.print("Cortex keeps a complete record of all installations.") - self.console.print("Review what you've installed anytime:\n") - - if not self._prompt_command("cortex history"): - return False - - self.console.print() - - # Show history table - history_table = Table(show_header=True) - history_table.add_column("ID", style="dim", width=18) - history_table.add_column("Date", style="cyan", width=20) - history_table.add_column("Operation", style="white", width=12) - history_table.add_column("Packages", style="yellow", width=25) - history_table.add_column("Status", style="green", width=10) - - 
history_table.add_row( - self.installation_id, - self._generate_past_date(0), - "install", - "nginx, nodejs +2", - "success", - ) - history_table.add_row( - self._generate_id(), - self._generate_past_date(1, 13, 13), - "install", - "docker", - "success", - ) - history_table.add_row( - self._generate_id(), - self._generate_past_date(1, 14, 25), - "install", - "python3-dev", - "success", - ) - history_table.add_row( - self._generate_id(), - self._generate_past_date(2, 18, 29), - "install", - "postgresql", - "success", - ) - - self.console.print(history_table) - self.console.print() - - self.console.print("[bold green]💡 Tracking Feature:[/bold green]") - self.console.print( - "Every installation is tracked. You can [bold]review or undo[/bold] any operation.\n" - ) - - if not self._wait_for_user(): - return False - - # Part 2: Rollback Functionality - self.console.print("\n[bold]Part 2: Safe Rollback[/bold]") - self.console.print("Made a mistake? Installed something wrong?") - self.console.print("Cortex can [bold]roll back any installation[/bold].\n") - - self.console.print( - f"Let's undo our webdev stack installation (ID: {self.installation_id}):" - ) - - if not self._prompt_command(f"cortex rollback {self.installation_id}"): - return False - - self.console.print() - with self.console.status("[cyan]CX[/cyan] Loading installation record...", spinner="dots"): - time.sleep(0.8) - with self.console.status("[cyan]CX[/cyan] Planning rollback...", spinner="dots"): - time.sleep(1.0) - with self.console.status("[cyan]CX[/cyan] Removing packages...", spinner="dots"): - time.sleep(1.2) - - rollback_id = self._generate_id() - self.console.print( - f" [cyan]CX[/cyan] [green]✓[/green] Rollback successful (ID: {rollback_id})\n" - ) - - self.console.print( - "[green]✅ All packages from that installation have been removed.[/green]\n" - ) - - self.console.print("[bold green]💡 Peace of Mind:[/bold green]") - self.console.print( - "Try anything fearlessly - you can always [bold]roll 
back[/bold] to a clean state.\n" - ) - - return self._wait_for_user() - - def _show_finale(self) -> None: - """Show finale with comparison table and next steps""" - self.console.print("\n" + "=" * 70) - self.console.print( - "[bold green]🎉 Demo Complete - You've Mastered Cortex Basics![/bold green]" - ) - self.console.print("=" * 70 + "\n") - - # Show comparison table (THE WOW FACTOR) - self.console.print("\n[bold]Why Cortex is Different:[/bold]\n") - - comparison_table = Table( - title="Cortex vs Traditional Package Managers", show_header=True, border_style="cyan" - ) - comparison_table.add_column("Feature", style="cyan", width=20) - comparison_table.add_column("Traditional (apt/brew)", style="yellow", width=25) - comparison_table.add_column("Cortex", style="green", width=25) - - comparison_table.add_row("Planning", "Installs immediately", "Shows plan first") - comparison_table.add_row("Search", "Exact string match", "Semantic/Intent based") - comparison_table.add_row( - "Hardware Aware", "Installs anything", "Checks compatibility first" - ) - comparison_table.add_row("Natural Language", "Strict syntax only", "AI understands intent") - comparison_table.add_row("Stacks", "Manual script creation", "One-command workflows") - comparison_table.add_row("Safety", "Manual backups", "Automatic rollback") - comparison_table.add_row("Multi-Manager", "Choose apt/brew/npm", "One tool, all managers") - - self.console.print(comparison_table) - self.console.print() - - # Key takeaways - summary = """ -[bold]What You've Learned:[/bold] - - ✓ [cyan]AI-Powered Understanding[/cyan] - Natural language queries - ✓ [cyan]Transparent Planning[/cyan] - See commands before execution - ✓ [cyan]Hardware-Aware[/cyan] - Prevents incompatible installations - ✓ [cyan]Smart Stacks[/cyan] - Complete workflows in one command - ✓ [cyan]Full History[/cyan] - Track every installation - ✓ [cyan]Safe Rollback[/cyan] - Undo anything, anytime - -[bold cyan]Ready to use Cortex?[/bold cyan] - -Essential 
commands: - $ [cyan]cortex wizard[/cyan] # Configure your API key (recommended first step!) - $ [cyan]cortex install "package"[/cyan] # Install packages - $ [cyan]cortex ask "question"[/cyan] # Get AI recommendations - $ [cyan]cortex stack --list[/cyan] # See available stacks - $ [cyan]cortex stack [/cyan] # Install a complete stack - $ [cyan]cortex history[/cyan] # View installation history - $ [cyan]cortex rollback [/cyan] # Undo an installation - $ [cyan]cortex doctor[/cyan] # Check system health - $ [cyan]cortex --help[/cyan] # See all commands - -[dim]GitHub: github.com/cortexlinux/cortex[/dim] - """ - - self.console.print(Panel(summary, border_style="green", title="🚀 Next Steps")) - self.console.print("\n[bold]Thank you for trying Cortex! Happy installing! 🎉[/bold]\n") +def _section(title: str, problem: str): + """Display a compact section header.""" + console.print(f"\n[bold cyan]{'─' * 50}[/bold cyan]") + console.print(f"[bold white]{title}[/bold white]") + console.print(f"[dim]{problem}[/dim]\n") def run_demo() -> int: - """ - Entry point for the interactive Cortex demo. - Teaches users Cortex through hands-on practice. - """ - demo = CortexDemo() - return demo.run() + """Run the interactive Cortex demo.""" + console.clear() + show_banner() + + # ───────────────────────────────────────────────────────────────── + # INTRODUCTION + # ───────────────────────────────────────────────────────────────── + + intro = """ +**Cortex** - The AI-native package manager for Linux. 
+ +In this demo you'll try: +• **Ask** - Query your system in natural language +• **Install** - Install packages with AI interpretation +• **Rollback** - Undo installations safely +""" + console.print(Panel(Markdown(intro), title="[cyan]Demo[/cyan]", border_style="cyan")) + _wait_for_enter() + + # ───────────────────────────────────────────────────────────────── + # ASK COMMAND + # ───────────────────────────────────────────────────────────────── + + _section( + "🔍 Ask Command", + "Query your system without memorizing Linux commands." + ) + + console.print("[dim]Examples: 'What Python version?', 'How much disk space?'[/dim]\n") + + user_question = Prompt.ask( + "[cyan]What would you like to ask?[/cyan]", + default="What version of Python is installed?" + ) + + console.print(f"\n[yellow]$[/yellow] cortex ask \"{user_question}\"\n") + _run_cortex_command(["ask", user_question]) + + _wait_for_enter() + + # ───────────────────────────────────────────────────────────────── + # INSTALL COMMAND + # ───────────────────────────────────────────────────────────────── + + _section( + "📦 Install Command", + "Describe what you want - Cortex finds the right packages." 
+ ) + + console.print("[dim]Examples: 'a web server', 'python dev tools', 'docker'[/dim]\n") + + user_install = Prompt.ask( + "[cyan]What would you like to install?[/cyan]", + default="a lightweight text editor" + ) + + console.print(f"\n[yellow]$[/yellow] cortex install \"{user_install}\" --dry-run\n") + _run_cortex_command(["install", user_install, "--dry-run"]) + + console.print() + if Confirm.ask("Actually install this?", default=False): + console.print(f"\n[yellow]$[/yellow] cortex install \"{user_install}\" --execute\n") + _run_cortex_command(["install", user_install, "--execute"]) + + _wait_for_enter() + + # ───────────────────────────────────────────────────────────────── + # ROLLBACK COMMAND + # ───────────────────────────────────────────────────────────────── + + _section( + "⏪ Rollback Command", + "Undo any installation by reverting to the previous state." + ) + + console.print("[dim]First, let's see your installation history with IDs:[/dim]\n") + console.print("[yellow]$[/yellow] cortex history --limit 5\n") + _run_cortex_command(["history", "--limit", "5"]) + + _wait_for_enter() + + if Confirm.ask("Preview a rollback?", default=False): + console.print("\n[cyan]Copy an installation ID from the history above:[/cyan]") + console.print("[dim]$ cortex rollback [/dim]", end="") + rollback_id = input().strip() + + if rollback_id: + console.print(f"\n[yellow]$[/yellow] cortex rollback {rollback_id} --dry-run\n") + _run_cortex_command(["rollback", rollback_id, "--dry-run"]) + + if Confirm.ask("Actually rollback?", default=False): + console.print(f"\n[yellow]$[/yellow] cortex rollback {rollback_id}\n") + _run_cortex_command(["rollback", rollback_id]) + + # ───────────────────────────────────────────────────────────────── + # SUMMARY + # ───────────────────────────────────────────────────────────────── + + console.print(f"\n[bold cyan]{'─' * 50}[/bold cyan]") + console.print("[bold green]✓ Demo Complete![/bold green]\n") + console.print("[dim]Commands: ask, 
install, history, rollback, stack, status[/dim]") + console.print("[dim]Run 'cortex --help' for more.[/dim]\n") + + return 0 + + +if __name__ == "__main__": + sys.exit(run_demo()) diff --git a/cortex/do_runner.py b/cortex/do_runner.py new file mode 100644 index 000000000..e5e2cea3f --- /dev/null +++ b/cortex/do_runner.py @@ -0,0 +1,63 @@ +"""Do Runner Module for Cortex. + +This file provides backward compatibility by re-exporting all classes +from the modular do_runner package. + +For new code, prefer importing directly from the package: + from cortex.do_runner import DoHandler, CommandStatus, etc. +""" + +# Re-export everything from the modular package +from cortex.do_runner import ( + # Models + CommandLog, + CommandStatus, + DoRun, + RunMode, + TaskNode, + TaskTree, + TaskType, + # Database + DoRunDatabase, + # Managers + CortexUserManager, + ProtectedPathsManager, + # Terminal + TerminalMonitor, + # Executor + TaskTreeExecutor, + # Diagnosis + AutoFixer, + ErrorDiagnoser, + # Verification + ConflictDetector, + FileUsefulnessAnalyzer, + VerificationRunner, + # Handler + DoHandler, + get_do_handler, + setup_cortex_user, +) + +__all__ = [ + "CommandLog", + "CommandStatus", + "DoRun", + "RunMode", + "TaskNode", + "TaskTree", + "TaskType", + "DoRunDatabase", + "CortexUserManager", + "ProtectedPathsManager", + "TerminalMonitor", + "TaskTreeExecutor", + "AutoFixer", + "ErrorDiagnoser", + "ConflictDetector", + "FileUsefulnessAnalyzer", + "VerificationRunner", + "DoHandler", + "get_do_handler", + "setup_cortex_user", +] diff --git a/cortex/do_runner/Untitled b/cortex/do_runner/Untitled new file mode 100644 index 000000000..597a6db29 --- /dev/null +++ b/cortex/do_runner/Untitled @@ -0,0 +1 @@ +i \ No newline at end of file diff --git a/cortex/do_runner/__init__.py b/cortex/do_runner/__init__.py new file mode 100644 index 000000000..906fd1883 --- /dev/null +++ b/cortex/do_runner/__init__.py @@ -0,0 +1,129 @@ +""" +Do Runner Module for Cortex. 
+ +Enables the ask command to write, read, and execute commands to solve problems. +Manages privilege escalation, command logging, and user confirmation flows. + +This module is organized into the following submodules: +- models: Data classes and enums (CommandStatus, RunMode, TaskType, etc.) +- database: DoRunDatabase for storing run history +- managers: CortexUserManager, ProtectedPathsManager +- terminal: TerminalMonitor for watching terminal activity +- executor: TaskTreeExecutor for advanced command execution +- diagnosis: ErrorDiagnoser, AutoFixer for error handling +- verification: ConflictDetector, VerificationRunner, FileUsefulnessAnalyzer +- handler: Main DoHandler class +""" + +from .models import ( + CommandLog, + CommandStatus, + DoRun, + RunMode, + TaskNode, + TaskTree, + TaskType, +) + +from .database import DoRunDatabase + +from .managers import ( + CortexUserManager, + ProtectedPathsManager, +) + +from .terminal import TerminalMonitor + +from .executor import TaskTreeExecutor + +from .diagnosis import ( + AutoFixer, + ErrorDiagnoser, + LoginHandler, + LoginRequirement, + LOGIN_REQUIREMENTS, + UBUNTU_PACKAGE_MAP, + UBUNTU_SERVICE_MAP, + ALL_ERROR_PATTERNS, + get_error_category, + get_severity, + is_critical_error, +) + +# New structured diagnosis engine +from .diagnosis_v2 import ( + DiagnosisEngine, + ErrorCategory, + DiagnosisResult, + FixCommand, + FixPlan, + VariableResolution, + ExecutionResult, + ErrorStackEntry, + ERROR_PATTERNS, + get_diagnosis_engine, +) + +from .verification import ( + ConflictDetector, + FileUsefulnessAnalyzer, + VerificationRunner, +) + +from .handler import ( + DoHandler, + get_do_handler, + setup_cortex_user, +) + +__all__ = [ + # Models + "CommandLog", + "CommandStatus", + "DoRun", + "RunMode", + "TaskNode", + "TaskTree", + "TaskType", + # Database + "DoRunDatabase", + # Managers + "CortexUserManager", + "ProtectedPathsManager", + # Terminal + "TerminalMonitor", + # Executor + "TaskTreeExecutor", + # Diagnosis 
(legacy) + "AutoFixer", + "ErrorDiagnoser", + "LoginHandler", + "LoginRequirement", + "LOGIN_REQUIREMENTS", + "UBUNTU_PACKAGE_MAP", + "UBUNTU_SERVICE_MAP", + "ALL_ERROR_PATTERNS", + "get_error_category", + "get_severity", + "is_critical_error", + # Diagnosis v2 (structured) + "DiagnosisEngine", + "ErrorCategory", + "DiagnosisResult", + "FixCommand", + "FixPlan", + "VariableResolution", + "ExecutionResult", + "ErrorStackEntry", + "ERROR_PATTERNS", + "get_diagnosis_engine", + # Verification + "ConflictDetector", + "FileUsefulnessAnalyzer", + "VerificationRunner", + # Handler + "DoHandler", + "get_do_handler", + "setup_cortex_user", +] + diff --git a/cortex/do_runner/database.py b/cortex/do_runner/database.py new file mode 100644 index 000000000..b153fe1da --- /dev/null +++ b/cortex/do_runner/database.py @@ -0,0 +1,478 @@ +"""Database module for storing do run history.""" + +import datetime +import hashlib +import json +import os +import sqlite3 +from pathlib import Path +from typing import Any + +from rich.console import Console + +from .models import CommandLog, CommandStatus, DoRun, RunMode + +console = Console() + + +class DoRunDatabase: + """SQLite database for storing do run history.""" + + def __init__(self, db_path: Path | None = None): + self.db_path = db_path or Path.home() / ".cortex" / "do_runs.db" + self._ensure_directory() + self._init_db() + + def _ensure_directory(self): + """Ensure the database directory exists with proper permissions.""" + try: + self.db_path.parent.mkdir(parents=True, exist_ok=True) + if not os.access(self.db_path.parent, os.W_OK): + raise OSError(f"Directory {self.db_path.parent} is not writable") + except OSError: + alt_path = Path("/tmp") / ".cortex" / "do_runs.db" + alt_path.parent.mkdir(parents=True, exist_ok=True) + self.db_path = alt_path + console.print(f"[yellow]Warning: Using alternate database path: {self.db_path}[/yellow]") + + def _init_db(self): + """Initialize the database schema.""" + try: + with 
sqlite3.connect(str(self.db_path)) as conn: + conn.execute(""" + CREATE TABLE IF NOT EXISTS do_runs ( + run_id TEXT PRIMARY KEY, + session_id TEXT, + summary TEXT NOT NULL, + commands_log TEXT NOT NULL, + commands_list TEXT, + mode TEXT NOT NULL, + user_query TEXT, + started_at TEXT, + completed_at TEXT, + files_accessed TEXT, + privileges_granted TEXT, + full_data TEXT, + total_commands INTEGER DEFAULT 0, + successful_commands INTEGER DEFAULT 0, + failed_commands INTEGER DEFAULT 0, + skipped_commands INTEGER DEFAULT 0 + ) + """) + + # Create sessions table + conn.execute(""" + CREATE TABLE IF NOT EXISTS do_sessions ( + session_id TEXT PRIMARY KEY, + started_at TEXT, + ended_at TEXT, + total_runs INTEGER DEFAULT 0, + total_queries TEXT + ) + """) + + conn.execute(""" + CREATE TABLE IF NOT EXISTS do_run_commands ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + run_id TEXT NOT NULL, + command_index INTEGER NOT NULL, + command TEXT NOT NULL, + purpose TEXT, + status TEXT NOT NULL, + output_truncated TEXT, + error_truncated TEXT, + duration_seconds REAL DEFAULT 0, + timestamp TEXT, + useful INTEGER DEFAULT 1, + FOREIGN KEY (run_id) REFERENCES do_runs(run_id) + ) + """) + + conn.execute(""" + CREATE INDEX IF NOT EXISTS idx_do_runs_started + ON do_runs(started_at DESC) + """) + + conn.execute(""" + CREATE INDEX IF NOT EXISTS idx_do_run_commands_run_id + ON do_run_commands(run_id) + """) + + self._migrate_schema(conn) + conn.commit() + except sqlite3.OperationalError as e: + raise OSError(f"Failed to initialize database at {self.db_path}: {e}") + + def _migrate_schema(self, conn: sqlite3.Connection): + """Add new columns to existing tables if they don't exist.""" + cursor = conn.execute("PRAGMA table_info(do_runs)") + existing_columns = {row[1] for row in cursor.fetchall()} + + new_columns = [ + ("total_commands", "INTEGER DEFAULT 0"), + ("successful_commands", "INTEGER DEFAULT 0"), + ("failed_commands", "INTEGER DEFAULT 0"), + ("skipped_commands", "INTEGER DEFAULT 0"), + 
("commands_list", "TEXT"), + ("session_id", "TEXT"), + ] + + for col_name, col_type in new_columns: + if col_name not in existing_columns: + try: + conn.execute(f"ALTER TABLE do_runs ADD COLUMN {col_name} {col_type}") + except sqlite3.OperationalError: + pass + + cursor = conn.execute(""" + SELECT run_id, full_data FROM do_runs + WHERE total_commands IS NULL OR total_commands = 0 OR commands_list IS NULL + """) + + for row in cursor.fetchall(): + run_id = row[0] + try: + full_data = json.loads(row[1]) if row[1] else {} + commands = full_data.get("commands", []) + total = len(commands) + success = sum(1 for c in commands if c.get("status") == "success") + failed = sum(1 for c in commands if c.get("status") == "failed") + skipped = sum(1 for c in commands if c.get("status") == "skipped") + + commands_list = json.dumps([c.get("command", "") for c in commands]) + + conn.execute(""" + UPDATE do_runs SET + total_commands = ?, + successful_commands = ?, + failed_commands = ?, + skipped_commands = ?, + commands_list = ? + WHERE run_id = ? + """, (total, success, failed, skipped, commands_list, run_id)) + + for idx, cmd in enumerate(commands): + exists = conn.execute( + "SELECT 1 FROM do_run_commands WHERE run_id = ? AND command_index = ?", + (run_id, idx) + ).fetchone() + + if not exists: + output = cmd.get("output", "")[:250] if cmd.get("output") else "" + error = cmd.get("error", "")[:250] if cmd.get("error") else "" + conn.execute(""" + INSERT INTO do_run_commands + (run_id, command_index, command, purpose, status, + output_truncated, error_truncated, duration_seconds, timestamp, useful) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ """, ( + run_id, + idx, + cmd.get("command", ""), + cmd.get("purpose", ""), + cmd.get("status", "pending"), + output, + error, + cmd.get("duration_seconds", 0), + cmd.get("timestamp", ""), + 1 if cmd.get("useful", True) else 0, + )) + except (json.JSONDecodeError, KeyError): + pass + + def _generate_run_id(self) -> str: + """Generate a unique run ID.""" + timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f") + random_part = hashlib.sha256(os.urandom(16)).hexdigest()[:8] + return f"do_{timestamp}_{random_part}" + + def _truncate_output(self, text: str, max_length: int = 250) -> str: + """Truncate output to specified length.""" + if not text: + return "" + if len(text) <= max_length: + return text + return text[:max_length] + "... [truncated]" + + def save_run(self, run: DoRun) -> str: + """Save a do run to the database with detailed command information.""" + if not run.run_id: + run.run_id = self._generate_run_id() + + commands_log = run.get_commands_log_string() + + total_commands = len(run.commands) + successful_commands = sum(1 for c in run.commands if c.status == CommandStatus.SUCCESS) + failed_commands = sum(1 for c in run.commands if c.status == CommandStatus.FAILED) + skipped_commands = sum(1 for c in run.commands if c.status == CommandStatus.SKIPPED) + + commands_list = json.dumps([cmd.command for cmd in run.commands]) + + with sqlite3.connect(str(self.db_path)) as conn: + conn.execute(""" + INSERT OR REPLACE INTO do_runs + (run_id, session_id, summary, commands_log, commands_list, mode, user_query, started_at, + completed_at, files_accessed, privileges_granted, full_data, + total_commands, successful_commands, failed_commands, skipped_commands) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
+ """, ( + run.run_id, + run.session_id or None, + run.summary, + commands_log, + commands_list, + run.mode.value, + run.user_query, + run.started_at, + run.completed_at, + json.dumps(run.files_accessed), + json.dumps(run.privileges_granted), + json.dumps(run.to_dict()), + total_commands, + successful_commands, + failed_commands, + skipped_commands, + )) + + conn.execute("DELETE FROM do_run_commands WHERE run_id = ?", (run.run_id,)) + + for idx, cmd in enumerate(run.commands): + conn.execute(""" + INSERT INTO do_run_commands + (run_id, command_index, command, purpose, status, + output_truncated, error_truncated, duration_seconds, timestamp, useful) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + """, ( + run.run_id, + idx, + cmd.command, + cmd.purpose, + cmd.status.value, + self._truncate_output(cmd.output, 250), + self._truncate_output(cmd.error, 250), + cmd.duration_seconds, + cmd.timestamp, + 1 if cmd.useful else 0, + )) + + conn.commit() + + return run.run_id + + def get_run(self, run_id: str) -> DoRun | None: + """Get a specific run by ID.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute( + "SELECT * FROM do_runs WHERE run_id = ?", (run_id,) + ) + row = cursor.fetchone() + + if row: + full_data = json.loads(row["full_data"]) + run = DoRun( + run_id=full_data["run_id"], + summary=full_data["summary"], + mode=RunMode(full_data["mode"]), + commands=[CommandLog.from_dict(c) for c in full_data["commands"]], + started_at=full_data.get("started_at", ""), + completed_at=full_data.get("completed_at", ""), + user_query=full_data.get("user_query", ""), + files_accessed=full_data.get("files_accessed", []), + privileges_granted=full_data.get("privileges_granted", []), + session_id=row["session_id"] if "session_id" in row.keys() else "", + ) + return run + return None + + def get_run_commands(self, run_id: str) -> list[dict[str, Any]]: + """Get detailed command information for a run.""" + with 
sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute(""" + SELECT command_index, command, purpose, status, + output_truncated, error_truncated, duration_seconds, timestamp, useful + FROM do_run_commands + WHERE run_id = ? + ORDER BY command_index + """, (run_id,)) + + commands = [] + for row in cursor: + commands.append({ + "index": row["command_index"], + "command": row["command"], + "purpose": row["purpose"], + "status": row["status"], + "output": row["output_truncated"], + "error": row["error_truncated"], + "duration": row["duration_seconds"], + "timestamp": row["timestamp"], + "useful": bool(row["useful"]), + }) + return commands + + def get_run_stats(self, run_id: str) -> dict[str, Any] | None: + """Get command statistics for a run.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute(""" + SELECT run_id, summary, total_commands, successful_commands, + failed_commands, skipped_commands, started_at, completed_at + FROM do_runs WHERE run_id = ? 
+ """, (run_id,)) + row = cursor.fetchone() + + if row: + return { + "run_id": row["run_id"], + "summary": row["summary"], + "total_commands": row["total_commands"] or 0, + "successful_commands": row["successful_commands"] or 0, + "failed_commands": row["failed_commands"] or 0, + "skipped_commands": row["skipped_commands"] or 0, + "started_at": row["started_at"], + "completed_at": row["completed_at"], + } + return None + + def get_commands_list(self, run_id: str) -> list[str]: + """Get just the list of commands for a run.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute( + "SELECT commands_list FROM do_runs WHERE run_id = ?", (run_id,) + ) + row = cursor.fetchone() + + if row and row["commands_list"]: + try: + return json.loads(row["commands_list"]) + except (json.JSONDecodeError, TypeError): + pass + + cursor = conn.execute( + "SELECT command FROM do_run_commands WHERE run_id = ? ORDER BY command_index", + (run_id,) + ) + return [row["command"] for row in cursor.fetchall()] + + def get_recent_runs(self, limit: int = 20) -> list[DoRun]: + """Get recent do runs.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute( + "SELECT full_data, session_id FROM do_runs ORDER BY started_at DESC LIMIT ?", + (limit,) + ) + runs = [] + for row in cursor: + full_data = json.loads(row["full_data"]) + run = DoRun( + run_id=full_data["run_id"], + summary=full_data["summary"], + mode=RunMode(full_data["mode"]), + commands=[CommandLog.from_dict(c) for c in full_data["commands"]], + started_at=full_data.get("started_at", ""), + completed_at=full_data.get("completed_at", ""), + user_query=full_data.get("user_query", ""), + files_accessed=full_data.get("files_accessed", []), + privileges_granted=full_data.get("privileges_granted", []), + ) + run.session_id = row["session_id"] + runs.append(run) + return runs + + def create_session(self) -> str: + """Create a new session 
and return the session ID.""" + session_id = f"session_{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}_{hashlib.md5(str(datetime.datetime.now().timestamp()).encode()).hexdigest()[:8]}" + + with sqlite3.connect(str(self.db_path)) as conn: + conn.execute( + """INSERT INTO do_sessions (session_id, started_at, total_runs, total_queries) + VALUES (?, ?, 0, '[]')""", + (session_id, datetime.datetime.now().isoformat()) + ) + conn.commit() + + return session_id + + def update_session(self, session_id: str, query: str | None = None, increment_runs: bool = False): + """Update a session with new query or run count.""" + with sqlite3.connect(str(self.db_path)) as conn: + if increment_runs: + conn.execute( + "UPDATE do_sessions SET total_runs = total_runs + 1 WHERE session_id = ?", + (session_id,) + ) + + if query: + # Get current queries + cursor = conn.execute( + "SELECT total_queries FROM do_sessions WHERE session_id = ?", + (session_id,) + ) + row = cursor.fetchone() + if row: + queries = json.loads(row[0]) if row[0] else [] + queries.append(query) + conn.execute( + "UPDATE do_sessions SET total_queries = ? WHERE session_id = ?", + (json.dumps(queries), session_id) + ) + + conn.commit() + + def end_session(self, session_id: str): + """Mark a session as ended.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.execute( + "UPDATE do_sessions SET ended_at = ? WHERE session_id = ?", + (datetime.datetime.now().isoformat(), session_id) + ) + conn.commit() + + def get_session_runs(self, session_id: str) -> list[DoRun]: + """Get all runs in a session.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute( + "SELECT full_data FROM do_runs WHERE session_id = ? 
ORDER BY started_at ASC", + (session_id,) + ) + runs = [] + for row in cursor: + full_data = json.loads(row["full_data"]) + run = DoRun( + run_id=full_data["run_id"], + summary=full_data["summary"], + mode=RunMode(full_data["mode"]), + commands=[CommandLog.from_dict(c) for c in full_data["commands"]], + started_at=full_data.get("started_at", ""), + completed_at=full_data.get("completed_at", ""), + user_query=full_data.get("user_query", ""), + ) + run.session_id = session_id + runs.append(run) + return runs + + def get_recent_sessions(self, limit: int = 10) -> list[dict]: + """Get recent sessions with their run counts.""" + with sqlite3.connect(str(self.db_path)) as conn: + conn.row_factory = sqlite3.Row + cursor = conn.execute( + """SELECT session_id, started_at, ended_at, total_runs, total_queries + FROM do_sessions ORDER BY started_at DESC LIMIT ?""", + (limit,) + ) + sessions = [] + for row in cursor: + sessions.append({ + "session_id": row["session_id"], + "started_at": row["started_at"], + "ended_at": row["ended_at"], + "total_runs": row["total_runs"], + "queries": json.loads(row["total_queries"]) if row["total_queries"] else [], + }) + return sessions + diff --git a/cortex/do_runner/diagnosis.py b/cortex/do_runner/diagnosis.py new file mode 100644 index 000000000..0a824611b --- /dev/null +++ b/cortex/do_runner/diagnosis.py @@ -0,0 +1,2804 @@ +""" +Comprehensive Error Diagnosis and Auto-Fix for Cortex Do Runner. + +Handles all categories of Linux system errors: +1. Command & Shell Errors +2. File & Directory Errors +3. Permission & Ownership Errors +4. Process & Execution Errors +5. Memory & Resource Errors +6. Disk & Filesystem Errors +7. Networking Errors +8. Package Manager Errors +9. User & Authentication Errors +10. Device & Hardware Errors +11. Compilation & Build Errors +12. Archive & Compression Errors +13. Shell Script Errors +14. Environment & PATH Errors +15. 
"""
Comprehensive Error Diagnosis and Auto-Fix for Cortex Do Runner.

Handles all categories of Linux system errors:
1. Command & Shell Errors
2. File & Directory Errors
3. Permission & Ownership Errors
4. Process & Execution Errors
5. Memory & Resource Errors
6. Disk & Filesystem Errors
7. Networking Errors
8. Package Manager Errors
9. User & Authentication Errors
10. Device & Hardware Errors
11. Compilation & Build Errors
12. Archive & Compression Errors
13. Shell Script Errors
14. Environment & PATH Errors
15. Miscellaneous System Errors
"""

import os
import re
import subprocess
import shutil
from typing import Any, Callable
from dataclasses import dataclass, field

from rich.console import Console

console = Console()


# ============================================================================
# Error Pattern Definitions by Category
# ============================================================================

@dataclass
class ErrorPattern:
    """Defines an error pattern and its fix strategy.

    Attributes:
        pattern: Regex matched (re.search) against command stderr/stdout.
        error_type: Stable machine identifier for the error.
        category: Coarse grouping used for routing/diagnosis.
        description: Human-readable summary shown to the user.
        can_auto_fix: Whether an automated remediation exists.
        fix_strategy: Name of the remediation strategy to invoke.
        severity: "error" (default), "warning", or "critical".
    """
    pattern: str
    error_type: str
    category: str
    description: str
    can_auto_fix: bool = False
    fix_strategy: str = ""
    severity: str = "error"  # error, warning, critical


# Category 1: Command & Shell Errors
COMMAND_SHELL_ERRORS = [
    # Timeout errors (check first for our specific message).
    # NOTE(review): r"[Tt]imeout" also matches any string containing
    # "timeout", making the preceding two patterns partially redundant —
    # order matters, so the most specific pattern is listed first.
    ErrorPattern(r"[Cc]ommand timed out after \d+ seconds", "command_timeout", "timeout",
                 "Command timed out - operation took too long", True, "retry_with_longer_timeout"),
    ErrorPattern(r"[Tt]imed out", "timeout", "timeout",
                 "Operation timed out", True, "retry_with_longer_timeout"),
    ErrorPattern(r"[Tt]imeout", "timeout", "timeout",
                 "Operation timed out", True, "retry_with_longer_timeout"),
    # Standard command errors
    ErrorPattern(r"command not found", "command_not_found", "command_shell",
                 "Command not installed", True, "install_package"),
    ErrorPattern(r"No such file or directory", "not_found", "command_shell",
                 "File or directory not found", True, "create_path"),
    ErrorPattern(r"Permission denied", "permission_denied", "command_shell",
                 "Permission denied", True, "use_sudo"),
    ErrorPattern(r"Operation not permitted", "operation_not_permitted", "command_shell",
                 "Operation not permitted (may need root)", True, "use_sudo"),
    ErrorPattern(r"Not a directory", "not_a_directory", "command_shell",
                 "Expected directory but found file", False, "check_path"),
    ErrorPattern(r"Is a directory", "is_a_directory", "command_shell",
                 "Expected file but found directory", False, "check_path"),
    ErrorPattern(r"Invalid argument", "invalid_argument", "command_shell",
                 "Invalid argument passed", False, "check_args"),
    ErrorPattern(r"Too many arguments", "too_many_args", "command_shell",
                 "Too many arguments provided", False, "check_args"),
    ErrorPattern(r"[Mm]issing operand", "missing_operand", "command_shell",
                 "Required argument missing", False, "check_args"),
    ErrorPattern(r"[Aa]mbiguous redirect", "ambiguous_redirect", "command_shell",
                 "Shell redirect is ambiguous", False, "fix_redirect"),
    ErrorPattern(r"[Bb]ad substitution", "bad_substitution", "command_shell",
                 "Shell variable substitution error", False, "fix_syntax"),
    ErrorPattern(r"[Uu]nbound variable", "unbound_variable", "command_shell",
                 "Variable not set", True, "set_variable"),
    ErrorPattern(r"[Ss]yntax error near unexpected token", "syntax_error_token", "command_shell",
                 "Shell syntax error", False, "fix_syntax"),
    ErrorPattern(r"[Uu]nexpected EOF", "unexpected_eof", "command_shell",
                 "Unclosed quote or bracket", False, "fix_syntax"),
    ErrorPattern(r"[Cc]annot execute binary file", "cannot_execute_binary", "command_shell",
                 "Binary incompatible with system", False, "check_architecture"),
    ErrorPattern(r"[Ee]xec format error", "exec_format_error", "command_shell",
                 "Invalid executable format", False, "check_architecture"),
    ErrorPattern(r"[Ii]llegal option", "illegal_option", "command_shell",
                 "Unrecognized command option", False, "check_help"),
    ErrorPattern(r"[Ii]nvalid option", "invalid_option", "command_shell",
                 "Invalid command option", False, "check_help"),
    ErrorPattern(r"[Rr]ead-only file ?system", "readonly_fs", "command_shell",
                 "Filesystem is read-only", True, "remount_rw"),
    ErrorPattern(r"[Ii]nput/output error", "io_error", "command_shell",
                 "I/O error (disk issue)", False, "check_disk", severity="critical"),
    ErrorPattern(r"[Tt]ext file busy", "text_file_busy", "command_shell",
                 "File is being executed", True, "wait_retry"),
    ErrorPattern(r"[Aa]rgument list too long", "arg_list_too_long", "command_shell",
                 "Too many arguments for command", True, "use_xargs"),
    ErrorPattern(r"[Bb]roken pipe", "broken_pipe", "command_shell",
                 "Pipe closed unexpectedly", False, "check_pipe"),
]

# Category 2: File & Directory Errors
FILE_DIRECTORY_ERRORS = [
    ErrorPattern(r"[Ff]ile exists", "file_exists", "file_directory",
                 "File already exists", True, "backup_overwrite"),
    ErrorPattern(r"[Ff]ile name too long", "filename_too_long", "file_directory",
                 "Filename exceeds limit", False, "shorten_name"),
    ErrorPattern(r"[Tt]oo many.*symbolic links", "symlink_loop", "file_directory",
                 "Symbolic link loop detected", True, "fix_symlink"),
    ErrorPattern(r"[Ss]tale file handle", "stale_handle", "file_directory",
                 "NFS file handle stale", True, "remount_nfs"),
    ErrorPattern(r"[Dd]irectory not empty", "dir_not_empty", "file_directory",
                 "Directory has contents", True, "rm_recursive"),
    ErrorPattern(r"[Cc]ross-device link", "cross_device_link", "file_directory",
                 "Cannot link across filesystems", True, "copy_instead"),
    ErrorPattern(r"[Tt]oo many open files", "too_many_files", "file_directory",
                 "File descriptor limit reached", True, "increase_ulimit"),
    ErrorPattern(r"[Qq]uota exceeded", "quota_exceeded", "file_directory",
                 "Disk quota exceeded", False, "check_quota"),
    ErrorPattern(r"[Oo]peration timed out", "operation_timeout", "file_directory",
                 "Operation timed out", True, "increase_timeout"),
]

# Category 3: Permission & Ownership Errors
PERMISSION_ERRORS = [
    ErrorPattern(r"[Aa]ccess denied", "access_denied", "permission",
                 "Access denied", True, "use_sudo"),
    ErrorPattern(r"[Aa]uthentication fail", "auth_failure", "permission",
                 "Authentication failed", False, "check_credentials"),
    ErrorPattern(r"[Ii]nvalid user", "invalid_user", "permission",
                 "User does not exist", True, "create_user"),
    ErrorPattern(r"[Ii]nvalid group", "invalid_group", "permission",
                 "Group does not exist", True, "create_group"),
    ErrorPattern(r"[Nn]ot owner", "not_owner", "permission",
                 "Not the owner of file", True, "use_sudo"),
]

# Category 4: Process & Execution Errors
PROCESS_ERRORS = [
    ErrorPattern(r"[Nn]o such process", "no_such_process", "process",
                 "Process does not exist", False, "check_pid"),
    ErrorPattern(r"[Pp]rocess already running", "already_running", "process",
                 "Process already running", True, "kill_existing"),
    ErrorPattern(r"[Pp]rocess terminated", "process_terminated", "process",
                 "Process was terminated", False, "check_logs"),
    ErrorPattern(r"[Kk]illed", "killed", "process",
                 "Process was killed (OOM?)", False, "check_memory", severity="critical"),
    ErrorPattern(r"[Ss]egmentation fault", "segfault", "process",
                 "Memory access violation", False, "debug_crash", severity="critical"),
    ErrorPattern(r"[Bb]us error", "bus_error", "process",
                 "Bus error (memory alignment)", False, "debug_crash", severity="critical"),
    ErrorPattern(r"[Ff]loating point exception", "fpe", "process",
                 "Floating point exception", False, "debug_crash"),
    ErrorPattern(r"[Ii]llegal instruction", "illegal_instruction", "process",
                 "CPU instruction error", False, "check_architecture", severity="critical"),
    ErrorPattern(r"[Tt]race.*trap", "trace_trap", "process",
                 "Debugger trap", False, "check_debugger"),
    ErrorPattern(r"[Rr]esource temporarily unavailable", "resource_unavailable", "process",
                 "Resource busy", True, "wait_retry"),
    ErrorPattern(r"[Tt]oo many processes", "too_many_processes", "process",
                 "Process limit reached", True, "increase_ulimit"),
    ErrorPattern(r"[Oo]peration canceled", "operation_canceled", "process",
                 "Operation was canceled", False, "check_timeout"),
]

# Category 5: Memory & Resource Errors
MEMORY_ERRORS = [
    ErrorPattern(r"[Oo]ut of memory", "oom", "memory",
                 "Out of memory", True, "free_memory", severity="critical"),
    ErrorPattern(r"[Cc]annot allocate memory", "cannot_allocate", "memory",
                 "Memory allocation failed", True, "free_memory", severity="critical"),
    ErrorPattern(r"[Mm]emory exhausted", "memory_exhausted", "memory",
                 "Memory exhausted", True, "free_memory", severity="critical"),
    ErrorPattern(r"[Ss]tack overflow", "stack_overflow", "memory",
                 "Stack overflow", False, "increase_stack", severity="critical"),
    ErrorPattern(r"[Dd]evice or resource busy", "device_busy", "memory",
                 "Device or resource busy", True, "wait_retry"),
    ErrorPattern(r"[Nn]o space left on device", "no_space", "memory",
                 "Disk full", True, "free_disk", severity="critical"),
    ErrorPattern(r"[Dd]isk quota exceeded", "disk_quota", "memory",
                 "Disk quota exceeded", False, "check_quota"),
    ErrorPattern(r"[Ff]ile table overflow", "file_table_overflow", "memory",
                 "System file table full", True, "increase_ulimit", severity="critical"),
]
"free_memory", "critical"), + ErrorPattern(r"[Mm]emory exhausted", "memory_exhausted", "memory", + "Memory exhausted", True, "free_memory", "critical"), + ErrorPattern(r"[Ss]tack overflow", "stack_overflow", "memory", + "Stack overflow", False, "increase_stack", "critical"), + ErrorPattern(r"[Dd]evice or resource busy", "device_busy", "memory", + "Device or resource busy", True, "wait_retry"), + ErrorPattern(r"[Nn]o space left on device", "no_space", "memory", + "Disk full", True, "free_disk", "critical"), + ErrorPattern(r"[Dd]isk quota exceeded", "disk_quota", "memory", + "Disk quota exceeded", False, "check_quota"), + ErrorPattern(r"[Ff]ile table overflow", "file_table_overflow", "memory", + "System file table full", True, "increase_ulimit", "critical"), +] + +# Category 6: Disk & Filesystem Errors +FILESYSTEM_ERRORS = [ + ErrorPattern(r"[Ww]rong fs type", "wrong_fs_type", "filesystem", + "Wrong filesystem type", False, "check_fstype"), + ErrorPattern(r"[Ff]ilesystem.*corrupt", "fs_corrupt", "filesystem", + "Filesystem corrupted", False, "fsck", "critical"), + ErrorPattern(r"[Ss]uperblock invalid", "superblock_invalid", "filesystem", + "Superblock invalid", False, "fsck", "critical"), + ErrorPattern(r"[Mm]ount point does not exist", "mount_point_missing", "filesystem", + "Mount point missing", True, "create_mountpoint"), + ErrorPattern(r"[Dd]evice is busy", "device_busy_mount", "filesystem", + "Device busy (in use)", True, "lazy_umount"), + ErrorPattern(r"[Nn]ot mounted", "not_mounted", "filesystem", + "Filesystem not mounted", True, "mount_fs"), + ErrorPattern(r"[Aa]lready mounted", "already_mounted", "filesystem", + "Already mounted", False, "check_mount"), + ErrorPattern(r"[Bb]ad magic number", "bad_magic", "filesystem", + "Bad magic number in superblock", False, "fsck", "critical"), + ErrorPattern(r"[Ss]tructure needs cleaning", "needs_cleaning", "filesystem", + "Filesystem needs fsck", False, "fsck"), + ErrorPattern(r"[Jj]ournal has aborted", 
"journal_aborted", "filesystem", + "Journal aborted", False, "fsck", "critical"), +] + +# Category 7: Networking Errors +NETWORK_ERRORS = [ + ErrorPattern(r"[Nn]etwork is unreachable", "network_unreachable", "network", + "Network unreachable", True, "check_network"), + ErrorPattern(r"[Nn]o route to host", "no_route", "network", + "No route to host", True, "check_routing"), + ErrorPattern(r"[Cc]onnection refused", "connection_refused", "network", + "Connection refused", True, "check_service"), + ErrorPattern(r"[Cc]onnection timed out", "connection_timeout", "network", + "Connection timed out", True, "check_firewall"), + ErrorPattern(r"[Cc]onnection reset by peer", "connection_reset", "network", + "Connection reset", False, "check_remote"), + ErrorPattern(r"[Hh]ost is down", "host_down", "network", + "Remote host down", False, "check_host"), + ErrorPattern(r"[Tt]emporary failure in name resolution", "dns_temp_fail", "network", + "DNS temporary failure", True, "retry_dns"), + ErrorPattern(r"[Nn]ame or service not known", "dns_unknown", "network", + "DNS lookup failed", True, "check_dns"), + ErrorPattern(r"[Dd]NS lookup failed", "dns_failed", "network", + "DNS lookup failed", True, "check_dns"), + ErrorPattern(r"[Aa]ddress already in use", "address_in_use", "network", + "Port already in use", True, "find_port_user"), + ErrorPattern(r"[Cc]annot assign requested address", "cannot_assign_addr", "network", + "Address not available", False, "check_interface"), + ErrorPattern(r"[Pp]rotocol not supported", "protocol_not_supported", "network", + "Protocol not supported", False, "check_protocol"), + ErrorPattern(r"[Ss]ocket operation on non-socket", "not_socket", "network", + "Invalid socket operation", False, "check_fd"), +] + +# Category 8: Package Manager Errors (Ubuntu/Debian apt) +PACKAGE_ERRORS = [ + ErrorPattern(r"[Uu]nable to locate package", "package_not_found", "package", + "Package not found", True, "update_repos"), + ErrorPattern(r"[Pp]ackage.*not found", 
"package_not_found", "package", + "Package not found", True, "update_repos"), + ErrorPattern(r"[Ff]ailed to fetch", "fetch_failed", "package", + "Failed to download package", True, "change_mirror"), + ErrorPattern(r"[Hh]ash [Ss]um mismatch", "hash_mismatch", "package", + "Package checksum mismatch", True, "clean_apt"), + ErrorPattern(r"[Rr]epository.*not signed", "repo_not_signed", "package", + "Repository not signed", True, "add_key"), + ErrorPattern(r"[Gg][Pp][Gg] error", "gpg_error", "package", + "GPG signature error", True, "fix_gpg"), + ErrorPattern(r"[Dd]ependency problems", "dependency_problems", "package", + "Dependency issues", True, "fix_dependencies"), + ErrorPattern(r"[Uu]nmet dependencies", "unmet_dependencies", "package", + "Unmet dependencies", True, "fix_dependencies"), + ErrorPattern(r"[Bb]roken packages", "broken_packages", "package", + "Broken packages", True, "fix_broken"), + ErrorPattern(r"[Vv]ery bad inconsistent state", "inconsistent_state", "package", + "Package in bad state", True, "force_reinstall"), + ErrorPattern(r"[Cc]onflicts with", "package_conflict", "package", + "Package conflict", True, "resolve_conflict"), + ErrorPattern(r"dpkg.*lock", "dpkg_lock", "package", + "Package manager locked", True, "clear_lock"), + ErrorPattern(r"apt.*lock", "apt_lock", "package", + "APT locked", True, "clear_lock"), + ErrorPattern(r"E: Could not get lock", "could_not_get_lock", "package", + "Package manager locked", True, "clear_lock"), +] + +# Category 9: User & Authentication Errors +USER_AUTH_ERRORS = [ + ErrorPattern(r"[Uu]ser does not exist", "user_not_exist", "user_auth", + "User does not exist", True, "create_user"), + ErrorPattern(r"[Gg]roup does not exist", "group_not_exist", "user_auth", + "Group does not exist", True, "create_group"), + ErrorPattern(r"[Aa]ccount expired", "account_expired", "user_auth", + "Account expired", False, "renew_account"), + ErrorPattern(r"[Pp]assword expired", "password_expired", "user_auth", + "Password expired", 
False, "change_password"), + ErrorPattern(r"[Ii]ncorrect password", "wrong_password", "user_auth", + "Wrong password", False, "check_password"), + ErrorPattern(r"[Aa]ccount locked", "account_locked", "user_auth", + "Account locked", False, "unlock_account"), +] + +# Category 16: Docker/Container Errors +DOCKER_ERRORS = [ + # Container name conflicts + ErrorPattern(r"[Cc]onflict.*container name.*already in use", "container_name_conflict", "docker", + "Container name already in use", True, "remove_or_rename_container"), + ErrorPattern(r"is already in use by container", "container_name_conflict", "docker", + "Container name already in use", True, "remove_or_rename_container"), + # Container not found + ErrorPattern(r"[Nn]o such container", "container_not_found", "docker", + "Container does not exist", True, "check_container_name"), + ErrorPattern(r"[Ee]rror: No such container", "container_not_found", "docker", + "Container does not exist", True, "check_container_name"), + # Image not found + ErrorPattern(r"[Uu]nable to find image", "image_not_found", "docker", + "Docker image not found locally", True, "pull_image"), + ErrorPattern(r"[Rr]epository.*not found", "image_not_found", "docker", + "Docker image repository not found", True, "check_image_name"), + ErrorPattern(r"manifest.*not found", "manifest_not_found", "docker", + "Image manifest not found", True, "check_image_tag"), + # Container already running/stopped + ErrorPattern(r"is already running", "container_already_running", "docker", + "Container is already running", True, "stop_or_use_existing"), + ErrorPattern(r"is not running", "container_not_running", "docker", + "Container is not running", True, "start_container"), + # Port conflicts + ErrorPattern(r"[Pp]ort.*already allocated", "port_in_use", "docker", + "Port is already in use", True, "free_port_or_use_different"), + ErrorPattern(r"[Bb]ind.*address already in use", "port_in_use", "docker", + "Port is already in use", True, "free_port_or_use_different"), + 
# Category 17: Login/Credential Required Errors
LOGIN_REQUIRED_ERRORS = [
    # Docker/Container registry login errors
    ErrorPattern(r"[Uu]sername.*[Rr]equired", "docker_username_required", "login_required",
                 "Docker username required", True, "prompt_docker_login"),
    ErrorPattern(r"[Nn]on-null [Uu]sername", "docker_username_required", "login_required",
                 "Docker username required", True, "prompt_docker_login"),
    ErrorPattern(r"unauthorized.*authentication required", "docker_auth_required", "login_required",
                 "Docker authentication required", True, "prompt_docker_login"),
    ErrorPattern(r"denied.*requested access", "docker_access_denied", "login_required",
                 "Docker registry access denied", True, "prompt_docker_login"),
    ErrorPattern(r"denied:.*access", "docker_access_denied", "login_required",
                 "Docker registry access denied", True, "prompt_docker_login"),
    # NOTE(review): r"access.*denied" is very broad and will also match
    # non-docker "access ... denied" messages; list order is what keeps it
    # from misfiring too often — confirm intended precedence.
    ErrorPattern(r"access.*denied", "docker_access_denied", "login_required",
                 "Docker registry access denied", True, "prompt_docker_login"),
    ErrorPattern(r"no basic auth credentials", "docker_no_credentials", "login_required",
                 "Docker credentials not found", True, "prompt_docker_login"),
    ErrorPattern(r"docker login", "docker_login_needed", "login_required",
                 "Docker login required", True, "prompt_docker_login"),
    # ghcr.io (GitHub Container Registry) specific errors
    ErrorPattern(r"ghcr\.io.*denied", "ghcr_access_denied", "login_required",
                 "GitHub Container Registry access denied - login required", True, "prompt_docker_login"),
    ErrorPattern(r"Head.*ghcr\.io.*denied", "ghcr_access_denied", "login_required",
                 "GitHub Container Registry access denied - login required", True, "prompt_docker_login"),
    # Generic registry denied patterns
    ErrorPattern(r"Error response from daemon.*denied", "registry_access_denied", "login_required",
                 "Container registry access denied - login may be required", True, "prompt_docker_login"),
    ErrorPattern(r"pull access denied", "pull_access_denied", "login_required",
                 "Pull access denied - login required or image doesn't exist", True, "prompt_docker_login"),
    ErrorPattern(r"requested resource.*denied", "resource_access_denied", "login_required",
                 "Resource access denied - authentication required", True, "prompt_docker_login"),

    # Git credential errors
    ErrorPattern(r"[Cc]ould not read.*[Uu]sername", "git_username_required", "login_required",
                 "Git username required", True, "prompt_git_login"),
    ErrorPattern(r"[Ff]atal:.*[Aa]uthentication failed", "git_auth_failed", "login_required",
                 "Git authentication failed", True, "prompt_git_login"),
    ErrorPattern(r"[Pp]assword.*authentication.*removed", "git_token_required", "login_required",
                 "Git token required (password auth disabled)", True, "prompt_git_token"),
    ErrorPattern(r"[Pp]ermission denied.*publickey", "git_ssh_required", "login_required",
                 "Git SSH key required", True, "setup_git_ssh"),

    # npm login errors
    ErrorPattern(r"npm ERR!.*E401", "npm_auth_required", "login_required",
                 "npm authentication required", True, "prompt_npm_login"),
    ErrorPattern(r"npm ERR!.*ENEEDAUTH", "npm_need_auth", "login_required",
                 "npm authentication needed", True, "prompt_npm_login"),
    ErrorPattern(r"You must be logged in", "npm_login_required", "login_required",
                 "npm login required", True, "prompt_npm_login"),

    # AWS credential errors
    ErrorPattern(r"[Uu]nable to locate credentials", "aws_no_credentials", "login_required",
                 "AWS credentials not configured", True, "prompt_aws_configure"),
    ErrorPattern(r"[Ii]nvalid[Aa]ccess[Kk]ey", "aws_invalid_key", "login_required",
                 "AWS access key invalid", True, "prompt_aws_configure"),
    ErrorPattern(r"[Ss]ignature.*[Dd]oes[Nn]ot[Mm]atch", "aws_secret_invalid", "login_required",
                 "AWS secret key invalid", True, "prompt_aws_configure"),
    ErrorPattern(r"[Ee]xpired[Tt]oken", "aws_token_expired", "login_required",
                 "AWS token expired", True, "prompt_aws_configure"),

    # PyPI/pip login errors
    ErrorPattern(r"HTTPError: 403.*upload", "pypi_auth_required", "login_required",
                 "PyPI authentication required", True, "prompt_pypi_login"),

    # Generic credential prompts
    ErrorPattern(r"[Ee]nter.*[Uu]sername", "username_prompt", "login_required",
                 "Username required", True, "prompt_credentials"),
    ErrorPattern(r"[Ee]nter.*[Pp]assword", "password_prompt", "login_required",
                 "Password required", True, "prompt_credentials"),
    ErrorPattern(r"[Aa]ccess [Tt]oken.*[Rr]equired", "token_required", "login_required",
                 "Access token required", True, "prompt_token"),
    ErrorPattern(r"[Aa][Pp][Ii].*[Kk]ey.*[Rr]equired", "api_key_required", "login_required",
                 "API key required", True, "prompt_api_key"),
]

# Category 10: Device & Hardware Errors
DEVICE_ERRORS = [
    ErrorPattern(r"[Nn]o such device", "no_device", "device",
                 "Device not found", False, "check_device"),
    ErrorPattern(r"[Dd]evice not configured", "device_not_configured", "device",
                 "Device not configured", False, "configure_device"),
    ErrorPattern(r"[Hh]ardware error", "hardware_error", "device",
                 "Hardware error", False, "check_hardware", severity="critical"),
    ErrorPattern(r"[Dd]evice offline", "device_offline", "device",
                 "Device offline", False, "bring_online"),
    ErrorPattern(r"[Mm]edia not present", "no_media", "device",
                 "No media in device", False, "insert_media"),
    ErrorPattern(r"[Rr]ead error", "read_error", "device",
                 "Device read error", False, "check_disk", severity="critical"),
    ErrorPattern(r"[Ww]rite error", "write_error", "device",
                 "Device write error", False, "check_disk", severity="critical"),
]
"Device not configured", False, "configure_device"), + ErrorPattern(r"[Hh]ardware error", "hardware_error", "device", + "Hardware error", False, "check_hardware", "critical"), + ErrorPattern(r"[Dd]evice offline", "device_offline", "device", + "Device offline", False, "bring_online"), + ErrorPattern(r"[Mm]edia not present", "no_media", "device", + "No media in device", False, "insert_media"), + ErrorPattern(r"[Rr]ead error", "read_error", "device", + "Device read error", False, "check_disk", "critical"), + ErrorPattern(r"[Ww]rite error", "write_error", "device", + "Device write error", False, "check_disk", "critical"), +] + +# Category 11: Compilation & Build Errors +BUILD_ERRORS = [ + ErrorPattern(r"[Nn]o rule to make target", "no_make_rule", "build", + "Make target not found", False, "check_makefile"), + ErrorPattern(r"[Mm]issing separator", "missing_separator", "build", + "Makefile syntax error", False, "fix_makefile"), + ErrorPattern(r"[Uu]ndefined reference", "undefined_reference", "build", + "Undefined symbol", True, "add_library"), + ErrorPattern(r"[Ss]ymbol lookup error", "symbol_lookup", "build", + "Symbol not found", True, "fix_ldpath"), + ErrorPattern(r"[Ll]ibrary not found", "library_not_found", "build", + "Library not found", True, "install_lib"), + ErrorPattern(r"[Hh]eader.*not found", "header_not_found", "build", + "Header file not found", True, "install_dev"), + ErrorPattern(r"[Rr]elocation error", "relocation_error", "build", + "Relocation error", True, "fix_ldpath"), + ErrorPattern(r"[Cc]ompilation terminated", "compilation_failed", "build", + "Compilation failed", False, "check_errors"), +] + +# Category 12: Archive & Compression Errors +ARCHIVE_ERRORS = [ + ErrorPattern(r"[Uu]nexpected end of file", "unexpected_eof_archive", "archive", + "Archive truncated", False, "redownload"), + ErrorPattern(r"[Cc]orrupt archive", "corrupt_archive", "archive", + "Archive corrupted", False, "redownload"), + ErrorPattern(r"[Ii]nvalid tar magic", "invalid_tar", 
"archive", + "Invalid tar archive", False, "check_format"), + ErrorPattern(r"[Cc]hecksum error", "checksum_error", "archive", + "Checksum mismatch", False, "redownload"), + ErrorPattern(r"[Nn]ot in gzip format", "not_gzip", "archive", + "Not gzip format", False, "check_format"), + ErrorPattern(r"[Dd]ecompression failed", "decompress_failed", "archive", + "Decompression failed", False, "check_format"), +] + +# Category 13: Shell Script Errors +SCRIPT_ERRORS = [ + ErrorPattern(r"[Bb]ad interpreter", "bad_interpreter", "script", + "Interpreter not found", True, "fix_shebang"), + ErrorPattern(r"[Ll]ine \d+:.*command not found", "script_cmd_not_found", "script", + "Command in script not found", True, "install_dependency"), + ErrorPattern(r"[Ii]nteger expression expected", "integer_expected", "script", + "Expected integer", False, "fix_syntax"), + ErrorPattern(r"[Cc]onditional binary operator expected", "conditional_expected", "script", + "Expected conditional", False, "fix_syntax"), +] + +# Category 14: Environment & PATH Errors +ENVIRONMENT_ERRORS = [ + ErrorPattern(r"[Vv]ariable not set", "var_not_set", "environment", + "Environment variable not set", True, "set_variable"), + ErrorPattern(r"[Pp][Aa][Tt][Hh] not set", "path_not_set", "environment", + "PATH not configured", True, "set_path"), + ErrorPattern(r"[Ee]nvironment corrupt", "env_corrupt", "environment", + "Environment corrupted", True, "reset_env"), + ErrorPattern(r"[Ll]ibrary path not found", "lib_path_missing", "environment", + "Library path missing", True, "set_ldpath"), + ErrorPattern(r"LD_LIBRARY_PATH", "ld_path_issue", "environment", + "Library path issue", True, "set_ldpath"), +] + +# Category 15: Service & System Errors +# Category 16: Config File Errors (Nginx, Apache, etc.) 
+CONFIG_ERRORS = [ + # Nginx errors + ErrorPattern(r"nginx:.*\[emerg\]", "nginx_config_error", "config", + "Nginx configuration error", True, "fix_nginx_config"), + ErrorPattern(r"nginx.*syntax.*error", "nginx_syntax_error", "config", + "Nginx syntax error", True, "fix_nginx_config"), + ErrorPattern(r"nginx.*unexpected", "nginx_unexpected", "config", + "Nginx unexpected token", True, "fix_nginx_config"), + ErrorPattern(r"nginx.*unknown directive", "nginx_unknown_directive", "config", + "Nginx unknown directive", True, "fix_nginx_config"), + ErrorPattern(r"nginx.*test failed", "nginx_test_failed", "config", + "Nginx config test failed", True, "fix_nginx_config"), + ErrorPattern(r"nginx.*could not open", "nginx_file_error", "config", + "Nginx could not open file", True, "fix_nginx_permissions"), + # Apache errors + ErrorPattern(r"apache.*syntax error", "apache_syntax_error", "config", + "Apache syntax error", True, "fix_apache_config"), + ErrorPattern(r"apache2?ctl.*configtest", "apache_config_error", "config", + "Apache config test failed", True, "fix_apache_config"), + ErrorPattern(r"[Ss]yntax error on line \d+", "config_line_error", "config", + "Config syntax error at line", True, "fix_config_line"), + # MySQL/MariaDB errors + ErrorPattern(r"mysql.*error.*config", "mysql_config_error", "config", + "MySQL configuration error", True, "fix_mysql_config"), + # PostgreSQL errors + ErrorPattern(r"postgres.*error.*config", "postgres_config_error", "config", + "PostgreSQL configuration error", True, "fix_postgres_config"), + # Generic config errors + ErrorPattern(r"configuration.*syntax", "generic_config_syntax", "config", + "Configuration syntax error", True, "fix_config_syntax"), + ErrorPattern(r"invalid.*configuration", "invalid_config", "config", + "Invalid configuration", True, "fix_config_syntax"), + ErrorPattern(r"[Cc]onfig.*parse error", "config_parse_error", "config", + "Config parse error", True, "fix_config_syntax"), +] + +SERVICE_ERRORS = [ + 
ErrorPattern(r"[Ss]ervice failed to start", "service_failed", "service", + "Service failed to start", True, "check_service_logs"), + ErrorPattern(r"[Uu]nit.*failed", "unit_failed", "service", + "Systemd unit failed", True, "check_service_logs"), + ErrorPattern(r"[Jj]ob for.*\.service failed", "job_failed", "service", + "Service job failed", True, "check_service_logs"), + ErrorPattern(r"[Ff]ailed to start.*\.service", "start_failed", "service", + "Failed to start service", True, "check_service_logs"), + ErrorPattern(r"[Dd]ependency failed", "dependency_failed", "service", + "Service dependency failed", True, "start_dependency"), + ErrorPattern(r"[Ii]nactive.*dead", "service_inactive", "service", + "Service not running", True, "start_service"), + ErrorPattern(r"[Mm]asked", "service_masked", "service", + "Service is masked", True, "unmask_service"), + ErrorPattern(r"[Ee]nabled-runtime", "service_enabled_runtime", "service", + "Service enabled at runtime", False, "check_service"), + ErrorPattern(r"[Cc]ontrol process exited with error", "control_process_error", "service", + "Service control process failed", True, "check_service_logs"), + ErrorPattern(r"[Aa]ctivation.*timed out", "activation_timeout", "service", + "Service activation timed out", True, "check_service_logs"), +] + +# Combine all error patterns +ALL_ERROR_PATTERNS = ( + DOCKER_ERRORS + # Check Docker errors first (common) + LOGIN_REQUIRED_ERRORS + # Check login errors (interactive) + CONFIG_ERRORS + # Check config errors (more specific) + COMMAND_SHELL_ERRORS + + FILE_DIRECTORY_ERRORS + + PERMISSION_ERRORS + + PROCESS_ERRORS + + MEMORY_ERRORS + + FILESYSTEM_ERRORS + + NETWORK_ERRORS + + PACKAGE_ERRORS + + USER_AUTH_ERRORS + + DEVICE_ERRORS + + BUILD_ERRORS + + ARCHIVE_ERRORS + + SCRIPT_ERRORS + + ENVIRONMENT_ERRORS + + SERVICE_ERRORS +) + + +# ============================================================================ +# Login/Credential Requirements Configuration +# 
# ============================================================================
# Login/Credential Requirements Configuration
# ============================================================================

@dataclass
class LoginRequirement:
    """Defines credentials required for a service login.

    Attributes:
        service: Machine identifier of the service.
        display_name: Human-readable service name.
        command_pattern: Regex matched against commands that need this login.
        required_fields: Ordered field names to collect from the user.
        field_prompts: Field name -> prompt text shown to the user.
        field_secret: Field name -> whether input should be hidden.
        login_command_template: str.format template for the login command.
        env_vars: Field name -> environment variable alternative.
        signup_url: Where to create an account, if applicable.
        docs_url: Relevant credential documentation.
    """
    service: str
    display_name: str
    command_pattern: str          # Regex to match commands that need this login
    required_fields: list[str]    # List of field names needed
    field_prompts: dict[str, str]     # Field name -> prompt text
    field_secret: dict[str, bool]     # Field name -> whether to hide input
    login_command_template: str   # Template for login command
    env_vars: dict[str, str] = field(default_factory=dict)  # Optional env var alternatives
    signup_url: str = ""
    docs_url: str = ""


# Login requirements for various services
LOGIN_REQUIREMENTS = {
    "docker": LoginRequirement(
        service="docker",
        display_name="Docker Registry",
        command_pattern=r"docker\s+(login|push|pull)",
        required_fields=["registry", "username", "password"],
        field_prompts={
            "registry": "Registry URL (press Enter for Docker Hub)",
            "username": "Username",
            "password": "Password or Access Token",
        },
        field_secret={"registry": False, "username": False, "password": True},
        # NOTE(security): `-p {password}` exposes the secret in the process
        # list; `--password-stdin` is the safer form — left unchanged here
        # because downstream code may parse this exact template.
        login_command_template="docker login {registry} -u {username} -p {password}",
        env_vars={"username": "DOCKER_USERNAME", "password": "DOCKER_PASSWORD"},
        signup_url="https://hub.docker.com/signup",
        docs_url="https://docs.docker.com/docker-hub/access-tokens/",
    ),
    "ghcr": LoginRequirement(
        service="ghcr",
        display_name="GitHub Container Registry",
        command_pattern=r"docker.*ghcr\.io",
        required_fields=["username", "token"],
        field_prompts={
            "username": "GitHub Username",
            "token": "GitHub Personal Access Token (with packages scope)",
        },
        field_secret={"username": False, "token": True},
        login_command_template="echo {token} | docker login ghcr.io -u {username} --password-stdin",
        env_vars={"token": "GITHUB_TOKEN", "username": "GITHUB_USER"},
        signup_url="https://github.com/join",
        docs_url="https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry",
    ),
    "git_https": LoginRequirement(
        service="git_https",
        display_name="Git (HTTPS)",
        command_pattern=r"git\s+(clone|push|pull|fetch).*https://",
        required_fields=["username", "token"],
        field_prompts={
            "username": "Git Username",
            "token": "Personal Access Token",
        },
        field_secret={"username": False, "token": True},
        login_command_template="git config --global credential.helper store && echo 'https://{username}:{token}@github.com' >> ~/.git-credentials",
        env_vars={"token": "GIT_TOKEN", "username": "GIT_USER"},
        docs_url="https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/creating-a-personal-access-token",
    ),
    "npm": LoginRequirement(
        service="npm",
        display_name="npm Registry",
        command_pattern=r"npm\s+(login|publish|adduser)",
        required_fields=["username", "password", "email"],
        field_prompts={
            "username": "npm Username",
            "password": "npm Password",
            "email": "Email Address",
        },
        field_secret={"username": False, "password": True, "email": False},
        login_command_template="npm login",  # npm login is interactive
        signup_url="https://www.npmjs.com/signup",
        docs_url="https://docs.npmjs.com/creating-and-viewing-access-tokens",
    ),
    "aws": LoginRequirement(
        service="aws",
        display_name="AWS",
        command_pattern=r"aws\s+",
        required_fields=["access_key_id", "secret_access_key", "region"],
        field_prompts={
            "access_key_id": "AWS Access Key ID",
            "secret_access_key": "AWS Secret Access Key",
            "region": "Default Region (e.g., us-east-1)",
        },
        field_secret={"access_key_id": False, "secret_access_key": True, "region": False},
        login_command_template="aws configure set aws_access_key_id {access_key_id} && aws configure set aws_secret_access_key {secret_access_key} && aws configure set region {region}",
        env_vars={
            "access_key_id": "AWS_ACCESS_KEY_ID",
            "secret_access_key": "AWS_SECRET_ACCESS_KEY",
            "region": "AWS_DEFAULT_REGION",
        },
        docs_url="https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html",
    ),
    "pypi": LoginRequirement(
        service="pypi",
        display_name="PyPI",
        command_pattern=r"(twine|pip).*upload",
        required_fields=["username", "token"],
        field_prompts={
            "username": "PyPI Username (use __token__ for API token)",
            "token": "PyPI Password or API Token",
        },
        field_secret={"username": False, "token": True},
        login_command_template="",  # Uses ~/.pypirc
        signup_url="https://pypi.org/account/register/",
        docs_url="https://pypi.org/help/#apitoken",
    ),
    "gcloud": LoginRequirement(
        service="gcloud",
        display_name="Google Cloud",
        command_pattern=r"gcloud\s+",
        required_fields=[],  # Interactive browser auth
        field_prompts={},
        field_secret={},
        login_command_template="gcloud auth login",
        docs_url="https://cloud.google.com/sdk/docs/authorizing",
    ),
    "kubectl": LoginRequirement(
        service="kubectl",
        display_name="Kubernetes",
        command_pattern=r"kubectl\s+",
        required_fields=["kubeconfig"],
        field_prompts={
            "kubeconfig": "Path to kubeconfig file (or press Enter for ~/.kube/config)",
        },
        field_secret={"kubeconfig": False},
        login_command_template="export KUBECONFIG={kubeconfig}",
        env_vars={"kubeconfig": "KUBECONFIG"},
        docs_url="https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/",
    ),
    "heroku": LoginRequirement(
        service="heroku",
        display_name="Heroku",
        command_pattern=r"heroku\s+",
        required_fields=["api_key"],
        field_prompts={
            "api_key": "Heroku API Key",
        },
        field_secret={"api_key": True},
        login_command_template="heroku auth:token",  # Interactive
        env_vars={"api_key": "HEROKU_API_KEY"},
        signup_url="https://signup.heroku.com/",
        docs_url="https://devcenter.heroku.com/articles/authentication",
    ),
}
Ubuntu Package Mappings +# ============================================================================ + +UBUNTU_PACKAGE_MAP = { + # Commands to packages + "nginx": "nginx", "apache2": "apache2", "httpd": "apache2", + "mysql": "mysql-server", "mysqld": "mysql-server", + "postgres": "postgresql", "psql": "postgresql-client", + "redis": "redis-server", "redis-server": "redis-server", + "mongo": "mongodb", "mongod": "mongodb", + "node": "nodejs", "npm": "npm", "yarn": "yarnpkg", + "python": "python3", "python3": "python3", "pip": "python3-pip", "pip3": "python3-pip", + "docker": "docker.io", "docker-compose": "docker-compose", + "git": "git", "curl": "curl", "wget": "wget", + "vim": "vim", "nano": "nano", "emacs": "emacs", + "gcc": "gcc", "g++": "g++", "make": "make", "cmake": "cmake", + "java": "default-jdk", "javac": "default-jdk", + "ruby": "ruby", "gem": "ruby", + "go": "golang-go", "cargo": "cargo", "rustc": "rustc", + "php": "php", "composer": "composer", + "ffmpeg": "ffmpeg", "imagemagick": "imagemagick", "convert": "imagemagick", + "htop": "htop", "tree": "tree", "jq": "jq", + "nc": "netcat-openbsd", "netcat": "netcat-openbsd", + "ss": "iproute2", "ip": "iproute2", + "dig": "dnsutils", "nslookup": "dnsutils", + "zip": "zip", "unzip": "unzip", + "tar": "tar", "gzip": "gzip", + "rsync": "rsync", "ssh": "openssh-client", "sshd": "openssh-server", + "screen": "screen", "tmux": "tmux", + "awk": "gawk", "sed": "sed", "grep": "grep", + "setfacl": "acl", "getfacl": "acl", + "lsof": "lsof", "strace": "strace", + # System monitoring tools + "sensors": "lm-sensors", "sensors-detect": "lm-sensors", + "htop": "htop", "iotop": "iotop", "iftop": "iftop", + "nmap": "nmap", "netstat": "net-tools", "ifconfig": "net-tools", + "smartctl": "smartmontools", "hdparm": "hdparm", + # Optional tools (may not be in all repos) + "snap": "snapd", "flatpak": "flatpak", +} + +UBUNTU_SERVICE_MAP = { + "nginx": "nginx", + "apache": "apache2", + "mysql": "mysql", + "postgresql": "postgresql", 
+ "redis": "redis-server", + "mongodb": "mongod", + "docker": "docker", + "ssh": "ssh", + "cron": "cron", + "ufw": "ufw", +} + + +# ============================================================================ +# Error Diagnoser Class +# ============================================================================ + +class ErrorDiagnoser: + """Comprehensive error diagnosis for all system error types.""" + + def __init__(self): + self._compile_patterns() + + def _compile_patterns(self): + """Pre-compile regex patterns for performance.""" + self._compiled_patterns = [] + for ep in ALL_ERROR_PATTERNS: + try: + compiled = re.compile(ep.pattern, re.IGNORECASE | re.MULTILINE) + self._compiled_patterns.append((compiled, ep)) + except re.error: + console.print(f"[yellow]Warning: Invalid pattern: {ep.pattern}[/yellow]") + + def extract_path_from_error(self, stderr: str, cmd: str) -> str | None: + """Extract the problematic file path from an error message.""" + patterns = [ + r"cannot (?:access|open|create|stat|read|write) ['\"]?([/\w\.\-_]+)['\"]?", + r"['\"]([/\w\.\-_]+)['\"]?: (?:Permission denied|No such file)", + r"open\(\) ['\"]([/\w\.\-_]+)['\"]? 
failed", + r"failed to open ['\"]?([/\w\.\-_]+)['\"]?", + r"couldn't open (?:temporary )?file ([/\w\.\-_]+)", + r"([/\w\.\-_]+): Permission denied", + r"([/\w\.\-_]+): No such file or directory", + r"mkdir: cannot create directory ['\"]?([/\w\.\-_]+)['\"]?", + r"touch: cannot touch ['\"]?([/\w\.\-_]+)['\"]?", + r"cp: cannot (?:create|stat|access) ['\"]?([/\w\.\-_]+)['\"]?", + ] + + for pattern in patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + path = match.group(1) + if path.startswith("/"): + return path + + # Extract from command itself + for part in cmd.split(): + if part.startswith("/") and any(c in part for c in ["/etc/", "/var/", "/usr/", "/home/", "/opt/", "/tmp/"]): + return part + + return None + + def extract_service_from_error(self, stderr: str, cmd: str) -> str | None: + """Extract service name from error message or command.""" + cmd_parts = cmd.split() + + # From systemctl/service commands + for i, part in enumerate(cmd_parts): + if part in ["systemctl", "service"]: + for j in range(i + 1, len(cmd_parts)): + candidate = cmd_parts[j] + if candidate not in ["start", "stop", "restart", "reload", "status", + "enable", "disable", "is-active", "is-enabled", + "-q", "--quiet", "--no-pager"]: + return candidate.replace(".service", "") + + # From error message + patterns = [ + r"(?:Unit|Service) ([a-zA-Z0-9\-_]+)(?:\.service)? (?:not found|failed|could not)", + r"Failed to (?:start|stop|restart|enable|disable) ([a-zA-Z0-9\-_]+)", + r"([a-zA-Z0-9\-_]+)\.service", + ] + + for pattern in patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + return match.group(1).replace(".service", "") + + return None + + def extract_package_from_error(self, stderr: str, cmd: str) -> str | None: + """Extract package name from error.""" + patterns = [ + r"[Uu]nable to locate package ([a-zA-Z0-9\-_\.]+)", + r"[Pp]ackage '?([a-zA-Z0-9\-_\.]+)'? (?:is )?not (?:found|installed)", + r"[Nn]o package '?([a-zA-Z0-9\-_\.]+)'? 
(?:found|available)", + r"apt.*install.*?([a-zA-Z0-9\-_\.]+)", + ] + + for pattern in patterns: + match = re.search(pattern, stderr + " " + cmd, re.IGNORECASE) + if match: + return match.group(1) + + return None + + def extract_port_from_error(self, stderr: str) -> int | None: + """Extract port number from error.""" + patterns = [ + r"[Pp]ort (\d+)", + r"[Aa]ddress.*:(\d+)", + r":(\d{2,5})\s", + ] + + for pattern in patterns: + match = re.search(pattern, stderr) + if match: + port = int(match.group(1)) + if 1 <= port <= 65535: + return port + + return None + + def _extract_container_name(self, stderr: str) -> str | None: + """Extract Docker container name from error message.""" + patterns = [ + r'container name ["\'/]([a-zA-Z0-9_\-]+)["\'/]', + r'["\'/]([a-zA-Z0-9_\-]+)["\'/] is already in use', + r'container ["\']?([a-zA-Z0-9_\-]+)["\']?', + r'No such container:?\s*([a-zA-Z0-9_\-]+)', + ] + + for pattern in patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + return match.group(1) + + return None + + def _extract_image_name(self, stderr: str, cmd: str) -> str | None: + """Extract Docker image name from error or command.""" + # From command + if "docker" in cmd: + parts = cmd.split() + for i, part in enumerate(parts): + if part in ["run", "pull", "push"]: + # Look for image name after flags + for j in range(i + 1, len(parts)): + candidate = parts[j] + if not candidate.startswith("-") and "/" in candidate or ":" in candidate: + return candidate + elif not candidate.startswith("-") and j == len(parts) - 1: + return candidate + + # From error + patterns = [ + r'[Uu]nable to find image ["\']([^"\']+)["\']', + r'repository ["\']?([^"\':\s]+(?::[^"\':\s]+)?)["\']? 
not found', + r'manifest for ([^\s]+) not found', + ] + + for pattern in patterns: + match = re.search(pattern, stderr) + if match: + return match.group(1) + + return None + + def _extract_port(self, stderr: str) -> str | None: + """Extract port from Docker error.""" + patterns = [ + r'[Pp]ort (\d+)', + r':(\d+)->', + r'address.*:(\d+)', + r'-p\s*(\d+):', + ] + + for pattern in patterns: + match = re.search(pattern, stderr) + if match: + return match.group(1) + + return None + + def extract_config_file_and_line(self, stderr: str) -> tuple[str | None, int | None]: + """Extract config file path and line number from error.""" + patterns = [ + r'in\s+(/[^\s:]+):(\d+)', # "in /path:line" + r'at\s+(/[^\s:]+):(\d+)', # "at /path:line" + r'(/[^\s:]+):(\d+):', # "/path:line:" + r'line\s+(\d+)\s+of\s+(/[^\s:]+)', # "line X of /path" + r'(/[^\s:]+)\s+line\s+(\d+)', # "/path line X" + ] + + for pattern in patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + groups = match.groups() + if groups[0].startswith("/"): + return groups[0], int(groups[1]) + elif len(groups) > 1 and groups[1].startswith("/"): + return groups[1], int(groups[0]) + + return None, None + + def extract_command_from_error(self, stderr: str) -> str | None: + """Extract the failing command name from error.""" + patterns = [ + r"'([a-zA-Z0-9\-_]+)'.*command not found", + r"([a-zA-Z0-9\-_]+): command not found", + r"bash: ([a-zA-Z0-9\-_]+):", + r"/usr/bin/env: '?([a-zA-Z0-9\-_]+)'?:", + ] + + for pattern in patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + return match.group(1) + + return None + + def diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: + """ + Comprehensive error diagnosis using pattern matching. + + Returns a detailed diagnosis dict with: + - error_type: Specific error type + - category: Error category (command_shell, network, etc.) 
+ - description: Human-readable description + - fix_commands: Suggested fix commands + - can_auto_fix: Whether we can auto-fix + - fix_strategy: Strategy name for auto-fixer + - extracted_info: Extracted paths, services, etc. + - severity: error, warning, or critical + """ + diagnosis = { + "error_type": "unknown", + "category": "unknown", + "description": stderr[:300] if len(stderr) > 300 else stderr, + "fix_commands": [], + "can_auto_fix": False, + "fix_strategy": "", + "extracted_path": None, + "extracted_info": {}, + "severity": "error", + } + + stderr_lower = stderr.lower() + + # Extract common info + diagnosis["extracted_path"] = self.extract_path_from_error(stderr, cmd) + diagnosis["extracted_info"]["service"] = self.extract_service_from_error(stderr, cmd) + diagnosis["extracted_info"]["package"] = self.extract_package_from_error(stderr, cmd) + diagnosis["extracted_info"]["port"] = self.extract_port_from_error(stderr) + + config_file, line_num = self.extract_config_file_and_line(stderr) + if config_file: + diagnosis["extracted_info"]["config_file"] = config_file + diagnosis["extracted_info"]["line_num"] = line_num + + # Match against compiled patterns + for compiled, ep in self._compiled_patterns: + if compiled.search(stderr): + diagnosis["error_type"] = ep.error_type + diagnosis["category"] = ep.category + diagnosis["description"] = ep.description + diagnosis["can_auto_fix"] = ep.can_auto_fix + diagnosis["fix_strategy"] = ep.fix_strategy + diagnosis["severity"] = ep.severity + + # Generate fix commands based on category and strategy + self._generate_fix_commands(diagnosis, cmd, stderr) + + return diagnosis + + # Fallback: try generic patterns + if "permission denied" in stderr_lower: + diagnosis["error_type"] = "permission_denied" + diagnosis["category"] = "permission" + diagnosis["description"] = "Permission denied" + diagnosis["can_auto_fix"] = True + diagnosis["fix_strategy"] = "use_sudo" + if not cmd.strip().startswith("sudo"): + 
diagnosis["fix_commands"] = [f"sudo {cmd}"] + + elif "not found" in stderr_lower or "no such" in stderr_lower: + diagnosis["error_type"] = "not_found" + diagnosis["category"] = "file_directory" + diagnosis["description"] = "File or directory not found" + if diagnosis["extracted_path"]: + diagnosis["can_auto_fix"] = True + diagnosis["fix_strategy"] = "create_path" + + return diagnosis + + def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None: + """Generate specific fix commands based on the error type and strategy.""" + strategy = diagnosis.get("fix_strategy", "") + extracted = diagnosis.get("extracted_info", {}) + path = diagnosis.get("extracted_path") + + # Permission/Sudo strategies + if strategy == "use_sudo": + if not cmd.strip().startswith("sudo"): + diagnosis["fix_commands"] = [f"sudo {cmd}"] + + # Path creation strategies + elif strategy == "create_path": + if path: + parent = os.path.dirname(path) + if parent: + diagnosis["fix_commands"] = [f"sudo mkdir -p {parent}"] + + # Package installation + elif strategy == "install_package": + missing_cmd = self.extract_command_from_error(stderr) or cmd.split()[0] + pkg = UBUNTU_PACKAGE_MAP.get(missing_cmd, missing_cmd) + diagnosis["fix_commands"] = [ + "sudo apt-get update", + f"sudo apt-get install -y {pkg}" + ] + diagnosis["extracted_info"]["missing_command"] = missing_cmd + diagnosis["extracted_info"]["suggested_package"] = pkg + + # Service management + elif strategy == "start_service" or strategy == "check_service": + service = extracted.get("service") + if service: + diagnosis["fix_commands"] = [ + f"sudo systemctl start {service}", + f"sudo systemctl status {service}" + ] + + elif strategy == "check_service_logs": + service = extracted.get("service") + if service: + # For web servers, check for port conflicts and common issues + if service in ("apache2", "httpd", "nginx"): + diagnosis["fix_commands"] = [ + # First check what's using port 80 + "sudo lsof -i :80 -t | head -1", + # Stop 
conflicting services + "sudo systemctl stop nginx 2>/dev/null || true", + "sudo systemctl stop apache2 2>/dev/null || true", + # Test config + f"sudo {'apache2ctl' if service == 'apache2' else 'nginx'} -t 2>&1 || true", + # Now try starting + f"sudo systemctl start {service}", + ] + elif service in ("mysql", "mariadb", "postgresql", "postgres"): + diagnosis["fix_commands"] = [ + # Check disk space + "df -h /var/lib 2>/dev/null | tail -1", + # Check permissions + f"sudo chown -R {'mysql:mysql' if 'mysql' in service or 'mariadb' in service else 'postgres:postgres'} /var/lib/{'mysql' if 'mysql' in service or 'mariadb' in service else 'postgresql'} 2>/dev/null || true", + # Restart + f"sudo systemctl start {service}", + ] + else: + # Generic service - check logs and try restart + diagnosis["fix_commands"] = [ + f"sudo journalctl -u {service} -n 20 --no-pager 2>&1 | tail -10", + f"sudo systemctl reset-failed {service} 2>/dev/null || true", + f"sudo systemctl start {service}", + ] + + elif strategy == "unmask_service": + service = extracted.get("service") + if service: + diagnosis["fix_commands"] = [ + f"sudo systemctl unmask {service}", + f"sudo systemctl start {service}" + ] + + # Config file fixes + elif strategy in ["fix_nginx_config", "fix_nginx_permissions"]: + config_file = extracted.get("config_file") + line_num = extracted.get("line_num") + if config_file: + diagnosis["fix_commands"] = [ + f"sudo nginx -t 2>&1", + f"# Check config at: {config_file}" + (f":{line_num}" if line_num else ""), + ] + else: + diagnosis["fix_commands"] = [ + "sudo nginx -t 2>&1", + "# Check /etc/nginx/nginx.conf and sites-enabled/*", + ] + + elif strategy == "fix_apache_config": + config_file = extracted.get("config_file") + diagnosis["fix_commands"] = [ + "sudo apache2ctl configtest", + "sudo apache2ctl -S", # Show virtual hosts + ] + if config_file: + diagnosis["fix_commands"].append(f"# Check config at: {config_file}") + + elif strategy in ["fix_config_syntax", "fix_config_line"]: + 
config_file = extracted.get("config_file") + line_num = extracted.get("line_num") + if config_file and line_num: + diagnosis["fix_commands"] = [ + f"sudo head -n {line_num + 5} {config_file} | tail -n 10", + f"# Edit: sudo nano +{line_num} {config_file}", + ] + elif config_file: + diagnosis["fix_commands"] = [ + f"sudo cat {config_file}", + f"# Edit: sudo nano {config_file}", + ] + + elif strategy == "fix_mysql_config": + diagnosis["fix_commands"] = [ + "sudo mysql --help --verbose 2>&1 | grep -A 1 'Default options'", + "# Edit: sudo nano /etc/mysql/mysql.conf.d/mysqld.cnf", + ] + + elif strategy == "fix_postgres_config": + diagnosis["fix_commands"] = [ + "sudo -u postgres psql -c 'SHOW config_file;'", + "# Edit: sudo nano /etc/postgresql/*/main/postgresql.conf", + ] + + # Package manager + elif strategy == "clear_lock": + diagnosis["fix_commands"] = [ + "sudo rm -f /var/lib/dpkg/lock-frontend", + "sudo rm -f /var/lib/dpkg/lock", + "sudo rm -f /var/cache/apt/archives/lock", + "sudo dpkg --configure -a" + ] + + elif strategy == "update_repos": + pkg = extracted.get("package") + diagnosis["fix_commands"] = ["sudo apt-get update"] + if pkg: + diagnosis["fix_commands"].append(f"apt-cache search {pkg}") + + elif strategy == "fix_dependencies": + diagnosis["fix_commands"] = [ + "sudo apt-get install -f", + "sudo dpkg --configure -a", + "sudo apt-get update", + "sudo apt-get upgrade" + ] + + elif strategy == "fix_broken": + diagnosis["fix_commands"] = [ + "sudo apt-get install -f", + "sudo dpkg --configure -a", + "sudo apt-get clean", + "sudo apt-get update" + ] + + elif strategy == "clean_apt": + diagnosis["fix_commands"] = [ + "sudo apt-get clean", + "sudo rm -rf /var/lib/apt/lists/*", + "sudo apt-get update" + ] + + elif strategy == "fix_gpg": + diagnosis["fix_commands"] = [ + "sudo apt-key adv --refresh-keys --keyserver keyserver.ubuntu.com", + "sudo apt-get update" + ] + + # Docker strategies + elif strategy == "remove_or_rename_container": + container_name = 
self._extract_container_name(stderr) + if container_name: + diagnosis["fix_commands"] = [ + f"docker rm -f {container_name}", + "# Or rename: docker rename {container_name} {container_name}_old" + ] + diagnosis["suggestion"] = f"Container '{container_name}' already exists. Removing it and retrying." + else: + diagnosis["fix_commands"] = [ + "docker ps -a", + "# Then: docker rm -f " + ] + + elif strategy == "stop_or_use_existing": + container_name = self._extract_container_name(stderr) + diagnosis["fix_commands"] = [ + f"docker stop {container_name}" if container_name else "docker stop ", + "# Or connect to existing: docker exec -it /bin/sh" + ] + + elif strategy == "start_container": + container_name = self._extract_container_name(stderr) + diagnosis["fix_commands"] = [ + f"docker start {container_name}" if container_name else "docker start " + ] + + elif strategy == "pull_image": + image_name = self._extract_image_name(stderr, cmd) + diagnosis["fix_commands"] = [ + f"docker pull {image_name}" if image_name else "docker pull " + ] + + elif strategy == "free_port_or_use_different": + port = self._extract_port(stderr) + if port: + diagnosis["fix_commands"] = [ + f"sudo lsof -i :{port}", + f"# Kill process using port: sudo kill $(sudo lsof -t -i:{port})", + f"# Or use different port: -p {int(port)+1}:{port}" + ] + else: + diagnosis["fix_commands"] = ["docker ps", "# Check which ports are in use"] + + elif strategy == "start_docker_daemon": + diagnosis["fix_commands"] = [ + "sudo systemctl start docker", + "sudo systemctl status docker" + ] + + elif strategy == "create_volume": + volume_name = extracted.get("volume") + diagnosis["fix_commands"] = [ + f"docker volume create {volume_name}" if volume_name else "docker volume create " + ] + + elif strategy == "create_network": + network_name = extracted.get("network") + diagnosis["fix_commands"] = [ + f"docker network create {network_name}" if network_name else "docker network create " + ] + + elif strategy == 
"check_container_name": + diagnosis["fix_commands"] = [ + "docker ps -a", + "# Check container names and use correct one" + ] + + # Timeout strategies + elif strategy == "retry_with_longer_timeout": + # Check if this is an interactive command that needs TTY + interactive_patterns = ["docker exec -it", "docker run -it", "-ti ", "ollama run", "ollama chat"] + is_interactive = any(p in cmd.lower() for p in interactive_patterns) + + if is_interactive: + diagnosis["fix_commands"] = [ + "# This is an INTERACTIVE command that requires a terminal (TTY)", + "# Run it manually in a separate terminal window:", + f"# {cmd}", + ] + diagnosis["description"] = "Interactive command cannot run in background" + diagnosis["suggestion"] = "This command needs interactive input. Please run it in a separate terminal." + else: + diagnosis["fix_commands"] = [ + "# This command timed out - it may still be running or need more time", + "# For docker pull: The image may be very large, try again with better network", + "# Check if the operation completed in background", + ] + diagnosis["suggestion"] = "The operation timed out. This often happens with large downloads. You can retry manually." 
+ diagnosis["can_auto_fix"] = False # Let user decide what to do + + # Network strategies + elif strategy == "check_network": + diagnosis["fix_commands"] = [ + "ping -c 2 8.8.8.8", + "ip route", + "cat /etc/resolv.conf" + ] + + elif strategy == "check_dns": + diagnosis["fix_commands"] = [ + "cat /etc/resolv.conf", + "systemd-resolve --status", + "sudo systemctl restart systemd-resolved" + ] + + elif strategy == "check_service": + port = extracted.get("port") + if port: + diagnosis["fix_commands"] = [ + f"sudo ss -tlnp sport = :{port}", + f"sudo lsof -i :{port}" + ] + + elif strategy == "find_port_user": + port = extracted.get("port") + if port: + diagnosis["fix_commands"] = [ + f"sudo lsof -i :{port}", + f"sudo ss -tlnp sport = :{port}", + f"# Kill process: sudo kill " + ] + + elif strategy == "check_firewall": + diagnosis["fix_commands"] = [ + "sudo ufw status", + "sudo iptables -L -n" + ] + + # Disk/Memory strategies + elif strategy == "free_disk": + diagnosis["fix_commands"] = [ + "df -h", + "sudo apt-get clean", + "sudo apt-get autoremove -y", + "sudo journalctl --vacuum-size=100M", + "du -sh /var/log/*" + ] + + elif strategy == "free_memory": + diagnosis["fix_commands"] = [ + "free -h", + "sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches", + "top -b -n 1 | head -20" + ] + + elif strategy == "increase_ulimit": + diagnosis["fix_commands"] = [ + "ulimit -a", + "# Add to /etc/security/limits.conf:", + "# * soft nofile 65535", + "# * hard nofile 65535" + ] + + # Filesystem strategies + elif strategy == "remount_rw": + if path: + mount_point = self._find_mount_point(path) + if mount_point: + diagnosis["fix_commands"] = [f"sudo mount -o remount,rw {mount_point}"] + + elif strategy == "create_mountpoint": + if path: + diagnosis["fix_commands"] = [f"sudo mkdir -p {path}"] + + elif strategy == "mount_fs": + diagnosis["fix_commands"] = ["mount", "cat /etc/fstab"] + + # User strategies + elif strategy == "create_user": + # Extract username from error if possible + 
match = re.search(r"user '?([a-zA-Z0-9_-]+)'?", stderr, re.IGNORECASE) + if match: + user = match.group(1) + diagnosis["fix_commands"] = [ + f"sudo useradd -m {user}", + f"sudo passwd {user}" + ] + + elif strategy == "create_group": + match = re.search(r"group '?([a-zA-Z0-9_-]+)'?", stderr, re.IGNORECASE) + if match: + group = match.group(1) + diagnosis["fix_commands"] = [f"sudo groupadd {group}"] + + # Build strategies + elif strategy == "install_lib": + lib_match = re.search(r"library.*?([a-zA-Z0-9_-]+)", stderr, re.IGNORECASE) + if lib_match: + lib = lib_match.group(1) + diagnosis["fix_commands"] = [ + f"apt-cache search {lib}", + f"# Install with: sudo apt-get install lib{lib}-dev" + ] + + elif strategy == "install_dev": + header_match = re.search(r"([a-zA-Z0-9_/]+\.h)", stderr) + if header_match: + header = header_match.group(1) + diagnosis["fix_commands"] = [ + f"apt-file search {header}", + "# Install the -dev package that provides this header" + ] + + elif strategy == "fix_ldpath": + diagnosis["fix_commands"] = [ + "sudo ldconfig", + "echo $LD_LIBRARY_PATH", + "cat /etc/ld.so.conf.d/*.conf" + ] + + # Wait/Retry strategies + elif strategy == "wait_retry": + diagnosis["fix_commands"] = [ + "sleep 2", + f"# Then retry: {cmd}" + ] + + # Script strategies + elif strategy == "fix_shebang": + if path: + diagnosis["fix_commands"] = [ + f"head -1 {path}", + "# Fix shebang line to point to correct interpreter", + "# e.g., #!/usr/bin/env python3" + ] + + # Environment strategies + elif strategy == "set_variable": + var_match = re.search(r"([A-Z_]+).*not set", stderr, re.IGNORECASE) + if var_match: + var = var_match.group(1) + diagnosis["fix_commands"] = [ + f"export {var}=", + f"# Add to ~/.bashrc: export {var}=" + ] + + elif strategy == "set_path": + diagnosis["fix_commands"] = [ + "echo $PATH", + "export PATH=$PATH:/usr/local/bin", + "# Add to ~/.bashrc" + ] + + elif strategy == "set_ldpath": + diagnosis["fix_commands"] = [ + "echo $LD_LIBRARY_PATH", + "export 
LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH", + "sudo ldconfig" + ] + + # Backup/Overwrite strategy + elif strategy == "backup_overwrite": + if path: + diagnosis["fix_commands"] = [ + f"sudo mv {path} {path}.backup", + f"# Then retry: {cmd}" + ] + + # Symlink strategy + elif strategy == "fix_symlink": + if path: + diagnosis["fix_commands"] = [ + f"ls -la {path}", + f"readlink -f {path}", + f"# Remove broken symlink: sudo rm {path}" + ] + + # Directory not empty + elif strategy == "rm_recursive": + if path: + diagnosis["fix_commands"] = [ + f"ls -la {path}", + f"# Remove recursively (CAUTION): sudo rm -rf {path}" + ] + + # Copy instead of link + elif strategy == "copy_instead": + diagnosis["fix_commands"] = [ + "# Use cp instead of ln/mv for cross-device operations", + f"# cp -a " + ] + + def _find_mount_point(self, path: str) -> str | None: + """Find the mount point for a given path.""" + try: + path = os.path.abspath(path) + while path != "/": + if os.path.ismount(path): + return path + path = os.path.dirname(path) + return "/" + except: + return None + + +# ============================================================================ +# Login Handler Class +# ============================================================================ + +class LoginHandler: + """Handles interactive login/credential prompts for various services.""" + + CREDENTIALS_FILE = os.path.expanduser("~/.cortex/credentials.json") + + def __init__(self): + self.cached_credentials: dict[str, dict] = {} + self._ensure_credentials_dir() + self._load_saved_credentials() + + def _ensure_credentials_dir(self) -> None: + """Ensure the credentials directory exists with proper permissions.""" + cred_dir = os.path.dirname(self.CREDENTIALS_FILE) + if not os.path.exists(cred_dir): + os.makedirs(cred_dir, mode=0o700, exist_ok=True) + + def _encode_credential(self, value: str) -> str: + """Encode a credential value (basic obfuscation, not encryption).""" + import base64 + return 
base64.b64encode(value.encode()).decode() + + def _decode_credential(self, encoded: str) -> str: + """Decode a credential value.""" + import base64 + try: + return base64.b64decode(encoded.encode()).decode() + except Exception: + return "" + + def _load_saved_credentials(self) -> None: + """Load saved credentials from file.""" + import json + + if not os.path.exists(self.CREDENTIALS_FILE): + return + + try: + with open(self.CREDENTIALS_FILE, 'r') as f: + saved = json.load(f) + + # Decode all saved credentials + for service, creds in saved.items(): + decoded = {} + for field, value in creds.items(): + if field.startswith("_"): # metadata fields + decoded[field] = value + else: + decoded[field] = self._decode_credential(value) + self.cached_credentials[service] = decoded + + except (json.JSONDecodeError, IOError) as e: + console.print(f"[dim]Note: Could not load saved credentials: {e}[/dim]") + + def _save_credentials(self, service: str, credentials: dict[str, str]) -> None: + """Save credentials to file.""" + import json + from datetime import datetime + + # Load existing credentials + all_creds = {} + if os.path.exists(self.CREDENTIALS_FILE): + try: + with open(self.CREDENTIALS_FILE, 'r') as f: + all_creds = json.load(f) + except (json.JSONDecodeError, IOError): + pass + + # Encode new credentials + encoded = {} + for field, value in credentials.items(): + if value: # Only save non-empty values + encoded[field] = self._encode_credential(value) + + # Add metadata + encoded["_saved_at"] = datetime.now().isoformat() + + all_creds[service] = encoded + + # Save to file with restricted permissions + try: + with open(self.CREDENTIALS_FILE, 'w') as f: + json.dump(all_creds, f, indent=2) + os.chmod(self.CREDENTIALS_FILE, 0o600) # Read/write only for owner + console.print(f"[green]✓ Credentials saved to {self.CREDENTIALS_FILE}[/green]") + except IOError as e: + console.print(f"[yellow]Warning: Could not save credentials: {e}[/yellow]") + + def _delete_saved_credentials(self, 
service: str) -> None: + """Delete saved credentials for a service.""" + import json + + if not os.path.exists(self.CREDENTIALS_FILE): + return + + try: + with open(self.CREDENTIALS_FILE, 'r') as f: + all_creds = json.load(f) + + if service in all_creds: + del all_creds[service] + + with open(self.CREDENTIALS_FILE, 'w') as f: + json.dump(all_creds, f, indent=2) + + console.print(f"[dim]Removed saved credentials for {service}[/dim]") + except (json.JSONDecodeError, IOError): + pass + + def _has_saved_credentials(self, service: str) -> bool: + """Check if we have saved credentials for a service.""" + return service in self.cached_credentials and bool(self.cached_credentials[service]) + + def _ask_use_saved(self, service: str, requirement: LoginRequirement) -> bool: + """Ask user if they want to use saved credentials.""" + saved = self.cached_credentials.get(service, {}) + + # Show what we have saved (without showing secrets) + saved_fields = [] + for field in requirement.required_fields: + if field in saved and saved[field]: + if requirement.field_secret.get(field, False): + saved_fields.append(f"{field}=****") + else: + value = saved[field] + # Truncate long values + if len(value) > 20: + value = value[:17] + "..." + saved_fields.append(f"{field}={value}") + + if not saved_fields: + return False + + console.print() + console.print(f"[cyan]📁 Found saved credentials for {requirement.display_name}:[/cyan]") + console.print(f"[dim] {', '.join(saved_fields)}[/dim]") + + if "_saved_at" in saved: + console.print(f"[dim] Saved: {saved['_saved_at'][:19]}[/dim]") + + console.print() + try: + response = input("Use saved credentials? 
(y/n/delete): ").strip().lower() + except (EOFError, KeyboardInterrupt): + return False + + if response in ["d", "delete", "del", "remove"]: + self._delete_saved_credentials(service) + if service in self.cached_credentials: + del self.cached_credentials[service] + return False + + return response in ["y", "yes", ""] + + def _ask_save_credentials(self, service: str, credentials: dict[str, str]) -> None: + """Ask user if they want to save credentials for next time.""" + console.print() + console.print(f"[cyan]💾 Save these credentials for next time?[/cyan]") + console.print(f"[dim] Credentials will be stored in {self.CREDENTIALS_FILE}[/dim]") + console.print(f"[dim] (encoded, readable only by you)[/dim]") + + try: + response = input("Save credentials? (y/n): ").strip().lower() + except (EOFError, KeyboardInterrupt): + return + + if response in ["y", "yes"]: + self._save_credentials(service, credentials) + # Also update cache + self.cached_credentials[service] = credentials.copy() + + def detect_login_requirement(self, cmd: str, stderr: str) -> LoginRequirement | None: + """Detect which service needs login based on command and error.""" + cmd_lower = cmd.lower() + stderr_lower = stderr.lower() + + # Check for specific registries in docker commands + if "docker" in cmd_lower: + if "ghcr.io" in cmd_lower or "ghcr.io" in stderr_lower: + return LOGIN_REQUIREMENTS.get("ghcr") + if "gcr.io" in cmd_lower or "gcr.io" in stderr_lower: + return LOGIN_REQUIREMENTS.get("gcloud") + return LOGIN_REQUIREMENTS.get("docker") + + # Check other services + for service, req in LOGIN_REQUIREMENTS.items(): + if re.search(req.command_pattern, cmd, re.IGNORECASE): + return req + + return None + + def check_env_credentials(self, requirement: LoginRequirement) -> dict[str, str]: + """Check if credentials are available in environment variables.""" + found = {} + for field, env_var in requirement.env_vars.items(): + value = os.environ.get(env_var) + if value: + found[field] = value + return found 
+ + def prompt_for_credentials( + self, + requirement: LoginRequirement, + pre_filled: dict[str, str] | None = None + ) -> dict[str, str] | None: + """Prompt user for required credentials.""" + import getpass + + console.print() + console.print(f"[bold cyan]🔐 {requirement.display_name} Authentication Required[/bold cyan]") + console.print() + + if requirement.signup_url: + console.print(f"[dim]Don't have an account? Sign up at: {requirement.signup_url}[/dim]") + if requirement.docs_url: + console.print(f"[dim]Documentation: {requirement.docs_url}[/dim]") + console.print() + + # Check for existing env vars + env_creds = self.check_env_credentials(requirement) + if env_creds: + console.print(f"[green]Found credentials in environment: {', '.join(env_creds.keys())}[/green]") + + credentials = pre_filled.copy() if pre_filled else {} + credentials.update(env_creds) + + try: + for field in requirement.required_fields: + if field in credentials and credentials[field]: + console.print(f"[dim]{requirement.field_prompts[field]}: (using existing)[/dim]") + continue + + prompt_text = requirement.field_prompts.get(field, f"Enter {field}") + is_secret = requirement.field_secret.get(field, False) + + # Handle special defaults + default_value = "" + if field == "registry": + default_value = "docker.io" + elif field == "region": + default_value = "us-east-1" + elif field == "kubeconfig": + default_value = os.path.expanduser("~/.kube/config") + + if default_value: + prompt_text = f"{prompt_text} [{default_value}]" + + console.print(f"[bold]{prompt_text}:[/bold] ", end="") + + if is_secret: + value = getpass.getpass("") + else: + try: + value = input() + except (EOFError, KeyboardInterrupt): + console.print("\n[yellow]Authentication cancelled.[/yellow]") + return None + + # Use default if empty + if not value and default_value: + value = default_value + console.print(f"[dim]Using default: {default_value}[/dim]") + + if not value and field != "registry": # registry can be empty for 
Docker Hub + console.print(f"[red]Error: {field} is required.[/red]") + return None + + credentials[field] = value + + return credentials + + except (EOFError, KeyboardInterrupt): + console.print("\n[yellow]Authentication cancelled.[/yellow]") + return None + + def execute_login( + self, + requirement: LoginRequirement, + credentials: dict[str, str] + ) -> tuple[bool, str, str]: + """Execute the login command with provided credentials.""" + + # Build the login command + if not requirement.login_command_template: + return False, "", "No login command template defined" + + # Handle special cases + if requirement.service == "docker" and credentials.get("registry") in ["", "docker.io"]: + credentials["registry"] = "" # Docker Hub doesn't need registry in command + + # Format the command + try: + login_cmd = requirement.login_command_template.format(**credentials) + except KeyError as e: + return False, "", f"Missing credential: {e}" + + # For Docker, use stdin for password to avoid it showing in ps + if requirement.service in ["docker", "ghcr"]: + password = credentials.get("password") or credentials.get("token", "") + username = credentials.get("username", "") + registry = credentials.get("registry", "") + + if requirement.service == "ghcr": + registry = "ghcr.io" + + # Build safe command + if registry: + cmd_parts = ["docker", "login", registry, "-u", username, "--password-stdin"] + else: + cmd_parts = ["docker", "login", "-u", username, "--password-stdin"] + + console.print(f"[dim]Executing: docker login {registry or 'docker.io'} -u {username}[/dim]") + + try: + process = subprocess.Popen( + cmd_parts, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + stdout, stderr = process.communicate(input=password, timeout=60) + return process.returncode == 0, stdout.strip(), stderr.strip() + except subprocess.TimeoutExpired: + process.kill() + return False, "", "Login timed out" + except Exception as e: + return False, "", str(e) + 
+ # For other services, execute directly + console.print(f"[dim]Executing login...[/dim]") + try: + result = subprocess.run( + login_cmd, + shell=True, + capture_output=True, + text=True, + timeout=120, + ) + return result.returncode == 0, result.stdout.strip(), result.stderr.strip() + except subprocess.TimeoutExpired: + return False, "", "Login timed out" + except Exception as e: + return False, "", str(e) + + def handle_login(self, cmd: str, stderr: str) -> tuple[bool, str]: + """ + Main entry point: detect login requirement, prompt, and execute. + + Returns: + (success, message) + """ + requirement = self.detect_login_requirement(cmd, stderr) + + if not requirement: + return False, "Could not determine which service needs authentication" + + used_saved = False + credentials = None + + # Check for saved credentials first + if self._has_saved_credentials(requirement.service): + if self._ask_use_saved(requirement.service, requirement): + # Use saved credentials + credentials = self.cached_credentials.get(requirement.service, {}).copy() + # Remove metadata fields + credentials = {k: v for k, v in credentials.items() if not k.startswith("_")} + used_saved = True + + console.print(f"[cyan]Using saved credentials...[/cyan]") + success, stdout, login_stderr = self.execute_login(requirement, credentials) + + if success: + console.print(f"[green]✓ Successfully logged in to {requirement.display_name} using saved credentials[/green]") + return True, f"Logged in to {requirement.display_name} using saved credentials" + else: + console.print(f"[yellow]Saved credentials didn't work: {login_stderr[:100] if login_stderr else 'Unknown error'}[/yellow]") + console.print(f"[dim]Let's enter new credentials...[/dim]") + credentials = None + used_saved = False + + # Prompt for new credentials if we don't have valid ones + if not credentials: + # Pre-fill with any partial saved credentials (like username) + pre_filled = {} + if requirement.service in self.cached_credentials: + saved = 
self.cached_credentials[requirement.service] + for field in requirement.required_fields: + if field in saved and saved[field] and not requirement.field_secret.get(field, False): + pre_filled[field] = saved[field] + + credentials = self.prompt_for_credentials(requirement, pre_filled if pre_filled else None) + + if not credentials: + return False, "Authentication cancelled by user" + + # Execute login + success, stdout, login_stderr = self.execute_login(requirement, credentials) + + if success: + console.print(f"[green]✓ Successfully logged in to {requirement.display_name}[/green]") + + # Ask to save credentials if they weren't from saved file + if not used_saved: + self._ask_save_credentials(requirement.service, credentials) + + # Update session cache + self.cached_credentials[requirement.service] = credentials.copy() + + return True, f"Successfully authenticated with {requirement.display_name}" + else: + error_msg = login_stderr or "Login failed" + console.print(f"[red]✗ Login failed: {error_msg}[/red]") + + # Offer to retry + console.print() + try: + retry = input("Would you like to try again? 
(y/n): ").strip().lower() + except (EOFError, KeyboardInterrupt): + retry = "n" + + if retry in ["y", "yes"]: + # Clear cached credentials for this service since they failed + if requirement.service in self.cached_credentials: + del self.cached_credentials[requirement.service] + return self.handle_login(cmd, stderr) # Recursive retry + + return False, f"Login failed: {error_msg}" + + +# Auto-Fixer Class +# ============================================================================ + +class AutoFixer: + """Auto-fixes errors based on diagnosis.""" + + def __init__(self, llm_callback: Callable[[str, dict], dict] | None = None): + self.diagnoser = ErrorDiagnoser() + self.llm_callback = llm_callback + # Track all attempted fixes across multiple calls to avoid repeating + self._attempted_fixes: dict[str, set[str]] = {} # cmd -> set of fix commands tried + self._attempted_strategies: dict[str, set[str]] = {} # cmd -> set of strategies tried + + def _get_fix_key(self, cmd: str) -> str: + """Generate a key for tracking fixes for a command.""" + # Normalize the command (strip sudo, whitespace) + normalized = cmd.strip() + if normalized.startswith("sudo "): + normalized = normalized[5:].strip() + return normalized + + def _is_fix_attempted(self, original_cmd: str, fix_cmd: str) -> bool: + """Check if a fix command has already been attempted for this command.""" + key = self._get_fix_key(original_cmd) + fix_normalized = fix_cmd.strip() + + if key not in self._attempted_fixes: + return False + + return fix_normalized in self._attempted_fixes[key] + + def _mark_fix_attempted(self, original_cmd: str, fix_cmd: str) -> None: + """Mark a fix command as attempted.""" + key = self._get_fix_key(original_cmd) + + if key not in self._attempted_fixes: + self._attempted_fixes[key] = set() + + self._attempted_fixes[key].add(fix_cmd.strip()) + + def _is_strategy_attempted(self, original_cmd: str, strategy: str, error_type: str) -> bool: + """Check if a strategy has been attempted for this 
command/error combination.""" + key = f"{self._get_fix_key(original_cmd)}:{error_type}" + + if key not in self._attempted_strategies: + return False + + return strategy in self._attempted_strategies[key] + + def _mark_strategy_attempted(self, original_cmd: str, strategy: str, error_type: str) -> None: + """Mark a strategy as attempted for this command/error combination.""" + key = f"{self._get_fix_key(original_cmd)}:{error_type}" + + if key not in self._attempted_strategies: + self._attempted_strategies[key] = set() + + self._attempted_strategies[key].add(strategy) + + def reset_attempts(self, cmd: str | None = None) -> None: + """Reset attempted fixes tracking. If cmd is None, reset all.""" + if cmd is None: + self._attempted_fixes.clear() + self._attempted_strategies.clear() + else: + key = self._get_fix_key(cmd) + if key in self._attempted_fixes: + del self._attempted_fixes[key] + # Clear all strategies for this command + to_delete = [k for k in self._attempted_strategies if k.startswith(key)] + for k in to_delete: + del self._attempted_strategies[k] + + def _get_llm_fix(self, cmd: str, stderr: str, diagnosis: dict) -> dict | None: + """Use LLM to diagnose error and suggest fix commands. + + This is called when pattern matching fails to identify the error. + """ + if not self.llm_callback: + return None + + context = { + "error_command": cmd, + "error_output": stderr[:1000], # Truncate for LLM context + "current_diagnosis": diagnosis, + } + + # Create a targeted prompt for error diagnosis + prompt = f"""Analyze this Linux command error and provide fix commands. + +FAILED COMMAND: {cmd} + +ERROR OUTPUT: +{stderr[:800]} + +Provide a JSON response with: +1. "fix_commands": list of shell commands to fix this error (in order) +2. 
"reasoning": brief explanation of the error and fix + +Focus on common issues: +- Docker: container already exists (docker rm -f ), port conflicts, daemon not running +- Permissions: use sudo, create directories +- Services: systemctl start/restart +- Files: mkdir -p, touch, chown + +Example response: +{{"fix_commands": ["docker rm -f ollama", "docker run ..."], "reasoning": "Container 'ollama' already exists, removing it first"}}""" + + try: + response = self.llm_callback(prompt, context) + + if response and response.get("response_type") != "error": + # Check if the response contains fix commands directly + if response.get("fix_commands"): + return { + "fix_commands": response["fix_commands"], + "reasoning": response.get("reasoning", "AI-suggested fix") + } + + # Check if it's a do_commands response + if response.get("do_commands"): + return { + "fix_commands": [cmd["command"] for cmd in response["do_commands"]], + "reasoning": response.get("reasoning", "AI-suggested fix") + } + + # Try to parse answer as fix suggestion + if response.get("answer"): + # Extract commands from natural language response + answer = response["answer"] + commands = [] + for line in answer.split('\n'): + line = line.strip() + if line.startswith('$') or line.startswith('sudo ') or line.startswith('docker '): + commands.append(line.lstrip('$ ')) + if commands: + return { + "fix_commands": commands, + "reasoning": "Extracted from AI response" + } + + return None + + except Exception as e: + console.print(f"[dim] LLM fix generation failed: {e}[/dim]") + return None + + def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + """Execute a single command.""" + import sys + + try: + if needs_sudo and not cmd.strip().startswith("sudo"): + cmd = f"sudo {cmd}" + + # Handle comments + if cmd.strip().startswith("#"): + return True, "", "" + + # For sudo commands, we need to handle the password prompt specially + is_sudo = 
cmd.strip().startswith("sudo") + + if is_sudo: + # Flush output before sudo to ensure clean state + sys.stdout.flush() + sys.stderr.flush() + + result = subprocess.run( + cmd, + shell=True, + capture_output=True, + text=True, + timeout=timeout, + ) + + if is_sudo: + # After sudo, ensure console is in clean state + # Print empty line to reset cursor position after potential password prompt + sys.stdout.write('\n') + sys.stdout.flush() + + return result.returncode == 0, result.stdout.strip(), result.stderr.strip() + except subprocess.TimeoutExpired: + return False, "", f"Command timed out after {timeout} seconds" + except Exception as e: + return False, "", str(e) + + def auto_fix_error( + self, + cmd: str, + stderr: str, + diagnosis: dict[str, Any], + max_attempts: int = 5, + ) -> tuple[bool, str, list[str]]: + """ + General-purpose auto-fix system with retry logic. + + Tracks attempted fixes to avoid repeating the same fixes. + + Returns: + Tuple of (fixed, message, commands_executed) + """ + all_commands_executed = [] + current_stderr = stderr + current_diagnosis = diagnosis + attempt = 0 + skipped_attempts = 0 + max_skips = 3 # Max attempts to skip before giving up + + while attempt < max_attempts and skipped_attempts < max_skips: + attempt += 1 + error_type = current_diagnosis.get("error_type", "unknown") + strategy = current_diagnosis.get("fix_strategy", "") + category = current_diagnosis.get("category", "unknown") + + # Check if this strategy was already attempted for this error + if self._is_strategy_attempted(cmd, strategy, error_type): + console.print(f"[dim] Skipping already-tried strategy: {strategy} for {error_type}[/dim]") + skipped_attempts += 1 + + # Try to get a different diagnosis by re-analyzing + if current_stderr: + # Force a different approach by marking current strategy as exhausted + current_diagnosis["fix_strategy"] = "" + current_diagnosis["can_auto_fix"] = False + continue + + # Mark this strategy as attempted + 
self._mark_strategy_attempted(cmd, strategy, error_type) + + # Check fix commands that would be generated + fix_commands = current_diagnosis.get("fix_commands", []) + + # Filter out already-attempted fix commands + new_fix_commands = [] + for fix_cmd in fix_commands: + if fix_cmd.startswith("#"): # Comments are always allowed + new_fix_commands.append(fix_cmd) + elif self._is_fix_attempted(cmd, fix_cmd): + console.print(f"[dim] Skipping already-executed: {fix_cmd[:50]}...[/dim]") + else: + new_fix_commands.append(fix_cmd) + + # If all fix commands were already tried, skip this attempt + if fix_commands and not new_fix_commands: + console.print(f"[dim] All fix commands already tried for {error_type}[/dim]") + skipped_attempts += 1 + continue + + # Update diagnosis with filtered commands + current_diagnosis["fix_commands"] = new_fix_commands + + # Reset skip counter since we found something new to try + skipped_attempts = 0 + + severity = current_diagnosis.get("severity", "error") + + # Visual grouping for auto-fix attempts + from rich.panel import Panel + from rich.text import Text + + fix_title = Text() + fix_title.append("🔧 AUTO-FIX ", style="bold yellow") + fix_title.append(f"Attempt {attempt}/{max_attempts}", style="dim") + + severity_color = "red" if severity == "critical" else "yellow" + fix_content = Text() + if severity == "critical": + fix_content.append("⚠️ CRITICAL: ", style="bold red") + fix_content.append(f"[{category}] ", style="dim") + fix_content.append(error_type, style=f"bold {severity_color}") + + console.print() + console.print(Panel( + fix_content, + title=fix_title, + title_align="left", + border_style=severity_color, + padding=(0, 1), + )) + + # Ensure output is flushed before executing fixes + import sys + sys.stdout.flush() + + fixed, message, commands = self.apply_single_fix(cmd, current_stderr, current_diagnosis) + + # Mark all executed commands as attempted + for exec_cmd in commands: + self._mark_fix_attempted(cmd, exec_cmd) + 
all_commands_executed.extend(commands) + + if fixed: + # Check if it's just a "use sudo" suggestion + if message == "Will retry with sudo": + sudo_cmd = f"sudo {cmd}" if not cmd.startswith("sudo") else cmd + + # Check if we already tried sudo + if self._is_fix_attempted(cmd, sudo_cmd): + console.print(f"[dim] Already tried sudo, skipping...[/dim]") + skipped_attempts += 1 + continue + + self._mark_fix_attempted(cmd, sudo_cmd) + success, stdout, new_stderr = self._execute_command(sudo_cmd) + all_commands_executed.append(sudo_cmd) + + if success: + console.print(Panel( + "[bold green]✓ Fixed with sudo[/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + )) + return True, f"Fixed with sudo after {attempt} attempt(s)", all_commands_executed + else: + current_stderr = new_stderr + current_diagnosis = self.diagnoser.diagnose_error(cmd, new_stderr) + continue + + # Verify the original command now works + console.print(Panel( + f"[bold cyan]✓ Fix applied:[/bold cyan] {message}\n[dim]Verifying original command...[/dim]", + border_style="cyan", + padding=(0, 1), + expand=False, + )) + + verify_cmd = f"sudo {cmd}" if not cmd.startswith("sudo") else cmd + success, stdout, new_stderr = self._execute_command(verify_cmd) + all_commands_executed.append(verify_cmd) + + if success: + console.print(Panel( + "[bold green]✓ Verified![/bold green] Command now succeeds", + border_style="green", + padding=(0, 1), + expand=False, + )) + return True, f"Fixed after {attempt} attempt(s): {message}", all_commands_executed + else: + new_diagnosis = self.diagnoser.diagnose_error(cmd, new_stderr) + + if new_diagnosis["error_type"] == error_type: + console.print(f" [dim yellow]Same error persists, trying different approach...[/dim yellow]") + else: + console.print(f" [yellow]New error: {new_diagnosis['error_type']}[/yellow]") + + current_stderr = new_stderr + current_diagnosis = new_diagnosis + else: + console.print(f" [dim red]Fix attempt failed: {message}[/dim red]") + 
console.print(f" [dim]Trying fallback...[/dim]") + + # Try with sudo as fallback + sudo_fallback = f"sudo {cmd}" + if not cmd.strip().startswith("sudo") and not self._is_fix_attempted(cmd, sudo_fallback): + self._mark_fix_attempted(cmd, sudo_fallback) + success, _, new_stderr = self._execute_command(sudo_fallback) + all_commands_executed.append(sudo_fallback) + + if success: + return True, f"Fixed with sudo fallback", all_commands_executed + + current_stderr = new_stderr + current_diagnosis = self.diagnoser.diagnose_error(cmd, new_stderr) + else: + if cmd.strip().startswith("sudo"): + console.print(f"[dim] Already running with sudo, no more fallbacks[/dim]") + else: + console.print(f"[dim] Sudo fallback already tried[/dim]") + break + + # Final summary of what was attempted + unique_attempts = len(self._attempted_fixes.get(self._get_fix_key(cmd), set())) + if unique_attempts > 0: + console.print(f"[dim] Total unique fixes attempted: {unique_attempts}[/dim]") + + return False, f"Could not fix after {attempt} attempts ({skipped_attempts} skipped as duplicates)", all_commands_executed + + def apply_single_fix( + self, + cmd: str, + stderr: str, + diagnosis: dict[str, Any], + ) -> tuple[bool, str, list[str]]: + """Apply a single fix attempt based on the error diagnosis.""" + error_type = diagnosis.get("error_type", "unknown") + category = diagnosis.get("category", "unknown") + strategy = diagnosis.get("fix_strategy", "") + fix_commands = diagnosis.get("fix_commands", []) + extracted = diagnosis.get("extracted_info", {}) + path = diagnosis.get("extracted_path") + + commands_executed = [] + + # Strategy-based fixes + + # === Use Sudo === + if strategy == "use_sudo" or error_type in ["permission_denied", "operation_not_permitted", "access_denied"]: + if not cmd.strip().startswith("sudo"): + console.print("[dim] Adding sudo...[/dim]") + return True, "Will retry with sudo", [] + + # === Create Path === + if strategy == "create_path" or error_type == "not_found": + 
missing_path = path or extracted.get("missing_path") + + if missing_path: + parent_dir = os.path.dirname(missing_path) + + if parent_dir and not os.path.exists(parent_dir): + console.print(f"[dim] Creating directory: {parent_dir}[/dim]") + mkdir_cmd = f"sudo mkdir -p {parent_dir}" + success, _, mkdir_err = self._execute_command(mkdir_cmd) + commands_executed.append(mkdir_cmd) + + if success: + return True, f"Created directory {parent_dir}", commands_executed + else: + return False, f"Failed to create directory: {mkdir_err}", commands_executed + + # === Install Package === + if strategy == "install_package" or error_type == "command_not_found": + missing_cmd = extracted.get("missing_command") or self.diagnoser.extract_command_from_error(stderr) + if not missing_cmd: + missing_cmd = cmd.split()[0] if cmd.split() else "" + + suggested_pkg = UBUNTU_PACKAGE_MAP.get(missing_cmd, missing_cmd) + + if missing_cmd: + console.print(f"[dim] Installing package: {suggested_pkg}[/dim]") + + # Update repos first + update_cmd = "sudo apt-get update" + self._execute_command(update_cmd) + commands_executed.append(update_cmd) + + # Install package + install_cmd = f"sudo apt-get install -y {suggested_pkg}" + success, _, install_err = self._execute_command(install_cmd) + commands_executed.append(install_cmd) + + if success: + return True, f"Installed {suggested_pkg}", commands_executed + else: + # Try without suggested package mapping + if suggested_pkg != missing_cmd: + install_cmd2 = f"sudo apt-get install -y {missing_cmd}" + success, _, _ = self._execute_command(install_cmd2) + commands_executed.append(install_cmd2) + if success: + return True, f"Installed {missing_cmd}", commands_executed + + return False, f"Failed to install: {install_err[:100]}", commands_executed + + # === Clear Package Lock === + if strategy == "clear_lock" or error_type in ["dpkg_lock", "apt_lock", "could_not_get_lock"]: + console.print("[dim] Clearing package locks...[/dim]") + + lock_cmds = [ + "sudo rm -f 
/var/lib/dpkg/lock-frontend", + "sudo rm -f /var/lib/dpkg/lock", + "sudo rm -f /var/cache/apt/archives/lock", + "sudo dpkg --configure -a", + ] + + for lock_cmd in lock_cmds: + self._execute_command(lock_cmd) + commands_executed.append(lock_cmd) + + return True, "Cleared package locks", commands_executed + + # === Fix Dependencies === + if strategy in ["fix_dependencies", "fix_broken"]: + console.print("[dim] Fixing package dependencies...[/dim]") + + fix_cmds = [ + "sudo apt-get install -f -y", + "sudo dpkg --configure -a", + ] + + for fix_cmd in fix_cmds: + success, _, _ = self._execute_command(fix_cmd) + commands_executed.append(fix_cmd) + + return True, "Attempted dependency fix", commands_executed + + # === Start Service === + if strategy in ["start_service", "check_service"] or error_type in ["service_inactive", "service_not_running"]: + service = extracted.get("service") + + if service: + console.print(f"[dim] Starting service: {service}[/dim]") + start_cmd = f"sudo systemctl start {service}" + success, _, start_err = self._execute_command(start_cmd) + commands_executed.append(start_cmd) + + if success: + return True, f"Started service {service}", commands_executed + else: + # Try enable --now + enable_cmd = f"sudo systemctl enable --now {service}" + success, _, _ = self._execute_command(enable_cmd) + commands_executed.append(enable_cmd) + if success: + return True, f"Enabled and started {service}", commands_executed + return False, f"Failed to start {service}: {start_err[:100]}", commands_executed + + # === Unmask Service === + if strategy == "unmask_service" or error_type == "service_masked": + service = extracted.get("service") + + if service: + console.print(f"[dim] Unmasking service: {service}[/dim]") + unmask_cmd = f"sudo systemctl unmask {service}" + success, _, _ = self._execute_command(unmask_cmd) + commands_executed.append(unmask_cmd) + + if success: + start_cmd = f"sudo systemctl start {service}" + self._execute_command(start_cmd) + 
commands_executed.append(start_cmd) + return True, f"Unmasked and started {service}", commands_executed + + # === Free Disk Space === + if strategy == "free_disk" or error_type == "no_space": + console.print("[dim] Cleaning up disk space...[/dim]") + + cleanup_cmds = [ + "sudo apt-get clean", + "sudo apt-get autoremove -y", + "sudo journalctl --vacuum-size=100M", + ] + + for cleanup_cmd in cleanup_cmds: + self._execute_command(cleanup_cmd) + commands_executed.append(cleanup_cmd) + + return True, "Freed disk space", commands_executed + + # === Free Memory === + if strategy == "free_memory" or error_type in ["oom", "cannot_allocate", "memory_exhausted"]: + console.print("[dim] Freeing memory...[/dim]") + + mem_cmds = [ + "sudo sync", + "echo 3 | sudo tee /proc/sys/vm/drop_caches", + ] + + for mem_cmd in mem_cmds: + self._execute_command(mem_cmd) + commands_executed.append(mem_cmd) + + return True, "Freed memory caches", commands_executed + + # === Fix Config Syntax (all config error types) === + config_error_types = [ + "config_syntax_error", "nginx_config_error", "nginx_syntax_error", + "nginx_unexpected", "nginx_unknown_directive", "nginx_test_failed", + "apache_syntax_error", "apache_config_error", "config_line_error", + "mysql_config_error", "postgres_config_error", "generic_config_syntax", + "invalid_config", "config_parse_error", "syntax_error" + ] + + if error_type in config_error_types or category == "config": + config_file = extracted.get("config_file") + line_num = extracted.get("line_num") + + # Try to extract config file/line from error if not already done + if not config_file: + config_file, line_num = self.diagnoser.extract_config_file_and_line(stderr) + + if config_file and line_num: + console.print(f"[dim] Config error at {config_file}:{line_num}[/dim]") + fixed, msg = self.fix_config_syntax(config_file, line_num, stderr, cmd) + if fixed: + # Verify the fix (e.g., nginx -t) + if "nginx" in error_type or "nginx" in cmd.lower(): + verify_cmd = "sudo 
nginx -t" + v_success, _, v_stderr = self._execute_command(verify_cmd) + commands_executed.append(verify_cmd) + if v_success: + return True, f"{msg} - nginx config now valid", commands_executed + else: + console.print(f"[yellow] Config still has errors[/yellow]") + # Re-diagnose for next iteration + return False, f"{msg} but still has errors", commands_executed + return True, msg, commands_executed + else: + return False, msg, commands_executed + else: + # Can't find specific line, provide general guidance + if "nginx" in error_type or "nginx" in cmd.lower(): + console.print("[dim] Testing nginx config...[/dim]") + test_cmd = "sudo nginx -t 2>&1" + success, stdout, test_err = self._execute_command(test_cmd) + commands_executed.append(test_cmd) + if not success: + # Try to extract file/line from test output + cf, ln = self.diagnoser.extract_config_file_and_line(test_err) + if cf and ln: + fixed, msg = self.fix_config_syntax(cf, ln, test_err, cmd) + if fixed: + return True, msg, commands_executed + return False, "Could not identify config file/line to fix", commands_executed + + # === Network Fixes === + if category == "network": + if strategy == "check_dns" or error_type in ["dns_temp_fail", "dns_unknown", "dns_failed"]: + console.print("[dim] Restarting DNS resolver...[/dim]") + dns_cmd = "sudo systemctl restart systemd-resolved" + success, _, _ = self._execute_command(dns_cmd) + commands_executed.append(dns_cmd) + if success: + return True, "Restarted DNS resolver", commands_executed + + if strategy == "find_port_user" or error_type == "address_in_use": + port = extracted.get("port") + if port: + console.print(f"[dim] Port {port} in use, checking...[/dim]") + lsof_cmd = f"sudo lsof -i :{port}" + success, stdout, _ = self._execute_command(lsof_cmd) + commands_executed.append(lsof_cmd) + if stdout: + console.print(f"[dim] Process using port: {stdout[:100]}[/dim]") + return False, f"Port {port} is in use - kill the process first", commands_executed + + # === Remount 
Read-Write === + if strategy == "remount_rw" or error_type == "readonly_fs": + if path: + console.print("[dim] Remounting filesystem read-write...[/dim]") + # Find mount point + mount_point = "/" + check_path = os.path.abspath(path) if path else "/" + while check_path != "/": + if os.path.ismount(check_path): + mount_point = check_path + break + check_path = os.path.dirname(check_path) + + remount_cmd = f"sudo mount -o remount,rw {mount_point}" + success, _, remount_err = self._execute_command(remount_cmd) + commands_executed.append(remount_cmd) + if success: + return True, f"Remounted {mount_point} read-write", commands_executed + + # === Fix Symlink Loop === + if strategy == "fix_symlink" or error_type == "symlink_loop": + if path: + console.print(f"[dim] Fixing symlink: {path}[/dim]") + # Check if it's a broken symlink + if os.path.islink(path): + rm_cmd = f"sudo rm {path}" + success, _, _ = self._execute_command(rm_cmd) + commands_executed.append(rm_cmd) + if success: + return True, f"Removed broken symlink {path}", commands_executed + + # === Wait and Retry === + if strategy == "wait_retry" or error_type in ["resource_unavailable", "text_file_busy", "device_busy"]: + import time + console.print("[dim] Waiting for resource...[/dim]") + time.sleep(2) + return True, "Waited 2 seconds", commands_executed + + # === Use xargs for long argument lists === + if strategy == "use_xargs" or error_type == "arg_list_too_long": + console.print("[dim] Argument list too long - need to use xargs or loop[/dim]") + return False, "Use xargs or a loop to process files in batches", commands_executed + + # === Execute provided fix commands === + if fix_commands: + console.print("[dim] Executing fix commands...[/dim]") + for fix_cmd in fix_commands: + if fix_cmd.startswith("#"): + continue # Skip comments + success, stdout, err = self._execute_command(fix_cmd) + commands_executed.append(fix_cmd) + if not success and err: + console.print(f"[dim] Warning: {fix_cmd} failed: 
{err[:50]}[/dim]") + + if commands_executed: + return True, f"Executed {len(commands_executed)} fix commands", commands_executed + + # === Try LLM-based fix if available === + if self.llm_callback and error_type == "unknown": + console.print("[dim] Using AI to diagnose error...[/dim]") + llm_fix = self._get_llm_fix(cmd, stderr, diagnosis) + if llm_fix: + fix_commands = llm_fix.get("fix_commands", []) + reasoning = llm_fix.get("reasoning", "AI-suggested fix") + + if fix_commands: + console.print(f"[cyan] 🤖 AI diagnosis: {reasoning}[/cyan]") + for fix_cmd in fix_commands: + if self._is_fix_attempted(cmd, fix_cmd): + console.print(f"[dim] Skipping (already tried): {fix_cmd}[/dim]") + continue + + console.print(f"[dim] Executing: {fix_cmd}[/dim]") + self._mark_fix_attempted(cmd, fix_cmd) + + needs_sudo = fix_cmd.strip().startswith("sudo") or "docker" in fix_cmd + success, stdout, stderr = self._execute_command(fix_cmd, needs_sudo=needs_sudo) + commands_executed.append(fix_cmd) + + if success: + console.print(f"[green] ✓ Fixed: {fix_cmd}[/green]") + return True, reasoning, commands_executed + + if commands_executed: + return True, f"Executed AI-suggested fixes", commands_executed + + # === Fallback: try with sudo === + if not cmd.strip().startswith("sudo"): + console.print("[dim] Fallback: will try with sudo...[/dim]") + return True, "Will retry with sudo", [] + + return False, f"No fix strategy for {error_type}", commands_executed + + def fix_config_syntax( + self, + config_file: str, + line_num: int, + stderr: str, + original_cmd: str, + ) -> tuple[bool, str]: + """Fix configuration file syntax errors.""" + console.print(f"[dim] Analyzing config: {config_file}:{line_num}[/dim]") + + # Read the config file + success, config_content, read_err = self._execute_command(f"sudo cat {config_file}") + if not success or not config_content: + return False, f"Could not read {config_file}: {read_err}" + + lines = config_content.split('\n') + if line_num > len(lines) or line_num < 
1: + return False, f"Invalid line number {line_num}" + + problem_line = lines[line_num - 1] + console.print(f"[dim] Line {line_num}: {problem_line.strip()[:60]}...[/dim]") + + stderr_lower = stderr.lower() + + # Duplicate entry + if "duplicate" in stderr_lower: + console.print("[cyan] Commenting out duplicate entry...[/cyan]") + fix_cmd = f"sudo sed -i '{line_num}s/^/# DUPLICATE: /' {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Commented out duplicate at line {line_num}" + + # Missing semicolon (for nginx, etc.) + if "unexpected" in stderr_lower or "expecting" in stderr_lower: + stripped = problem_line.strip() + if stripped and not stripped.endswith((';', '{', '}', ':', ',', '#', ')')): + console.print("[cyan] Adding missing semicolon...[/cyan]") + escaped_line = stripped.replace('/', '\\/').replace('&', '\\&') + fix_cmd = f"sudo sed -i '{line_num}s/.*/ {escaped_line};/' {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Added semicolon at line {line_num}" + + # Unknown directive + if "unknown" in stderr_lower and ("directive" in stderr_lower or "option" in stderr_lower): + console.print("[cyan] Commenting out unknown directive...[/cyan]") + fix_cmd = f"sudo sed -i '{line_num}s/^/# UNKNOWN: /' {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Commented out unknown directive at line {line_num}" + + # Invalid value/argument + if "invalid" in stderr_lower: + console.print("[cyan] Commenting out line with invalid value...[/cyan]") + fix_cmd = f"sudo sed -i '{line_num}s/^/# INVALID: /' {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Commented out invalid line at line {line_num}" + + # Unterminated string + if "unterminated" in stderr_lower or ("string" in stderr_lower and "quote" in stderr_lower): + if problem_line.count('"') % 2 == 1: + console.print("[cyan] Adding missing double 
quote...[/cyan]") + fix_cmd = f"sudo sed -i '{line_num}s/$/\"/' {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Added missing quote at line {line_num}" + elif problem_line.count("'") % 2 == 1: + console.print("[cyan] Adding missing single quote...[/cyan]") + fix_cmd = f"sudo sed -i \"{line_num}s/$/'/\" {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Added missing quote at line {line_num}" + + # Fallback: comment out problematic line + console.print("[cyan] Fallback: commenting out problematic line...[/cyan]") + fix_cmd = f"sudo sed -i '{line_num}s/^/# ERROR: /' {config_file}" + success, _, _ = self._execute_command(fix_cmd) + if success: + return True, f"Commented out problematic line {line_num}" + + return False, "Could not identify a fix for this config error" + + +# ============================================================================ +# Utility Functions +# ============================================================================ + +def get_error_category(error_type: str) -> str: + """Get the category for an error type.""" + for pattern in ALL_ERROR_PATTERNS: + if pattern.error_type == error_type: + return pattern.category + return "unknown" + + +def get_severity(error_type: str) -> str: + """Get the severity for an error type.""" + for pattern in ALL_ERROR_PATTERNS: + if pattern.error_type == error_type: + return pattern.severity + return "error" + + +def is_critical_error(error_type: str) -> bool: + """Check if an error type is critical.""" + return get_severity(error_type) == "critical" diff --git a/cortex/do_runner/diagnosis_v2.py b/cortex/do_runner/diagnosis_v2.py new file mode 100644 index 000000000..9cb0bdb40 --- /dev/null +++ b/cortex/do_runner/diagnosis_v2.py @@ -0,0 +1,1857 @@ +""" +Cortex Diagnosis System v2 + +A structured error diagnosis and resolution system with the following flow: +1. 
Categorize error type (file, login, package, syntax, input, etc.) +2. LLM generates fix commands with variable placeholders +3. Resolve variables from query, LLM, or system_info_generator +4. Execute fix commands and log output +5. If error, push to stack and repeat +6. Test original command, if still fails, repeat + +Uses a stack-based approach for tracking command errors. +""" + +import json +import os +import re +import subprocess +import time +from dataclasses import dataclass, field +from enum import Enum +from typing import Any, Callable + +from rich.console import Console +from rich.panel import Panel +from rich.table import Table +from rich.tree import Tree + +console = Console() + + +# ============================================================================= +# ERROR CATEGORIES +# ============================================================================= + +class ErrorCategory(str, Enum): + """Broad categories of errors that can occur during command execution.""" + + # File & Directory Errors (LOCAL) + FILE_NOT_FOUND = "file_not_found" + FILE_EXISTS = "file_exists" + DIRECTORY_NOT_FOUND = "directory_not_found" + PERMISSION_DENIED_LOCAL = "permission_denied_local" # Local file/dir permission + READ_ONLY_FILESYSTEM = "read_only_filesystem" + DISK_FULL = "disk_full" + + # URL/Link Permission Errors (REMOTE) + PERMISSION_DENIED_URL = "permission_denied_url" # URL/API permission + ACCESS_DENIED_REGISTRY = "access_denied_registry" # Container registry + ACCESS_DENIED_REPO = "access_denied_repo" # Git/package repo + ACCESS_DENIED_API = "access_denied_api" # API endpoint + + # Authentication & Login Errors + LOGIN_REQUIRED = "login_required" + AUTH_FAILED = "auth_failed" + TOKEN_EXPIRED = "token_expired" + INVALID_CREDENTIALS = "invalid_credentials" + + # Legacy - for backward compatibility + PERMISSION_DENIED = "permission_denied" # Will be resolved to LOCAL or URL + + # Package & Resource Errors + PACKAGE_NOT_FOUND = "package_not_found" + IMAGE_NOT_FOUND 
= "image_not_found" + RESOURCE_NOT_FOUND = "resource_not_found" + DEPENDENCY_MISSING = "dependency_missing" + VERSION_CONFLICT = "version_conflict" + + # Command Errors + COMMAND_NOT_FOUND = "command_not_found" + SYNTAX_ERROR = "syntax_error" + INVALID_ARGUMENT = "invalid_argument" + MISSING_ARGUMENT = "missing_argument" + DEPRECATED_SYNTAX = "deprecated_syntax" + + # Service & Process Errors + SERVICE_NOT_RUNNING = "service_not_running" + SERVICE_FAILED = "service_failed" + PORT_IN_USE = "port_in_use" + PROCESS_KILLED = "process_killed" + TIMEOUT = "timeout" + + # Network Errors + NETWORK_UNREACHABLE = "network_unreachable" + CONNECTION_REFUSED = "connection_refused" + DNS_FAILED = "dns_failed" + SSL_ERROR = "ssl_error" + + # Configuration Errors + CONFIG_SYNTAX_ERROR = "config_syntax_error" + CONFIG_INVALID_VALUE = "config_invalid_value" + CONFIG_MISSING_KEY = "config_missing_key" + + # Resource Errors + OUT_OF_MEMORY = "out_of_memory" + CPU_LIMIT = "cpu_limit" + QUOTA_EXCEEDED = "quota_exceeded" + + # Unknown + UNKNOWN = "unknown" + + +# Error pattern definitions for each category +ERROR_PATTERNS: dict[ErrorCategory, list[tuple[str, str]]] = { + # File & Directory + ErrorCategory.FILE_NOT_FOUND: [ + (r"No such file or directory", "file"), + (r"cannot open '([^']+)'.*No such file", "file"), + (r"stat\(\): cannot stat '([^']+)'", "file"), + (r"File not found:? 
([^\n]+)", "file"), + ], + ErrorCategory.FILE_EXISTS: [ + (r"File exists", "file"), + (r"cannot create.*File exists", "file"), + ], + ErrorCategory.DIRECTORY_NOT_FOUND: [ + (r"No such file or directory:.*/$", "directory"), + (r"cannot access '([^']+/)': No such file or directory", "directory"), + (r"mkdir: cannot create directory '([^']+)'.*No such file", "parent_directory"), + ], + # Local file/directory permission denied + ErrorCategory.PERMISSION_DENIED_LOCAL: [ + (r"Permission denied.*(/[^\s:]+)", "path"), + (r"cannot open '([^']+)'.*Permission denied", "path"), + (r"cannot create.*'([^']+)'.*Permission denied", "path"), + (r"cannot access '([^']+)'.*Permission denied", "path"), + (r"Operation not permitted.*(/[^\s:]+)", "path"), + (r"EACCES.*(/[^\s]+)", "path"), + ], + + # URL/Link permission denied (registries, APIs, repos) + ErrorCategory.PERMISSION_DENIED_URL: [ + (r"403 Forbidden.*https?://([^\s/]+)", "host"), + (r"401 Unauthorized.*https?://([^\s/]+)", "host"), + (r"Access denied.*https?://([^\s/]+)", "host"), + ], + + ErrorCategory.ACCESS_DENIED_REGISTRY: [ + (r"denied: requested access to the resource is denied", "registry"), + (r"pull access denied", "registry"), # Higher priority pattern + (r"pull access denied for ([^\s,]+)", "image"), + (r"unauthorized: authentication required.*registry", "registry"), + (r"Error response from daemon.*denied", "registry"), + (r"UNAUTHORIZED.*registry", "registry"), + (r"unauthorized to access repository", "registry"), + ], + + ErrorCategory.ACCESS_DENIED_REPO: [ + (r"Repository not found.*https?://([^\s]+)", "repo"), + (r"fatal: could not read from remote repository", "repo"), + (r"Permission denied \(publickey\)", "repo"), + (r"Host key verification failed", "host"), + (r"remote: Permission to ([^\s]+) denied", "repo"), + ], + + ErrorCategory.ACCESS_DENIED_API: [ + (r"API.*access denied", "api"), + (r"AccessDenied.*Access denied", "api"), # AWS-style error + (r"403.*API", "api"), + (r"unauthorized.*api", "api"), + 
(r"An error occurred \(AccessDenied\)", "api"), # AWS CLI error + (r"not authorized to perform", "api"), + ], + + # Legacy pattern for generic permission denied + ErrorCategory.PERMISSION_DENIED: [ + (r"Permission denied", "resource"), + (r"Operation not permitted", "operation"), + (r"Access denied", "resource"), + (r"EACCES", "resource"), + ], + ErrorCategory.READ_ONLY_FILESYSTEM: [ + (r"Read-only file system", "filesystem"), + ], + ErrorCategory.DISK_FULL: [ + (r"No space left on device", "device"), + (r"Disk quota exceeded", "quota"), + ], + + # Authentication & Login + ErrorCategory.LOGIN_REQUIRED: [ + (r"Login required", "service"), + (r"Authentication required", "service"), + (r"401 Unauthorized", "service"), + (r"not logged in", "service"), + (r"must be logged in", "service"), + (r"Non-null Username Required", "service"), + ], + ErrorCategory.AUTH_FAILED: [ + (r"Authentication failed", "service"), + (r"invalid username or password", "credentials"), + (r"403 Forbidden", "access"), + (r"access denied", "resource"), + ], + ErrorCategory.TOKEN_EXPIRED: [ + (r"token.*expired", "token"), + (r"session expired", "session"), + (r"credential.*expired", "credential"), + ], + ErrorCategory.INVALID_CREDENTIALS: [ + (r"invalid.*credentials?", "type"), + (r"bad credentials", "type"), + (r"incorrect password", "auth"), + ], + + # Package & Resource + ErrorCategory.PACKAGE_NOT_FOUND: [ + (r"Unable to locate package ([^\s]+)", "package"), + (r"Package ([^\s]+) is not available", "package"), + (r"No package ([^\s]+) available", "package"), + (r"E: Package '([^']+)' has no installation candidate", "package"), + (r"error: package '([^']+)' not found", "package"), + (r"ModuleNotFoundError: No module named '([^']+)'", "module"), + ], + ErrorCategory.IMAGE_NOT_FOUND: [ + (r"manifest.*not found", "image"), + (r"image.*not found", "image"), + (r"repository does not exist", "repository"), + (r"Error response from daemon: manifest for ([^\s]+) not found", "image"), + # Note: "pull 
access denied" moved to ACCESS_DENIED_REGISTRY + ], + ErrorCategory.RESOURCE_NOT_FOUND: [ + (r"resource.*not found", "resource"), + (r"404 Not Found", "url"), + (r"could not find ([^\n]+)", "resource"), + (r"No matching distribution found for ([^\s]+)", "package"), + (r"Could not find a version that satisfies the requirement ([^\s]+)", "package"), + ], + ErrorCategory.DEPENDENCY_MISSING: [ + (r"Depends:.*but it is not going to be installed", "dependency"), + (r"unmet dependencies", "packages"), + (r"dependency.*not satisfied", "dependency"), + (r"peer dep missing", "dependency"), + ], + ErrorCategory.VERSION_CONFLICT: [ + (r"version conflict", "packages"), + (r"incompatible version", "version"), + (r"requires.*but ([^\s]+) is installed", "conflict"), + ], + + # Command Errors + ErrorCategory.COMMAND_NOT_FOUND: [ + (r"command not found", "command"), + (r"not found", "binary"), + (r"is not recognized as", "command"), + (r"Unknown command", "subcommand"), + ], + ErrorCategory.SYNTAX_ERROR: [ + (r"syntax error", "location"), + (r"parse error", "location"), + (r"unexpected token", "token"), + (r"near unexpected", "token"), + ], + ErrorCategory.INVALID_ARGUMENT: [ + (r"invalid.*argument", "argument"), + (r"unrecognized option", "option"), + (r"unknown option", "option"), + (r"illegal option", "option"), + (r"bad argument", "argument"), + ], + ErrorCategory.MISSING_ARGUMENT: [ + (r"missing.*argument", "argument"), + (r"requires.*argument", "argument"), + (r"missing operand", "operand"), + (r"option.*requires an argument", "option"), + ], + ErrorCategory.DEPRECATED_SYNTAX: [ + (r"deprecated", "feature"), + (r"obsolete", "feature"), + (r"use.*instead", "replacement"), + ], + + # Service & Process + ErrorCategory.SERVICE_NOT_RUNNING: [ + (r"is not running", "service"), + (r"service.*stopped", "service"), + (r"inactive \(dead\)", "service"), + (r"Unit.*not found", "unit"), + (r"Failed to connect to", "service"), + (r"could not be found", "service"), + (r"Unit 
([^\s]+)\.service could not be found", "service"), + ], + ErrorCategory.SERVICE_FAILED: [ + (r"failed to start", "service"), + (r"service.*failed", "service"), + (r"Job.*failed", "job"), + (r"Main process exited", "process"), + ], + ErrorCategory.PORT_IN_USE: [ + (r"Address already in use", "port"), + (r"port.*already.*use", "port"), + (r"bind\(\): Address already in use", "port"), + (r"EADDRINUSE", "port"), + ], + ErrorCategory.PROCESS_KILLED: [ + (r"Killed", "signal"), + (r"SIGKILL", "signal"), + (r"Out of memory", "oom"), + ], + ErrorCategory.TIMEOUT: [ + (r"timed out", "operation"), + (r"timeout", "operation"), + (r"deadline exceeded", "operation"), + ], + + # Network + ErrorCategory.NETWORK_UNREACHABLE: [ + (r"Network is unreachable", "network"), + (r"No route to host", "host"), + (r"Could not resolve host", "host"), + ], + ErrorCategory.CONNECTION_REFUSED: [ + (r"Connection refused", "target"), + (r"ECONNREFUSED", "target"), + (r"couldn't connect to host", "host"), + ], + ErrorCategory.DNS_FAILED: [ + (r"Name or service not known", "hostname"), + (r"Temporary failure in name resolution", "dns"), + (r"DNS lookup failed", "hostname"), + ], + ErrorCategory.SSL_ERROR: [ + (r"SSL.*error", "ssl"), + (r"certificate.*error", "certificate"), + (r"CERT_", "certificate"), + ], + + # Configuration + ErrorCategory.CONFIG_SYNTAX_ERROR: [ + (r"configuration.*syntax.*error", "config"), + (r"invalid configuration", "config"), + (r"parse error in", "config"), + (r"nginx:.*emerg.*", "nginx_config"), + (r"Failed to parse", "config"), + ], + ErrorCategory.CONFIG_INVALID_VALUE: [ + (r"invalid value", "config"), + (r"unknown directive", "directive"), + (r"invalid parameter", "parameter"), + ], + ErrorCategory.CONFIG_MISSING_KEY: [ + (r"missing.*key", "key"), + (r"required.*not set", "key"), + (r"undefined variable", "variable"), + ], + + # Resource + ErrorCategory.OUT_OF_MEMORY: [ + (r"Out of memory", "memory"), + (r"Cannot allocate memory", "memory"), + (r"MemoryError", "memory"), 
+ (r"OOMKilled", "oom"), + ], + ErrorCategory.QUOTA_EXCEEDED: [ + (r"quota exceeded", "quota"), + (r"limit reached", "limit"), + (r"rate limit", "rate"), + ], +} + + +# ============================================================================= +# DATA STRUCTURES +# ============================================================================= + +@dataclass +class DiagnosisResult: + """Result of error diagnosis (Step 1).""" + category: ErrorCategory + error_message: str + extracted_info: dict[str, str] = field(default_factory=dict) + confidence: float = 1.0 + raw_stderr: str = "" + + +@dataclass +class FixCommand: + """A single fix command with variable placeholders.""" + command_template: str # Command with {variable} placeholders + purpose: str + variables: list[str] = field(default_factory=list) # Variable names found + requires_sudo: bool = False + + def __post_init__(self): + # Extract variables from template + self.variables = re.findall(r'\{(\w+)\}', self.command_template) + + +@dataclass +class FixPlan: + """Plan for fixing an error (Step 2 output).""" + category: ErrorCategory + commands: list[FixCommand] + reasoning: str + all_variables: set[str] = field(default_factory=set) + + def __post_init__(self): + # Collect all unique variables + for cmd in self.commands: + self.all_variables.update(cmd.variables) + + +@dataclass +class VariableResolution: + """Resolution for a variable (Step 3).""" + name: str + value: str + source: str # "query", "llm", "system_info", "default" + + +@dataclass +class ExecutionResult: + """Result of executing a fix command (Step 4).""" + command: str + success: bool + stdout: str + stderr: str + execution_time: float + + +@dataclass +class ErrorStackEntry: + """Entry in the error stack for tracking.""" + original_command: str + intent: str + error: str + category: ErrorCategory + fix_plan: FixPlan | None = None + fix_attempts: int = 0 + timestamp: float = field(default_factory=time.time) + + +# 
============================================================================= +# DIAGNOSIS ENGINE +# ============================================================================= + +class DiagnosisEngine: + """ + Main diagnosis engine implementing the structured error resolution flow. + + Flow: + 1. Categorize error type + 2. LLM generates fix commands with variables + 3. Resolve variables + 4. Execute fix commands + 5. If error, push to stack and repeat + 6. Test original command + """ + + MAX_FIX_ATTEMPTS = 5 + MAX_STACK_DEPTH = 10 + + # Known URL/remote service patterns in commands + URL_COMMAND_PATTERNS = [ + r"docker\s+(pull|push|login)", + r"git\s+(clone|push|pull|fetch|remote)", + r"npm\s+(publish|login|install.*@)", + r"pip\s+install.*--index-url", + r"curl\s+", + r"wget\s+", + r"aws\s+", + r"gcloud\s+", + r"kubectl\s+", + r"helm\s+", + r"az\s+", # Azure CLI + r"gh\s+", # GitHub CLI + ] + + # Known registries and their authentication services + KNOWN_SERVICES = { + "ghcr.io": "ghcr", + "docker.io": "docker", + "registry.hub.docker.com": "docker", + "github.com": "git_https", + "gitlab.com": "git_https", + "bitbucket.org": "git_https", + "registry.npmjs.org": "npm", + "pypi.org": "pypi", + "amazonaws.com": "aws", + "gcr.io": "gcloud", + } + + def __init__( + self, + api_key: str | None = None, + provider: str = "claude", + model: str | None = None, + debug: bool = False, + ): + self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + self.provider = provider.lower() + self.model = model or self._default_model() + self.debug = debug + + # Error stack for tracking command errors + self.error_stack: list[ErrorStackEntry] = [] + + # Resolution cache to avoid re-resolving same variables + self.variable_cache: dict[str, str] = {} + + # Execution history for logging + self.execution_history: list[dict[str, Any]] = [] + + # Initialize LoginHandler for credential management + self._login_handler = None + try: + from 
cortex.do_runner.diagnosis import LoginHandler + self._login_handler = LoginHandler() + except ImportError: + pass + + self._initialize_client() + + def _default_model(self) -> str: + if self.provider == "openai": + return "gpt-4o" + elif self.provider == "claude": + return "claude-sonnet-4-20250514" + return "gpt-4o" + + def _initialize_client(self): + """Initialize the LLM client.""" + if not self.api_key: + console.print("[yellow]⚠ No API key found - LLM features disabled[/yellow]") + self.client = None + return + + if self.provider == "openai": + try: + from openai import OpenAI + self.client = OpenAI(api_key=self.api_key) + except ImportError: + self.client = None + elif self.provider == "claude": + try: + from anthropic import Anthropic + self.client = Anthropic(api_key=self.api_key) + except ImportError: + self.client = None + else: + self.client = None + + # ========================================================================= + # PERMISSION TYPE DETECTION + # ========================================================================= + + def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[bool, str | None, str | None]: + """ + Determine if permission denied is for a local file/dir or a URL/link. 
+ + Returns: + Tuple of (is_url_based, service_name, url_or_host) + """ + # Check if command involves known remote operations + is_remote_command = any( + re.search(pattern, command, re.IGNORECASE) + for pattern in self.URL_COMMAND_PATTERNS + ) + + # Check stderr for URL patterns + url_patterns = [ + r"https?://([^\s/]+)", + r"([a-zA-Z0-9.-]+\.(io|com|org|net))", + r"registry[.\s]", + r"(ghcr\.io|docker\.io|gcr\.io|quay\.io)", + ] + + found_host = None + for pattern in url_patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + found_host = match.group(1) if match.groups() else match.group(0) + break + + # Also check command for URLs/hosts + if not found_host: + for pattern in url_patterns: + match = re.search(pattern, command, re.IGNORECASE) + if match: + found_host = match.group(1) if match.groups() else match.group(0) + break + + # Determine service + service = None + if found_host: + for host_pattern, svc in self.KNOWN_SERVICES.items(): + if host_pattern in found_host.lower(): + service = svc + break + + # Detect service from command if not found from host + if not service: + if "git " in command.lower(): + service = "git_https" + if not found_host: + found_host = "git remote" + elif "aws " in command.lower(): + service = "aws" + if not found_host: + found_host = "aws" + elif "docker " in command.lower(): + service = "docker" + elif "npm " in command.lower(): + service = "npm" + + # Git-specific patterns + git_remote_patterns = [ + "remote:" in stderr.lower(), + "permission to" in stderr.lower() and ".git" in stderr.lower(), + "denied to" in stderr.lower(), + "could not read from remote repository" in stderr.lower(), + "fatal: authentication failed" in stderr.lower(), + ] + + # AWS-specific patterns + aws_patterns = [ + "accessdenied" in stderr.lower().replace(" ", ""), + "an error occurred" in stderr.lower() and "denied" in stderr.lower(), + "not authorized" in stderr.lower(), + ] + + # If it's a remote command with a host or URL-based 
error patterns + is_url_based = bool(is_remote_command and found_host) or any([ + "401" in stderr, + "403" in stderr, + "unauthorized" in stderr.lower(), + "authentication required" in stderr.lower(), + "login required" in stderr.lower(), + "access denied" in stderr.lower() and found_host, + "pull access denied" in stderr.lower(), + "denied: requested access" in stderr.lower(), + ]) or any(git_remote_patterns) or any(aws_patterns) + + if is_url_based: + console.print(f"[cyan] 🌐 Detected URL-based permission error[/cyan]") + console.print(f"[dim] Host: {found_host or 'unknown'}[/dim]") + console.print(f"[dim] Service: {service or 'unknown'}[/dim]") + + return is_url_based, service, found_host + + def _is_local_file_permission_error(self, command: str, stderr: str) -> tuple[bool, str | None]: + """ + Check if permission error is for a local file/directory. + + Returns: + Tuple of (is_local_file, file_path) + """ + # Check for local path patterns in stderr + local_patterns = [ + r"Permission denied.*(/[^\s:]+)", + r"cannot open '([^']+)'.*Permission denied", + r"cannot create.*'([^']+)'.*Permission denied", + r"cannot access '([^']+)'.*Permission denied", + r"cannot read '([^']+)'", + r"failed to open '([^']+)'", + r"open\(\) \"([^\"]+)\" failed", + ] + + for pattern in local_patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + path = match.group(1) + # Verify it's a local path (starts with / or ./) + if path.startswith("/") or path.startswith("./"): + console.print(f"[cyan] 📁 Detected local file permission error[/cyan]") + console.print(f"[dim] Path: {path}[/dim]") + return True, path + + # Check command for local paths being accessed + path_match = re.search(r"(/[^\s]+)", command) + if path_match and "permission denied" in stderr.lower(): + path = path_match.group(1) + console.print(f"[cyan] 📁 Detected local file permission error (from command)[/cyan]") + console.print(f"[dim] Path: {path}[/dim]") + return True, path + + return False, None + + 
def _resolve_permission_error_type( + self, + command: str, + stderr: str, + current_category: ErrorCategory, + ) -> tuple[ErrorCategory, dict[str, str]]: + """ + Resolve generic PERMISSION_DENIED to specific LOCAL or URL category. + + Returns: + Tuple of (refined_category, additional_info) + """ + additional_info = {} + + # Only process if it's a generic permission error + permission_categories = [ + ErrorCategory.PERMISSION_DENIED, + ErrorCategory.PERMISSION_DENIED_LOCAL, + ErrorCategory.PERMISSION_DENIED_URL, + ErrorCategory.ACCESS_DENIED_REGISTRY, + ErrorCategory.ACCESS_DENIED_REPO, + ErrorCategory.ACCESS_DENIED_API, + ErrorCategory.AUTH_FAILED, + ] + + if current_category not in permission_categories: + return current_category, additional_info + + # Check URL-based first (more specific) + is_url, service, host = self._is_url_based_permission_error(command, stderr) + if is_url: + additional_info["service"] = service or "unknown" + additional_info["host"] = host or "unknown" + + # Determine more specific category + if "registry" in stderr.lower() or service in ["docker", "ghcr", "gcloud"]: + return ErrorCategory.ACCESS_DENIED_REGISTRY, additional_info + elif "git" in command.lower() or service in ["git_https"]: + return ErrorCategory.ACCESS_DENIED_REPO, additional_info + elif "api" in stderr.lower() or service in ["aws", "gcloud", "azure"]: + # AWS, GCloud, Azure are API-based services + return ErrorCategory.ACCESS_DENIED_API, additional_info + elif "aws " in command.lower() or "az " in command.lower() or "gcloud " in command.lower(): + # Cloud CLI commands are API-based + return ErrorCategory.ACCESS_DENIED_API, additional_info + else: + return ErrorCategory.PERMISSION_DENIED_URL, additional_info + + # Check local file + is_local, path = self._is_local_file_permission_error(command, stderr) + if is_local: + additional_info["path"] = path or "" + return ErrorCategory.PERMISSION_DENIED_LOCAL, additional_info + + # Default to local for generic permission denied + 
return ErrorCategory.PERMISSION_DENIED_LOCAL, additional_info + + # ========================================================================= + # STEP 1: Categorize Error + # ========================================================================= + + def categorize_error(self, command: str, stderr: str, stdout: str = "") -> DiagnosisResult: + """ + Step 1: Categorize the error type. + + Examines stderr (and stdout) to determine the broad category of error. + For permission errors, distinguishes between local file/dir and URL/link. + """ + self._log_step(1, "Categorizing error type") + + combined_output = f"{stderr}\n{stdout}".lower() + + best_match: tuple[ErrorCategory, dict[str, str], float] | None = None + + for category, patterns in ERROR_PATTERNS.items(): + for pattern, info_key in patterns: + match = re.search(pattern, stderr, re.IGNORECASE) + if match: + extracted_info = {info_key: match.group(1) if match.groups() else ""} + + # Calculate confidence based on pattern specificity + confidence = len(pattern) / 50.0 # Longer patterns = more specific + confidence = min(confidence, 1.0) + + if best_match is None or confidence > best_match[2]: + best_match = (category, extracted_info, confidence) + + if best_match: + category, extracted_info, confidence = best_match + + # Refine permission errors to LOCAL or URL + refined_category, additional_info = self._resolve_permission_error_type( + command, stderr, category + ) + extracted_info.update(additional_info) + + result = DiagnosisResult( + category=refined_category, + error_message=stderr[:500], + extracted_info=extracted_info, + confidence=confidence, + raw_stderr=stderr, + ) + else: + result = DiagnosisResult( + category=ErrorCategory.UNKNOWN, + error_message=stderr[:500], + confidence=0.0, + raw_stderr=stderr, + ) + + self._print_diagnosis(result, command) + return result + + # ========================================================================= + # STEP 2: Generate Fix Plan via LLM + # 
========================================================================= + + def generate_fix_plan( + self, + command: str, + intent: str, + diagnosis: DiagnosisResult + ) -> FixPlan: + """ + Step 2: LLM generates fix commands with variable placeholders. + + Context given: command, intent, error, category + Output: List of commands with {variable} placeholders + """ + self._log_step(2, "Generating fix plan via LLM") + + if not self.client: + # Fallback to rule-based fix generation + return self._generate_fallback_fix_plan(command, intent, diagnosis) + + system_prompt = self._get_fix_generation_prompt() + + user_prompt = f"""Generate fix commands for this error: + +**Command:** `{command}` +**Intent:** {intent} +**Error Category:** {diagnosis.category.value} +**Error Message:** {diagnosis.error_message} +**Extracted Info:** {json.dumps(diagnosis.extracted_info)} + +Provide fix commands with variable placeholders in {{curly_braces}} for any values that need to be determined at runtime. + +Respond with JSON: +{{ + "reasoning": "explanation of the fix approach", + "commands": [ + {{ + "command": "command with {{variable}} placeholders", + "purpose": "what this command does", + "requires_sudo": true/false + }} + ] +}}""" + + try: + response = self._call_llm(system_prompt, user_prompt) + + # Parse response + json_match = re.search(r'\{[\s\S]*\}', response) + if json_match: + data = json.loads(json_match.group()) + + commands = [] + for cmd_data in data.get("commands", []): + commands.append(FixCommand( + command_template=cmd_data.get("command", ""), + purpose=cmd_data.get("purpose", ""), + requires_sudo=cmd_data.get("requires_sudo", False), + )) + + plan = FixPlan( + category=diagnosis.category, + commands=commands, + reasoning=data.get("reasoning", ""), + ) + + self._print_fix_plan(plan) + return plan + + except Exception as e: + console.print(f"[yellow]⚠ LLM fix generation failed: {e}[/yellow]") + + # Fallback + return self._generate_fallback_fix_plan(command, intent, 
diagnosis) + + def _get_fix_generation_prompt(self) -> str: + return """You are a Linux system error diagnosis expert. Generate shell commands to fix errors. + +RULES: +1. Use {variable} placeholders for values that need to be determined at runtime +2. Common variables: {file_path}, {package_name}, {service_name}, {user}, {port}, {config_file} +3. Commands should be atomic and specific +4. Include sudo only when necessary +5. Order commands logically (prerequisites first) + +VARIABLE NAMING: +- {file_path} - path to a file +- {dir_path} - path to a directory +- {package} - package name to install +- {service} - systemd service name +- {user} - username +- {port} - port number +- {config_file} - configuration file path +- {config_line} - line number in config +- {image} - Docker/container image name +- {registry} - Container registry URL +- {username} - Login username +- {token} - Auth token or password + +EXAMPLE OUTPUT: +{ + "reasoning": "Permission denied on /etc/nginx - need sudo to write, also backup first", + "commands": [ + { + "command": "sudo cp {config_file} {config_file}.backup", + "purpose": "Backup the configuration file before modifying", + "requires_sudo": true + }, + { + "command": "sudo sed -i 's/{old_value}/{new_value}/' {config_file}", + "purpose": "Fix the configuration value", + "requires_sudo": true + } + ] +}""" + + def _generate_fallback_fix_plan( + self, + command: str, + intent: str, + diagnosis: DiagnosisResult + ) -> FixPlan: + """Generate a fix plan using rules when LLM is unavailable.""" + commands: list[FixCommand] = [] + reasoning = f"Rule-based fix for {diagnosis.category.value}" + + category = diagnosis.category + info = diagnosis.extracted_info + + # LOCAL permission denied - use sudo + if category == ErrorCategory.PERMISSION_DENIED_LOCAL: + path = info.get("path", "") + reasoning = f"Local file/directory permission denied - using elevated privileges" + commands.append(FixCommand( + command_template=f"sudo {command}", + 
purpose=f"Retry with elevated privileges for local path{' ' + path if path else ''}", + requires_sudo=True, + )) + + # URL-based permission - handle login + elif category in [ + ErrorCategory.PERMISSION_DENIED_URL, + ErrorCategory.ACCESS_DENIED_REGISTRY, + ErrorCategory.ACCESS_DENIED_REPO, + ErrorCategory.ACCESS_DENIED_API, + ]: + service = info.get("service", "unknown") + host = info.get("host", "unknown") + reasoning = f"URL/remote access denied - requires authentication to {service or host}" + + # Generate login command based on service + if service == "docker" or service == "ghcr" or "registry" in category.value: + registry = host if host != "unknown" else "{registry}" + commands.extend([ + FixCommand( + command_template=f"docker login {registry}", + purpose=f"Login to container registry {registry}", + ), + FixCommand( + command_template=command, + purpose="Retry original command after login", + ), + ]) + elif service == "git_https" or "repo" in category.value: + commands.extend([ + FixCommand( + command_template="git config --global credential.helper store", + purpose="Enable credential storage for git", + ), + FixCommand( + command_template=command, + purpose="Retry original command (will prompt for credentials)", + ), + ]) + elif service == "npm": + commands.extend([ + FixCommand( + command_template="npm login", + purpose="Login to npm registry", + ), + FixCommand( + command_template=command, + purpose="Retry original command after login", + ), + ]) + elif service == "aws": + commands.extend([ + FixCommand( + command_template="aws configure", + purpose="Configure AWS credentials", + ), + FixCommand( + command_template=command, + purpose="Retry original command after configuration", + ), + ]) + else: + # Generic login placeholder + commands.append(FixCommand( + command_template="{login_command}", + purpose=f"Login to {service or host}", + )) + commands.append(FixCommand( + command_template=command, + purpose="Retry original command after login", + )) + + # 
Legacy generic permission denied - try to determine type + elif category == ErrorCategory.PERMISSION_DENIED: + commands.append(FixCommand( + command_template=f"sudo {command}", + purpose="Retry with elevated privileges", + requires_sudo=True, + )) + + elif category == ErrorCategory.FILE_NOT_FOUND: + file_path = info.get("file", "{file_path}") + commands.append(FixCommand( + command_template=f"touch {file_path}", + purpose=f"Create missing file", + )) + + elif category == ErrorCategory.DIRECTORY_NOT_FOUND: + dir_path = info.get("directory", info.get("parent_directory", "{dir_path}")) + commands.append(FixCommand( + command_template=f"mkdir -p {dir_path}", + purpose="Create missing directory", + )) + + elif category == ErrorCategory.COMMAND_NOT_FOUND: + # Try to guess package from command + cmd_name = command.split()[0] if command else "{package}" + commands.append(FixCommand( + command_template=f"sudo apt install -y {{package}}", + purpose=f"Install package providing the command", + requires_sudo=True, + )) + + elif category == ErrorCategory.SERVICE_NOT_RUNNING: + service = info.get("service", "{service}") + commands.append(FixCommand( + command_template=f"sudo systemctl start {service}", + purpose="Start the service", + requires_sudo=True, + )) + + elif category == ErrorCategory.LOGIN_REQUIRED: + service = info.get("service", "{service}") + commands.append(FixCommand( + command_template="{login_command}", + purpose=f"Login to {service}", + )) + + elif category == ErrorCategory.PACKAGE_NOT_FOUND: + package = info.get("package", "{package}") + commands.extend([ + FixCommand( + command_template="sudo apt update", + purpose="Update package lists", + requires_sudo=True, + ), + FixCommand( + command_template=f"sudo apt install -y {package}", + purpose=f"Install the package", + requires_sudo=True, + ), + ]) + + elif category == ErrorCategory.PORT_IN_USE: + port = info.get("port", "{port}") + commands.extend([ + FixCommand( + command_template=f"sudo lsof -i :{port}", + 
purpose="Find process using the port", + requires_sudo=True, + ), + FixCommand( + command_template="sudo kill -9 {pid}", + purpose="Kill the process using the port", + requires_sudo=True, + ), + ]) + + elif category == ErrorCategory.CONFIG_SYNTAX_ERROR: + config_file = info.get("config", info.get("nginx_config", "{config_file}")) + commands.extend([ + FixCommand( + command_template=f"cat -n {config_file}", + purpose="Show config file with line numbers", + ), + FixCommand( + command_template=f"sudo nano {config_file}", + purpose="Edit config file to fix syntax", + requires_sudo=True, + ), + ]) + + else: + # Generic retry with sudo + commands.append(FixCommand( + command_template=f"sudo {command}", + purpose="Retry with elevated privileges", + requires_sudo=True, + )) + + plan = FixPlan( + category=diagnosis.category, + commands=commands, + reasoning=reasoning, + ) + + self._print_fix_plan(plan) + return plan + + # ========================================================================= + # STEP 3: Resolve Variables + # ========================================================================= + + def resolve_variables( + self, + fix_plan: FixPlan, + original_query: str, + command: str, + diagnosis: DiagnosisResult, + ) -> dict[str, str]: + """ + Step 3: Resolve variable values using: + 1. Extract from original query + 2. LLM call with context + 3. 
system_info_command_generator + """ + self._log_step(3, "Resolving variables") + + if not fix_plan.all_variables: + console.print("[dim] No variables to resolve[/dim]") + return {} + + console.print(f"[cyan] Variables to resolve: {', '.join(fix_plan.all_variables)}[/cyan]") + + resolved: dict[str, str] = {} + + for var_name in fix_plan.all_variables: + # Check cache first + if var_name in self.variable_cache: + resolved[var_name] = self.variable_cache[var_name] + console.print(f"[dim] {var_name}: {resolved[var_name]} (cached)[/dim]") + continue + + # Try extraction from diagnosis info + value = self._try_extract_from_diagnosis(var_name, diagnosis) + if value: + resolved[var_name] = value + console.print(f"[green] ✓ {var_name}: {value} (from error)[/green]") + continue + + # Try extraction from query + value = self._try_extract_from_query(var_name, original_query) + if value: + resolved[var_name] = value + console.print(f"[green] ✓ {var_name}: {value} (from query)[/green]") + continue + + # Try system_info_command_generator + value = self._try_system_info(var_name, command, diagnosis) + if value: + resolved[var_name] = value + console.print(f"[green] ✓ {var_name}: {value} (from system)[/green]") + continue + + # Fall back to LLM + value = self._try_llm_resolution(var_name, original_query, command, diagnosis) + if value: + resolved[var_name] = value + console.print(f"[green] ✓ {var_name}: {value} (from LLM)[/green]") + continue + + # Prompt user as last resort + console.print(f"[yellow] ⚠ Could not resolve {var_name}[/yellow]") + try: + from rich.prompt import Prompt + value = Prompt.ask(f" Enter value for {var_name}") + if value: + resolved[var_name] = value + console.print(f"[green] ✓ {var_name}: {value} (from user)[/green]") + except Exception: + pass + + # Update cache + self.variable_cache.update(resolved) + + return resolved + + def _try_extract_from_diagnosis(self, var_name: str, diagnosis: DiagnosisResult) -> str | None: + """Try to extract variable from 
diagnosis extracted_info.""" + # Map variable names to diagnosis info keys + mappings = { + "file_path": ["file", "path"], + "dir_path": ["directory", "parent_directory", "dir"], + "package": ["package", "module"], + "service": ["service", "unit"], + "port": ["port"], + "config_file": ["config", "nginx_config", "config_file"], + "user": ["user"], + "image": ["image", "repository"], + } + + keys_to_check = mappings.get(var_name, [var_name]) + for key in keys_to_check: + if key in diagnosis.extracted_info and diagnosis.extracted_info[key]: + return diagnosis.extracted_info[key] + + return None + + def _try_extract_from_query(self, var_name: str, query: str) -> str | None: + """Try to extract variable from the original query.""" + # Pattern-based extraction from query + patterns = { + "file_path": [r"file\s+['\"]?([/\w.-]+)['\"]?", r"([/\w]+\.\w+)"], + "dir_path": [r"directory\s+['\"]?([/\w.-]+)['\"]?", r"folder\s+['\"]?([/\w.-]+)['\"]?"], + "package": [r"install\s+(\w[\w-]*)", r"package\s+(\w[\w-]*)"], + "service": [r"service\s+(\w[\w-]*)", r"(\w+)\.service"], + "port": [r"port\s+(\d+)", r":(\d{2,5})"], + "image": [r"image\s+([^\s]+)", r"docker.*\s+([^\s]+:[^\s]*)"], + } + + if var_name in patterns: + for pattern in patterns[var_name]: + match = re.search(pattern, query, re.IGNORECASE) + if match: + return match.group(1) + + return None + + def _try_system_info(self, var_name: str, command: str, diagnosis: DiagnosisResult) -> str | None: + """Use system_info_command_generator to get variable value.""" + try: + from cortex.system_info_generator import SystemInfoGenerator + + # System info commands for different variable types + system_queries = { + "user": "whoami", + "home_dir": "echo $HOME", + "current_dir": "pwd", + } + + if var_name in system_queries: + result = subprocess.run( + system_queries[var_name], + shell=True, + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + + # 
For package commands, try to find the package + if var_name == "package": + cmd_name = command.split()[0] if command else "" + # Common command-to-package mappings for Ubuntu + package_map = { + "nginx": "nginx", + "docker": "docker.io", + "python": "python3", + "pip": "python3-pip", + "node": "nodejs", + "npm": "npm", + "git": "git", + "curl": "curl", + "wget": "wget", + "htop": "htop", + "vim": "vim", + "nano": "nano", + } + if cmd_name in package_map: + return package_map[cmd_name] + + # Try apt-file search if available + result = subprocess.run( + f"apt-file search --regexp 'bin/{cmd_name}$' 2>/dev/null | head -1 | cut -d: -f1", + shell=True, + capture_output=True, + text=True, + timeout=10, + ) + if result.returncode == 0 and result.stdout.strip(): + return result.stdout.strip() + + # For service names, try systemctl + if var_name == "service": + # Extract service name from command if present + service_match = re.search(r'systemctl\s+\w+\s+(\S+)', command) + if service_match: + return service_match.group(1) + + except Exception as e: + if self.debug: + console.print(f"[dim] System info failed for {var_name}: {e}[/dim]") + + return None + + def _try_llm_resolution( + self, + var_name: str, + query: str, + command: str, + diagnosis: DiagnosisResult, + ) -> str | None: + """Use LLM to resolve variable value.""" + if not self.client: + return None + + prompt = f"""Extract the value for variable '{var_name}' from this context: + +Query: {query} +Command: {command} +Error Category: {diagnosis.category.value} +Error: {diagnosis.error_message[:200]} + +Respond with ONLY the value, nothing else. 
If you cannot determine the value, respond with "UNKNOWN".""" + + try: + response = self._call_llm("You extract specific values from context.", prompt) + value = response.strip().strip('"\'') + if value and value.upper() != "UNKNOWN": + return value + except Exception: + pass + + return None + + # ========================================================================= + # URL AUTHENTICATION HANDLING + # ========================================================================= + + def handle_url_authentication( + self, + command: str, + diagnosis: DiagnosisResult, + ) -> tuple[bool, str]: + """ + Handle URL-based permission errors by prompting for login. + + Uses LoginHandler to: + 1. Detect the service/website + 2. Prompt for credentials + 3. Store credentials for future use + 4. Execute login command + + Returns: + Tuple of (success, message) + """ + console.print("\n[bold cyan]🔐 URL Authentication Required[/bold cyan]") + + if not self._login_handler: + console.print("[yellow]⚠ LoginHandler not available[/yellow]") + return False, "LoginHandler not available" + + service = diagnosis.extracted_info.get("service", "unknown") + host = diagnosis.extracted_info.get("host", "") + + console.print(f"[dim] Service: {service}[/dim]") + console.print(f"[dim] Host: {host}[/dim]") + + try: + # Use LoginHandler to manage authentication + login_req = self._login_handler.detect_login_requirement(command, diagnosis.raw_stderr) + + if login_req: + console.print(f"\n[cyan]📝 Login to {login_req.display_name}[/cyan]") + + # Handle login (will prompt, execute, and optionally save credentials) + success, message = self._login_handler.handle_login(command, diagnosis.raw_stderr) + + if success: + console.print(f"[green]✓ {message}[/green]") + return True, message + else: + console.print(f"[yellow]⚠ {message}[/yellow]") + return False, message + else: + # No matching login requirement, try generic approach + console.print("[yellow] Unknown service, trying generic login...[/yellow]") + 
return self._handle_generic_login(command, diagnosis) + + except Exception as e: + console.print(f"[red]✗ Authentication error: {e}[/red]") + return False, str(e) + + def _handle_generic_login( + self, + command: str, + diagnosis: DiagnosisResult, + ) -> tuple[bool, str]: + """Handle login for unknown services with interactive prompts.""" + from rich.prompt import Prompt, Confirm + + host = diagnosis.extracted_info.get("host", "unknown service") + + console.print(f"\n[cyan]Login required for: {host}[/cyan]") + + try: + # Prompt for credentials + username = Prompt.ask("Username") + if not username: + return False, "Username is required" + + password = Prompt.ask("Password", password=True) + + # Determine login command based on command context + login_cmd = None + + if "docker" in command.lower(): + registry = diagnosis.extracted_info.get("host", "") + login_cmd = f"docker login {registry}" if registry else "docker login" + elif "git" in command.lower(): + # Store git credentials + subprocess.run("git config --global credential.helper store", shell=True) + login_cmd = None # Git will prompt automatically + elif "npm" in command.lower(): + login_cmd = "npm login" + elif "pip" in command.lower() or "pypi" in host.lower(): + login_cmd = f"pip config set global.index-url https://{username}:{{password}}@pypi.org/simple/" + + if login_cmd: + console.print(f"[dim] Running: {login_cmd}[/dim]") + + # Execute login with password via stdin if needed + if "{password}" in login_cmd: + login_cmd = login_cmd.replace("{password}", password) + result = subprocess.run(login_cmd, shell=True, capture_output=True, text=True) + else: + # Interactive login + result = subprocess.run( + login_cmd, + shell=True, + input=f"{username}\n{password}\n", + capture_output=True, + text=True, + ) + + if result.returncode == 0: + # Offer to save credentials + if self._login_handler and Confirm.ask("Save credentials for future use?", default=True): + self._login_handler._save_credentials(host, { + 
"username": username, + "password": password, + }) + console.print("[green]✓ Credentials saved[/green]") + + return True, f"Logged in to {host}" + else: + return False, f"Login failed: {result.stderr[:200]}" + + return False, "Could not determine login command" + + except KeyboardInterrupt: + return False, "Login cancelled" + except Exception as e: + return False, str(e) + + # ========================================================================= + # STEP 4: Execute Fix Commands + # ========================================================================= + + def execute_fix_commands( + self, + fix_plan: FixPlan, + resolved_variables: dict[str, str] + ) -> list[ExecutionResult]: + """ + Step 4: Execute fix commands with resolved variables. + """ + self._log_step(4, "Executing fix commands") + + results: list[ExecutionResult] = [] + + for i, fix_cmd in enumerate(fix_plan.commands, 1): + # Substitute variables + command = fix_cmd.command_template + for var_name, value in resolved_variables.items(): + command = command.replace(f"{{{var_name}}}", value) + + # Check for unresolved variables + unresolved = re.findall(r'\{(\w+)\}', command) + if unresolved: + console.print(f"[yellow] ⚠ Skipping command with unresolved variables: {unresolved}[/yellow]") + results.append(ExecutionResult( + command=command, + success=False, + stdout="", + stderr=f"Unresolved variables: {unresolved}", + execution_time=0, + )) + continue + + console.print(f"\n[cyan] [{i}/{len(fix_plan.commands)}] {command}[/cyan]") + console.print(f"[dim] └─ {fix_cmd.purpose}[/dim]") + + # Execute + start_time = time.time() + try: + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=120, + ) + execution_time = time.time() - start_time + + exec_result = ExecutionResult( + command=command, + success=result.returncode == 0, + stdout=result.stdout.strip(), + stderr=result.stderr.strip(), + execution_time=execution_time, + ) + + if exec_result.success: + 
console.print(f"[green] ✓ Success ({execution_time:.2f}s)[/green]") + if exec_result.stdout and self.debug: + console.print(f"[dim] Output: {exec_result.stdout[:200]}[/dim]") + else: + console.print(f"[red] ✗ Failed: {exec_result.stderr[:200]}[/red]") + + results.append(exec_result) + + # Log to history + self.execution_history.append({ + "command": command, + "success": exec_result.success, + "stderr": exec_result.stderr[:500], + "timestamp": time.time(), + }) + + except subprocess.TimeoutExpired: + console.print(f"[red] ✗ Timeout after 120s[/red]") + results.append(ExecutionResult( + command=command, + success=False, + stdout="", + stderr="Command timed out", + execution_time=120, + )) + except Exception as e: + console.print(f"[red] ✗ Error: {e}[/red]") + results.append(ExecutionResult( + command=command, + success=False, + stdout="", + stderr=str(e), + execution_time=time.time() - start_time, + )) + + return results + + # ========================================================================= + # STEP 5 & 6: Error Stack Management and Retry Logic + # ========================================================================= + + def push_error(self, entry: ErrorStackEntry) -> None: + """Push an error onto the stack.""" + if len(self.error_stack) >= self.MAX_STACK_DEPTH: + console.print(f"[red]⚠ Error stack depth limit ({self.MAX_STACK_DEPTH}) reached[/red]") + return + + self.error_stack.append(entry) + self._print_error_stack() + + def pop_error(self) -> ErrorStackEntry | None: + """Pop an error from the stack.""" + if self.error_stack: + return self.error_stack.pop() + return None + + def diagnose_and_fix( + self, + command: str, + stderr: str, + intent: str, + original_query: str, + stdout: str = "", + ) -> tuple[bool, str]: + """ + Main diagnosis and fix flow. 
+ + Returns: + Tuple of (success, message) + """ + console.print(Panel( + f"[bold]Starting Diagnosis[/bold]\n" + f"Command: [cyan]{command}[/cyan]\n" + f"Intent: {intent}", + title="🔧 Cortex Diagnosis Engine", + border_style="blue", + )) + + # Push initial error to stack + initial_entry = ErrorStackEntry( + original_command=command, + intent=intent, + error=stderr, + category=ErrorCategory.UNKNOWN, # Will be set in Step 1 + ) + self.push_error(initial_entry) + + # Process error stack + while self.error_stack: + entry = self.error_stack[-1] # Peek at top + + if entry.fix_attempts >= self.MAX_FIX_ATTEMPTS: + console.print(f"[red]✗ Max fix attempts ({self.MAX_FIX_ATTEMPTS}) reached for command[/red]") + self.pop_error() + continue + + entry.fix_attempts += 1 + console.print(f"\n[bold]Fix Attempt {entry.fix_attempts}/{self.MAX_FIX_ATTEMPTS}[/bold]") + + # Step 1: Categorize error + diagnosis = self.categorize_error(entry.original_command, entry.error) + entry.category = diagnosis.category + + # SPECIAL HANDLING: URL-based permission errors need authentication + url_auth_categories = [ + ErrorCategory.PERMISSION_DENIED_URL, + ErrorCategory.ACCESS_DENIED_REGISTRY, + ErrorCategory.ACCESS_DENIED_REPO, + ErrorCategory.ACCESS_DENIED_API, + ErrorCategory.LOGIN_REQUIRED, + ] + + if diagnosis.category in url_auth_categories: + console.print(f"[cyan]🌐 URL-based access error detected - handling authentication[/cyan]") + + auth_success, auth_message = self.handle_url_authentication( + entry.original_command, + diagnosis + ) + + if auth_success: + # Re-test the original command after login + console.print(f"\n[cyan]📋 Testing original command after login...[/cyan]") + + test_result = subprocess.run( + entry.original_command, + shell=True, + capture_output=True, + text=True, + timeout=120, + ) + + if test_result.returncode == 0: + console.print(f"[green]✓ Command succeeded after authentication![/green]") + self.pop_error() + if not self.error_stack: + return True, f"Fixed via 
authentication: {auth_message}" + continue + else: + # Different error after login + entry.error = test_result.stderr.strip() + console.print(f"[yellow]⚠ New error after login, continuing diagnosis...[/yellow]") + continue + else: + console.print(f"[yellow]⚠ Authentication failed: {auth_message}[/yellow]") + # Continue with normal fix flow + + # Step 2: Generate fix plan + fix_plan = self.generate_fix_plan( + entry.original_command, + entry.intent, + diagnosis + ) + entry.fix_plan = fix_plan + + # Step 3: Resolve variables + resolved_vars = self.resolve_variables( + fix_plan, + original_query, + entry.original_command, + diagnosis, + ) + + # Check if all variables resolved + unresolved = fix_plan.all_variables - set(resolved_vars.keys()) + if unresolved: + console.print(f"[yellow]⚠ Could not resolve all variables: {unresolved}[/yellow]") + # Continue anyway with what we have + + # Step 4: Execute fix commands + results = self.execute_fix_commands(fix_plan, resolved_vars) + + # Check for errors in fix commands (Step 5) + fix_errors = [r for r in results if not r.success] + if fix_errors: + console.print(f"\n[yellow]⚠ {len(fix_errors)} fix command(s) failed[/yellow]") + + # Push the first error back to stack for diagnosis + first_error = fix_errors[0] + if first_error.stderr and "Unresolved variables" not in first_error.stderr: + new_entry = ErrorStackEntry( + original_command=first_error.command, + intent=f"Fix command for: {entry.intent}", + error=first_error.stderr, + category=ErrorCategory.UNKNOWN, + ) + self.push_error(new_entry) + continue + + # Step 6: Test original command + console.print(f"\n[cyan]📋 Testing original command: {entry.original_command}[/cyan]") + + test_result = subprocess.run( + entry.original_command, + shell=True, + capture_output=True, + text=True, + timeout=120, + ) + + if test_result.returncode == 0: + console.print(f"[green]✓ Original command now succeeds![/green]") + self.pop_error() + + # Check if stack is empty + if not 
self.error_stack: + return True, "All errors resolved successfully" + else: + new_error = test_result.stderr.strip() + console.print(f"[yellow]⚠ Original command still fails[/yellow]") + + if new_error != entry.error: + console.print(f"[cyan] New error detected, updating...[/cyan]") + entry.error = new_error + # Loop will continue with same entry + + # Stack empty but we didn't explicitly succeed + return False, "Could not resolve all errors" + + # ========================================================================= + # HELPERS + # ========================================================================= + + def _call_llm(self, system_prompt: str, user_prompt: str) -> str: + """Call the LLM and return response text.""" + if self.provider == "claude": + response = self.client.messages.create( + model=self.model, + max_tokens=2048, + system=system_prompt, + messages=[{"role": "user", "content": user_prompt}], + ) + return response.content[0].text + elif self.provider == "openai": + response = self.client.chat.completions.create( + model=self.model, + max_tokens=2048, + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + ) + return response.choices[0].message.content + else: + raise ValueError(f"Unsupported provider: {self.provider}") + + def _log_step(self, step_num: int, description: str) -> None: + """Log a diagnosis step.""" + console.print(f"\n[bold blue]Step {step_num}:[/bold blue] {description}") + + def _print_diagnosis(self, diagnosis: DiagnosisResult, command: str) -> None: + """Print diagnosis result.""" + table = Table(title="Error Diagnosis", show_header=False, border_style="dim") + table.add_column("Field", style="bold") + table.add_column("Value") + + table.add_row("Category", f"[cyan]{diagnosis.category.value}[/cyan]") + table.add_row("Confidence", f"{diagnosis.confidence:.0%}") + + if diagnosis.extracted_info: + info_str = ", ".join(f"{k}={v}" for k, v in diagnosis.extracted_info.items() 
if v) + table.add_row("Extracted", info_str) + + table.add_row("Error", diagnosis.error_message[:100] + "..." if len(diagnosis.error_message) > 100 else diagnosis.error_message) + + console.print(table) + + def _print_fix_plan(self, plan: FixPlan) -> None: + """Print fix plan.""" + console.print(f"\n[bold]Fix Plan:[/bold] {plan.reasoning}") + + for i, cmd in enumerate(plan.commands, 1): + sudo_tag = "[sudo]" if cmd.requires_sudo else "" + vars_tag = f"[vars: {', '.join(cmd.variables)}]" if cmd.variables else "" + console.print(f" {i}. [cyan]{cmd.command_template}[/cyan] {sudo_tag} {vars_tag}") + console.print(f" [dim]{cmd.purpose}[/dim]") + + def _print_error_stack(self) -> None: + """Print current error stack.""" + if not self.error_stack: + console.print("[dim] Error stack: empty[/dim]") + return + + tree = Tree("[bold]Error Stack[/bold]") + for i, entry in enumerate(reversed(self.error_stack)): + branch = tree.add(f"[{'yellow' if i == 0 else 'dim'}]{entry.original_command[:50]}[/]") + branch.add(f"[dim]Category: {entry.category.value}[/dim]") + branch.add(f"[dim]Attempts: {entry.fix_attempts}[/dim]") + + console.print(tree) + + def get_execution_summary(self) -> dict[str, Any]: + """Get summary of all executions.""" + return { + "total_commands": len(self.execution_history), + "successful": sum(1 for h in self.execution_history if h.get("success")), + "failed": sum(1 for h in self.execution_history if not h.get("success")), + "history": self.execution_history[-20:], # Last 20 + "variables_cached": len(self.variable_cache), + } + + +# ============================================================================= +# FACTORY FUNCTION +# ============================================================================= + +def get_diagnosis_engine( + provider: str = "claude", + debug: bool = False, +) -> DiagnosisEngine: + """Factory function to create a DiagnosisEngine.""" + api_key = os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + return 
DiagnosisEngine(api_key=api_key, provider=provider, debug=debug) + + +# ============================================================================= +# CLI TEST +# ============================================================================= + +if __name__ == "__main__": + import sys + + console.print("[bold]Diagnosis Engine Test[/bold]\n") + + engine = get_diagnosis_engine(debug=True) + + # Test error categorization + test_cases = [ + ("cat /nonexistent/file", "cat: /nonexistent/file: No such file or directory"), + ("docker pull ghcr.io/test/image", "Error: Non-null Username Required"), + ("apt install fakepackage", "E: Unable to locate package fakepackage"), + ("nginx -t", "nginx: [emerg] unknown directive \"invalid\" in /etc/nginx/nginx.conf:10"), + ("systemctl start myservice", "Failed to start myservice.service: Unit myservice.service not found."), + ] + + for cmd, error in test_cases: + console.print(f"\n[bold]Test:[/bold] {cmd}") + console.print(f"[dim]Error: {error}[/dim]") + + diagnosis = engine.categorize_error(cmd, error) + console.print(f"[green]Category: {diagnosis.category.value}[/green]") + console.print("") + diff --git a/cortex/do_runner/executor.py b/cortex/do_runner/executor.py new file mode 100644 index 000000000..dce6b0c7f --- /dev/null +++ b/cortex/do_runner/executor.py @@ -0,0 +1,468 @@ +"""Task Tree Executor for advanced command execution with auto-repair.""" + +import os +import subprocess +import time +from typing import Any, Callable + +from rich.console import Console +from rich.prompt import Confirm + +from .models import ( + CommandLog, + CommandStatus, + DoRun, + TaskNode, + TaskTree, + TaskType, +) +from .terminal import TerminalMonitor + +console = Console() + + +class TaskTreeExecutor: + """ + Executes a task tree with auto-repair capabilities. 
+ + This handles: + - Executing commands in order + - Spawning repair sub-tasks when commands fail + - Asking for additional permissions when needed + - Monitoring terminals during manual intervention + - Providing detailed reasoning for failures + """ + + def __init__( + self, + user_manager: type, + paths_manager: Any, + llm_callback: Callable[[str], dict] | None = None, + ): + self.user_manager = user_manager + self.paths_manager = paths_manager + self.llm_callback = llm_callback + self.tree = TaskTree() + self._granted_privileges: list[str] = [] + self._permission_sets_requested: int = 0 + self._terminal_monitor: TerminalMonitor | None = None + + self._in_manual_mode = False + self._manual_commands_executed: list[dict] = [] + + def build_tree_from_commands( + self, + commands: list[dict[str, str]], + ) -> TaskTree: + """Build a task tree from a list of commands.""" + for cmd in commands: + self.tree.add_root_task( + command=cmd.get("command", ""), + purpose=cmd.get("purpose", ""), + ) + return self.tree + + def execute_tree( + self, + confirm_callback: Callable[[list[TaskNode]], bool] | None = None, + notify_callback: Callable[[str, str], None] | None = None, + ) -> tuple[bool, str]: + """ + Execute the task tree with auto-repair. 
+ + Returns: + Tuple of (success, summary) + """ + total_success = 0 + total_failed = 0 + total_repaired = 0 + repair_details = [] + + for root_task in self.tree.root_tasks: + success, repaired = self._execute_task_with_repair( + root_task, + confirm_callback, + notify_callback, + ) + + if success: + total_success += 1 + if repaired: + total_repaired += 1 + else: + total_failed += 1 + if root_task.failure_reason: + repair_details.append(f"- {root_task.command[:40]}...: {root_task.failure_reason}") + + summary_parts = [ + f"Completed: {total_success}", + f"Failed: {total_failed}", + ] + if total_repaired > 0: + summary_parts.append(f"Auto-repaired: {total_repaired}") + + summary = f"Tasks: {' | '.join(summary_parts)}" + + if repair_details: + summary += "\n\nFailure reasons:\n" + "\n".join(repair_details) + + return total_failed == 0, summary + + def _execute_task_with_repair( + self, + task: TaskNode, + confirm_callback: Callable[[list[TaskNode]], bool] | None = None, + notify_callback: Callable[[str, str], None] | None = None, + ) -> tuple[bool, bool]: + """Execute a task and attempt repair if it fails.""" + was_repaired = False + + task.status = CommandStatus.RUNNING + success, output, error, duration = self._execute_command(task.command) + + task.output = output + task.error = error + task.duration_seconds = duration + + if success: + task.status = CommandStatus.SUCCESS + console.print(f"[green]✓[/green] {task.purpose}") + return True, False + + task.status = CommandStatus.NEEDS_REPAIR + diagnosis = self._diagnose_error(task.command, error, output) + task.failure_reason = diagnosis.get("description", "Unknown error") + + console.print(f"[yellow]⚠[/yellow] {task.purpose} - {diagnosis['error_type']}") + console.print(f"[dim] └─ {diagnosis['description']}[/dim]") + + if diagnosis.get("can_auto_fix") and task.repair_attempts < task.max_repair_attempts: + task.repair_attempts += 1 + fix_commands = diagnosis.get("fix_commands", []) + + if fix_commands: + 
console.print(f"[cyan]🔧 Attempting auto-repair ({task.repair_attempts}/{task.max_repair_attempts})...[/cyan]") + + new_paths = self._identify_paths_needing_privileges(fix_commands) + if new_paths and confirm_callback: + repair_tasks = [] + for cmd in fix_commands: + repair_task = self.tree.add_repair_task( + parent=task, + command=cmd, + purpose=f"Repair: {diagnosis['description'][:50]}", + reasoning=diagnosis.get("reasoning", ""), + ) + repair_tasks.append(repair_task) + + self._permission_sets_requested += 1 + console.print(f"\n[yellow]🔐 Permission request #{self._permission_sets_requested} for repair commands:[/yellow]") + + if confirm_callback(repair_tasks): + all_repairs_success = True + for repair_task in repair_tasks: + repair_success, _ = self._execute_task_with_repair( + repair_task, confirm_callback, notify_callback + ) + if not repair_success: + all_repairs_success = False + + if all_repairs_success: + console.print(f"[cyan]↻ Retrying original command...[/cyan]") + success, output, error, duration = self._execute_command(task.command) + task.output = output + task.error = error + task.duration_seconds += duration + + if success: + task.status = CommandStatus.SUCCESS + task.reasoning = f"Auto-repaired after {task.repair_attempts} attempt(s)" + console.print(f"[green]✓[/green] {task.purpose} [dim](repaired)[/dim]") + return True, True + else: + all_repairs_success = True + for cmd in fix_commands: + repair_task = self.tree.add_repair_task( + parent=task, + command=cmd, + purpose=f"Repair: {diagnosis['description'][:50]}", + reasoning=diagnosis.get("reasoning", ""), + ) + repair_success, _ = self._execute_task_with_repair( + repair_task, confirm_callback, notify_callback + ) + if not repair_success: + all_repairs_success = False + + if all_repairs_success: + console.print(f"[cyan]↻ Retrying original command...[/cyan]") + success, output, error, duration = self._execute_command(task.command) + task.output = output + task.error = error + task.duration_seconds 
+= duration + + if success: + task.status = CommandStatus.SUCCESS + task.reasoning = f"Auto-repaired after {task.repair_attempts} attempt(s)" + console.print(f"[green]✓[/green] {task.purpose} [dim](repaired)[/dim]") + return True, True + + task.status = CommandStatus.FAILED + task.reasoning = self._generate_failure_reasoning(task, diagnosis) + + if diagnosis.get("manual_suggestion") and notify_callback: + console.print(f"\n[yellow]📋 Manual intervention suggested:[/yellow]") + console.print(f"[dim]{diagnosis['manual_suggestion']}[/dim]") + + if Confirm.ask("Would you like to run this manually while Cortex monitors?", default=False): + success = self._supervise_manual_intervention( + task, + diagnosis.get("manual_suggestion", ""), + notify_callback, + ) + if success: + task.status = CommandStatus.SUCCESS + task.reasoning = "Completed via manual intervention with Cortex monitoring" + return True, True + + console.print(f"\n[red]✗ Failed:[/red] {task.purpose}") + console.print(f"[dim] Reason: {task.reasoning}[/dim]") + + return False, was_repaired + + def _execute_command(self, command: str) -> tuple[bool, str, str, float]: + """Execute a command.""" + start_time = time.time() + + try: + needs_sudo = self._needs_sudo(command) + + if needs_sudo and not command.strip().startswith("sudo"): + command = f"sudo {command}" + + result = subprocess.run( + command, + shell=True, + capture_output=True, + text=True, + timeout=300, + ) + + duration = time.time() - start_time + success = result.returncode == 0 + + return success, result.stdout, result.stderr, duration + + except subprocess.TimeoutExpired: + return False, "", "Command timed out after 300 seconds", time.time() - start_time + except Exception as e: + return False, "", str(e), time.time() - start_time + + def _needs_sudo(self, command: str) -> bool: + """Determine if a command needs sudo.""" + sudo_keywords = [ + "systemctl", "service", "apt", "apt-get", "dpkg", + "useradd", "usermod", "userdel", "groupadd", + "chmod", 
"chown", "mount", "umount", "fdisk", + "iptables", "ufw", "firewall-cmd", + ] + + system_paths = ["/etc/", "/var/", "/usr/", "/opt/", "/sys/", "/proc/"] + + cmd_parts = command.strip().split() + if not cmd_parts: + return False + + base_cmd = cmd_parts[0] + + if base_cmd in sudo_keywords: + return True + + for part in cmd_parts: + for path in system_paths: + if path in part: + if any(op in command for op in [">", ">>", "cp ", "mv ", "rm ", "mkdir ", "touch ", "sed ", "tee "]): + return True + + return False + + def _diagnose_error( + self, + command: str, + stderr: str, + stdout: str, + ) -> dict[str, Any]: + """Diagnose why a command failed and suggest repairs.""" + error_lower = stderr.lower() + combined = (stderr + stdout).lower() + + if "permission denied" in error_lower: + import re + path_match = None + path_patterns = [ + r"cannot (?:create|open|access|stat|remove|modify) (?:regular file |directory )?['\"]?([^'\":\n]+)['\"]?", + r"open\(\) ['\"]?([^'\"]+)['\"]? failed", + r"['\"]([^'\"]+)['\"]?: [Pp]ermission denied", + ] + for pattern in path_patterns: + match = re.search(pattern, stderr) + if match: + path_match = match.group(1).strip() + break + + return { + "error_type": "Permission Denied", + "description": f"Insufficient permissions to access: {path_match or 'unknown path'}", + "can_auto_fix": True, + "fix_commands": [f"sudo {command}"] if not command.strip().startswith("sudo") else [], + "manual_suggestion": f"Run with sudo: sudo {command}", + "reasoning": f"The command tried to access '{path_match or 'a protected resource'}' without sufficient privileges.", + } + + if "no such file or directory" in error_lower: + import re + path_match = re.search(r"['\"]?([^'\"\n]+)['\"]?: [Nn]o such file", stderr) + missing_path = path_match.group(1) if path_match else None + + if missing_path: + parent_dir = os.path.dirname(missing_path) + if parent_dir: + return { + "error_type": "File Not Found", + "description": f"Path does not exist: {missing_path}", + 
"can_auto_fix": True, + "fix_commands": [f"sudo mkdir -p {parent_dir}"], + "manual_suggestion": f"Create the directory: sudo mkdir -p {parent_dir}", + "reasoning": f"The target path '{missing_path}' doesn't exist.", + } + + return { + "error_type": "File Not Found", + "description": "A required file or directory does not exist", + "can_auto_fix": False, + "fix_commands": [], + "manual_suggestion": "Check the file path and ensure it exists", + "reasoning": "The command references a non-existent path.", + } + + if "command not found" in error_lower or "not found" in error_lower: + import re + cmd_match = re.search(r"(\w+): (?:command )?not found", stderr) + missing_cmd = cmd_match.group(1) if cmd_match else None + + return { + "error_type": "Command Not Found", + "description": f"Command not installed: {missing_cmd or 'unknown'}", + "can_auto_fix": bool(missing_cmd), + "fix_commands": [f"sudo apt install -y {missing_cmd}"] if missing_cmd else [], + "manual_suggestion": f"Install: sudo apt install {missing_cmd}" if missing_cmd else "Install the required command", + "reasoning": f"The command '{missing_cmd or 'required'}' is not installed.", + } + + return { + "error_type": "Unknown Error", + "description": stderr[:200] if stderr else "Command failed with no error output", + "can_auto_fix": False, + "fix_commands": [], + "manual_suggestion": f"Review the error and try: {command}", + "reasoning": f"The command failed with an unexpected error.", + } + + def _generate_failure_reasoning(self, task: TaskNode, diagnosis: dict) -> str: + """Generate detailed reasoning for why a task failed.""" + parts = [ + f"Error type: {diagnosis.get('error_type', 'Unknown')}", + f"Description: {diagnosis.get('description', 'No details available')}", + ] + + if task.repair_attempts > 0: + parts.append(f"Repair attempts: {task.repair_attempts} (all failed)") + + if diagnosis.get("reasoning"): + parts.append(f"Analysis: {diagnosis['reasoning']}") + + if diagnosis.get("manual_suggestion"): + 
parts.append(f"Suggestion: {diagnosis['manual_suggestion']}") + + return " | ".join(parts) + + def _identify_paths_needing_privileges(self, commands: list[str]) -> list[str]: + """Identify paths in commands that need privilege grants.""" + paths = [] + for cmd in commands: + parts = cmd.split() + for part in parts: + if part.startswith("/") and self.paths_manager.is_protected(part): + paths.append(part) + return paths + + def _supervise_manual_intervention( + self, + task: TaskNode, + instruction: str, + notify_callback: Callable[[str, str], None], + ) -> bool: + """Supervise manual command execution with terminal monitoring.""" + self._in_manual_mode = True + + console.print("\n[bold cyan]═══ Manual Intervention Mode ═══[/bold cyan]") + console.print(f"\n[yellow]Run this command in another terminal:[/yellow]") + console.print(f"[bold]{instruction}[/bold]") + + self._terminal_monitor = TerminalMonitor( + notification_callback=lambda title, msg: notify_callback(title, msg) + ) + self._terminal_monitor.start() + + console.print("\n[dim]Cortex is now monitoring your terminal for issues...[/dim]") + + try: + while True: + choice = Confirm.ask( + "\nHave you completed the manual step?", + default=True, + ) + + if choice: + success = Confirm.ask("Was it successful?", default=True) + + if success: + console.print("[green]✓ Manual step completed successfully[/green]") + return True + else: + console.print("\n[yellow]What went wrong?[/yellow]") + console.print("1. Permission denied") + console.print("2. File not found") + console.print("3. 
Other error") + + try: + error_choice = int(input("Enter choice (1-3): ")) + except ValueError: + error_choice = 3 + + if error_choice == 1: + console.print(f"[yellow]Try: sudo {instruction}[/yellow]") + elif error_choice == 2: + console.print("[yellow]Check the file path exists[/yellow]") + else: + console.print("[yellow]Describe the error and try again[/yellow]") + + continue_trying = Confirm.ask("Continue trying?", default=True) + if not continue_trying: + return False + else: + console.print("[dim]Take your time. Cortex is still monitoring...[/dim]") + + finally: + self._in_manual_mode = False + if self._terminal_monitor: + self._terminal_monitor.stop() + + def get_tree_summary(self) -> dict: + """Get a summary of the task tree execution.""" + return { + "tree": self.tree.to_dict(), + "permission_requests": self._permission_sets_requested, + "manual_commands": self._manual_commands_executed, + } + diff --git a/cortex/do_runner/handler.py b/cortex/do_runner/handler.py new file mode 100644 index 000000000..fbf954b23 --- /dev/null +++ b/cortex/do_runner/handler.py @@ -0,0 +1,3700 @@ +"""Main DoHandler class for the --do functionality.""" + +import datetime +import os +import shutil +import signal +import subprocess +import sys +import time +from pathlib import Path +from typing import Any, Callable + +from rich.console import Console +from rich.panel import Panel +from rich.prompt import Confirm +from rich.table import Table + +from .database import DoRunDatabase +from .diagnosis import AutoFixer, ErrorDiagnoser, LoginHandler +from .managers import CortexUserManager, ProtectedPathsManager +from .models import ( + CommandLog, + CommandStatus, + DoRun, + RunMode, + TaskNode, + TaskTree, +) +from .terminal import TerminalMonitor +from .verification import ( + ConflictDetector, + FileUsefulnessAnalyzer, + VerificationRunner, +) + +console = Console() + + +class DoHandler: + """Main handler for the --do functionality.""" + + def __init__(self, llm_callback: 
Callable[[str], dict] | None = None): + self.db = DoRunDatabase() + self.paths_manager = ProtectedPathsManager() + self.user_manager = CortexUserManager + self.current_run: DoRun | None = None + self._granted_privileges: list[str] = [] + self.llm_callback = llm_callback + + self._task_tree: TaskTree | None = None + self._permission_requests_count = 0 + + self._terminal_monitor: TerminalMonitor | None = None + + # Manual intervention tracking + self._expected_manual_commands: list[str] = [] + self._completed_manual_commands: list[str] = [] + + # Session tracking + self.current_session_id: str | None = None + + # Initialize helper classes + self._diagnoser = ErrorDiagnoser() + self._auto_fixer = AutoFixer(llm_callback=llm_callback) + self._login_handler = LoginHandler() + self._conflict_detector = ConflictDetector() + self._verification_runner = VerificationRunner() + self._file_analyzer = FileUsefulnessAnalyzer() + + # Execution state tracking for interruption handling + self._current_process: subprocess.Popen | None = None + self._current_command: str | None = None + self._executed_commands: list[dict] = [] + self._interrupted = False + self._interrupted_command: str | None = None # Track which command was interrupted for retry + self._remaining_commands: list[tuple[str, str, list[str]]] = [] # Commands that weren't executed + self._original_sigtstp = None + self._original_sigint = None + + def cleanup(self) -> None: + """Clean up any running threads or resources.""" + if self._terminal_monitor: + self._terminal_monitor.stop() + self._terminal_monitor = None + + def _is_json_like(self, text: str) -> bool: + """Check if text looks like raw JSON that shouldn't be displayed.""" + if not text: + return False + text = text.strip() + # Check for obvious JSON patterns + json_indicators = [ + text.startswith(('{', '[', ']', '}')), + '"response_type"' in text, + '"do_commands"' in text, + '"command":' in text, + '"requires_sudo"' in text, + '{"' in text and '":' in text, + 
text.count('"') > 6 and ':' in text, # Multiple quoted keys + ] + return any(json_indicators) + + def _setup_signal_handlers(self): + """Set up signal handlers for Ctrl+Z and Ctrl+C.""" + self._original_sigtstp = signal.signal(signal.SIGTSTP, self._handle_interrupt) + self._original_sigint = signal.signal(signal.SIGINT, self._handle_interrupt) + + def _restore_signal_handlers(self): + """Restore original signal handlers.""" + if self._original_sigtstp is not None: + signal.signal(signal.SIGTSTP, self._original_sigtstp) + if self._original_sigint is not None: + signal.signal(signal.SIGINT, self._original_sigint) + + def _handle_interrupt(self, signum, frame): + """Handle Ctrl+Z (SIGTSTP) or Ctrl+C (SIGINT) to stop current command only. + + This does NOT exit the session - it only stops the currently executing command. + The session continues so the user can decide what to do next. + """ + self._interrupted = True + # Store the interrupted command for potential retry + self._interrupted_command = self._current_command + signal_name = "Ctrl+Z" if signum == signal.SIGTSTP else "Ctrl+C" + + console.print() + console.print(f"[yellow]⚠ {signal_name} detected - Stopping current command...[/yellow]") + + # Kill current subprocess if running + if self._current_process and self._current_process.poll() is None: + try: + self._current_process.terminate() + # Give it a moment to terminate gracefully + try: + self._current_process.wait(timeout=2) + except subprocess.TimeoutExpired: + self._current_process.kill() + console.print(f"[yellow] Stopped: {self._current_command}[/yellow]") + except Exception as e: + console.print(f"[dim] Error stopping process: {e}[/dim]") + + # Note: We do NOT raise KeyboardInterrupt here + # The session continues - only the current command is stopped + + def _track_command_start(self, command: str, process: subprocess.Popen | None = None): + """Track when a command starts executing.""" + self._current_command = command + self._current_process = process 
+ + def _track_command_complete(self, command: str, success: bool, output: str = "", error: str = ""): + """Track when a command completes.""" + self._executed_commands.append({ + "command": command, + "success": success, + "output": output[:500] if output else "", + "error": error[:200] if error else "", + "timestamp": datetime.datetime.now().isoformat(), + }) + self._current_command = None + self._current_process = None + + def _reset_execution_state(self): + """Reset execution tracking state for a new run.""" + self._current_process = None + self._current_command = None + self._executed_commands = [] + self._interrupted = False + self._interrupted_command = None + self._remaining_commands = [] + + def __del__(self): + """Destructor to ensure cleanup.""" + self.cleanup() + + def _show_expandable_output(self, output: str, command: str) -> None: + """Show output with expand/collapse capability.""" + from rich.panel import Panel + from rich.text import Text + from rich.prompt import Prompt + + lines = output.split('\n') + total_lines = len(lines) + + # Always show first 3 lines as preview + preview_count = 3 + + if total_lines <= preview_count + 2: + # Small output - just show it all + console.print(Panel( + output, + title=f"[dim]Output[/dim]", + title_align="left", + border_style="dim", + padding=(0, 1), + )) + return + + # Show collapsed preview + preview = '\n'.join(lines[:preview_count]) + remaining = total_lines - preview_count + + content = Text() + content.append(preview) + content.append(f"\n\n[dim]─── {remaining} more lines hidden ───[/dim]", style="dim") + + console.print(Panel( + content, + title=f"[dim]Output ({total_lines} lines)[/dim]", + subtitle="[dim italic]Press Enter to continue, 'e' to expand[/dim italic]", + subtitle_align="right", + title_align="left", + border_style="dim", + padding=(0, 1), + )) + + # Quick check if user wants to expand + try: + response = input().strip().lower() + if response == 'e': + # Show full output + 
console.print(Panel( + output, + title=f"[dim]Full Output ({total_lines} lines)[/dim]", + title_align="left", + border_style="green", + padding=(0, 1), + )) + except (EOFError, KeyboardInterrupt): + pass + + # Initialize notification manager + try: + from cortex.notification_manager import NotificationManager + self.notifier = NotificationManager() + except ImportError: + self.notifier = None + + def _send_notification(self, title: str, message: str, level: str = "normal"): + """Send a desktop notification.""" + if self.notifier: + self.notifier.send(title, message, level=level) + else: + console.print(f"[bold yellow]🔔 {title}:[/bold yellow] {message}") + + def setup_cortex_user(self) -> bool: + """Ensure the cortex user exists.""" + if not self.user_manager.user_exists(): + console.print("[yellow]Setting up cortex user...[/yellow]") + success, message = self.user_manager.create_user() + if success: + console.print(f"[green]✓ {message}[/green]") + else: + console.print(f"[red]✗ {message}[/red]") + return success + return True + + def analyze_commands_for_protected_paths( + self, + commands: list[tuple[str, str]] + ) -> list[tuple[str, str, list[str]]]: + """Analyze commands and identify protected paths they access.""" + results = [] + + for command, purpose in commands: + protected = [] + parts = command.split() + for part in parts: + if part.startswith("/") or part.startswith("~"): + path = os.path.expanduser(part) + if self.paths_manager.is_protected(path): + protected.append(path) + + results.append((command, purpose, protected)) + + return results + + def request_user_confirmation( + self, + commands: list[tuple[str, str, list[str]]], + ) -> bool: + """Show commands to user and request confirmation with improved visual UI.""" + from rich.panel import Panel + from rich.table import Table + from rich.text import Text + from rich.columns import Columns + from rich import box + + console.print() + + # Create a table for commands + cmd_table = Table( + 
show_header=True, + header_style="bold cyan", + box=box.ROUNDED, + border_style="blue", + expand=True, + padding=(0, 1), + ) + cmd_table.add_column("#", style="bold cyan", width=3, justify="right") + cmd_table.add_column("Command", style="bold white") + cmd_table.add_column("Purpose", style="dim italic") + + all_protected = [] + for i, (cmd, purpose, protected) in enumerate(commands, 1): + # Truncate long commands for display + cmd_display = cmd if len(cmd) <= 60 else cmd[:57] + "..." + purpose_display = purpose if len(purpose) <= 50 else purpose[:47] + "..." + + # Add protected path indicator + if protected: + cmd_display = f"{cmd_display} [yellow]⚠[/yellow]" + all_protected.extend(protected) + + cmd_table.add_row(str(i), cmd_display, purpose_display) + + # Create header + header_text = Text() + header_text.append("🔐 ", style="bold") + header_text.append("Permission Required", style="bold white") + header_text.append(f" ({len(commands)} command{'s' if len(commands) > 1 else ''})", style="dim") + + console.print(Panel( + cmd_table, + title=header_text, + title_align="left", + border_style="blue", + padding=(1, 1), + )) + + # Show protected paths if any + if all_protected: + protected_set = set(all_protected) + protected_text = Text() + protected_text.append("⚠ Protected paths: ", style="bold yellow") + protected_text.append(", ".join(protected_set), style="dim yellow") + console.print(Panel( + protected_text, + border_style="yellow", + padding=(0, 1), + expand=False, + )) + + console.print() + return Confirm.ask("[bold]Proceed?[/bold]", default=False) + + def _needs_sudo(self, cmd: str, protected_paths: list[str]) -> bool: + """Determine if a command needs sudo to execute.""" + sudo_commands = [ + "systemctl", "service", "apt", "apt-get", "dpkg", + "mount", "umount", "fdisk", "mkfs", "chown", "chmod", + "useradd", "userdel", "usermod", "groupadd", "groupdel", + ] + + cmd_parts = cmd.split() + if not cmd_parts: + return False + + base_cmd = cmd_parts[0] + + if 
base_cmd in sudo_commands: + return True + + if protected_paths: + return True + + if any(p in cmd for p in ["/etc/", "/var/lib/", "/usr/", "/opt/", "/root/"]): + return True + + return False + + # Commands that benefit from streaming output (long-running with progress) + STREAMING_COMMANDS = [ + "docker pull", "docker push", "docker build", + "apt install", "apt-get install", "apt update", "apt-get update", "apt upgrade", "apt-get upgrade", + "pip install", "pip3 install", "pip download", "pip3 download", + "npm install", "npm ci", "yarn install", "yarn add", + "cargo build", "cargo install", + "go build", "go install", "go get", + "gem install", "bundle install", + "wget", "curl -o", "curl -O", + "git clone", "git pull", "git fetch", + "make", "cmake", "ninja", + "rsync", "scp", + ] + + # Interactive commands that need a TTY - cannot be run in background/automated + INTERACTIVE_COMMANDS = [ + "docker exec -it", "docker exec -ti", "docker run -it", "docker run -ti", + "docker attach", + "ollama run", "ollama chat", + "ssh ", + "bash -i", "sh -i", "zsh -i", + "vi ", "vim ", "nano ", "emacs ", + "python -i", "python3 -i", "ipython", "node -i", + "mysql -u", "psql -U", "mongo ", "redis-cli", + "htop", "top -i", "less ", "more ", + ] + + def _should_stream_output(self, cmd: str) -> bool: + """Check if command should use streaming output.""" + cmd_lower = cmd.lower() + return any(streaming_cmd in cmd_lower for streaming_cmd in self.STREAMING_COMMANDS) + + def _is_interactive_command(self, cmd: str) -> bool: + """Check if command requires interactive TTY and cannot be automated.""" + cmd_lower = cmd.lower() + # Check explicit patterns + if any(interactive in cmd_lower for interactive in self.INTERACTIVE_COMMANDS): + return True + # Check for -it or -ti flags in docker commands + if "docker" in cmd_lower and (" -it " in cmd_lower or " -ti " in cmd_lower or + cmd_lower.endswith(" -it") or cmd_lower.endswith(" -ti")): + return True + return False + + # Timeout settings by 
command type (in seconds) + COMMAND_TIMEOUTS = { + "docker pull": 1800, # 30 minutes for large images + "docker push": 1800, # 30 minutes for large images + "docker build": 3600, # 1 hour for complex builds + "apt install": 900, # 15 minutes + "apt-get install": 900, + "apt update": 300, # 5 minutes + "apt-get update": 300, + "apt upgrade": 1800, # 30 minutes + "apt-get upgrade": 1800, + "pip install": 600, # 10 minutes + "pip3 install": 600, + "npm install": 900, # 15 minutes + "yarn install": 900, + "git clone": 600, # 10 minutes + "make": 1800, # 30 minutes + "cargo build": 1800, + } + + def _get_command_timeout(self, cmd: str) -> int: + """Get appropriate timeout for a command.""" + cmd_lower = cmd.lower() + for cmd_pattern, timeout in self.COMMAND_TIMEOUTS.items(): + if cmd_pattern in cmd_lower: + return timeout + return 600 # Default 10 minutes for streaming commands + + def _execute_with_streaming( + self, + cmd: str, + needs_sudo: bool, + timeout: int | None = None, # None = auto-detect + ) -> tuple[bool, str, str]: + """Execute a command with real-time output streaming.""" + import select + import sys + + # Auto-detect timeout if not specified + if timeout is None: + timeout = self._get_command_timeout(cmd) + + # Show timeout info for long operations + if timeout > 300: + console.print(f"[dim] ⏱️ Timeout: {timeout // 60} minutes (large operation)[/dim]") + + stdout_lines = [] + stderr_lines = [] + + try: + if needs_sudo: + process = subprocess.Popen( + ["sudo", "bash", "-c", cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=1, # Line buffered + ) + else: + process = subprocess.Popen( + cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + bufsize=1, + ) + + # Use select for non-blocking reads on both stdout and stderr + import time + start_time = time.time() + + while True: + # Check timeout + if time.time() - start_time > timeout: + process.kill() + return False, "\n".join(stdout_lines), 
f"Command timed out after {timeout} seconds" + + # Check if process has finished + if process.poll() is not None: + # Read any remaining output + remaining_stdout, remaining_stderr = process.communicate() + if remaining_stdout: + for line in remaining_stdout.splitlines(): + stdout_lines.append(line) + self._print_progress_line(line, is_stderr=False) + if remaining_stderr: + for line in remaining_stderr.splitlines(): + stderr_lines.append(line) + self._print_progress_line(line, is_stderr=True) + break + + # Try to read from stdout/stderr without blocking + try: + readable, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1) + + for stream in readable: + line = stream.readline() + if line: + line = line.rstrip() + if stream == process.stdout: + stdout_lines.append(line) + self._print_progress_line(line, is_stderr=False) + else: + stderr_lines.append(line) + self._print_progress_line(line, is_stderr=True) + except (ValueError, OSError): + # Stream closed + break + + return ( + process.returncode == 0, + "\n".join(stdout_lines).strip(), + "\n".join(stderr_lines).strip(), + ) + + except Exception as e: + return False, "\n".join(stdout_lines), str(e) + + def _print_progress_line(self, line: str, is_stderr: bool = False) -> None: + """Print a progress line with appropriate formatting.""" + if not line.strip(): + return + + line = line.strip() + + # Docker pull progress patterns + if any(p in line for p in ["Pulling from", "Digest:", "Status:", "Pull complete", "Downloading", "Extracting"]): + console.print(f"[dim] 📦 {line}[/dim]") + # Docker build progress + elif line.startswith("Step ") or line.startswith("---> "): + console.print(f"[dim] 🔨 {line}[/dim]") + # apt progress patterns + elif any(p in line for p in ["Get:", "Hit:", "Fetched", "Reading", "Building", "Setting up", "Processing", "Unpacking"]): + console.print(f"[dim] 📦 {line}[/dim]") + # pip progress patterns + elif any(p in line for p in ["Collecting", "Downloading", "Installing", 
"Successfully"]): + console.print(f"[dim] 📦 {line}[/dim]") + # npm progress patterns + elif any(p in line for p in ["npm", "added", "packages", "audited"]): + console.print(f"[dim] 📦 {line}[/dim]") + # git progress patterns + elif any(p in line for p in ["Cloning", "remote:", "Receiving", "Resolving", "Checking out"]): + console.print(f"[dim] 📦 {line}[/dim]") + # wget/curl progress + elif "%" in line and any(c.isdigit() for c in line): + # Progress percentage - update in place + console.print(f"[dim] ⬇️ {line[:80]}[/dim]", end="\r") + # Error lines + elif is_stderr and any(p in line.lower() for p in ["error", "fail", "denied", "cannot", "unable"]): + console.print(f"[yellow] ⚠ {line}[/yellow]") + # Truncate very long lines + elif len(line) > 100: + console.print(f"[dim] {line[:100]}...[/dim]") + + def _execute_single_command( + self, + cmd: str, + needs_sudo: bool, + timeout: int = 120 + ) -> tuple[bool, str, str]: + """Execute a single command with proper privilege handling and interruption support.""" + # Check for interactive commands that need a TTY + if self._is_interactive_command(cmd): + return self._handle_interactive_command(cmd, needs_sudo) + + # Use streaming for long-running commands + if self._should_stream_output(cmd): + return self._execute_with_streaming(cmd, needs_sudo, timeout=300) + + # Track command start + self._track_command_start(cmd) + + try: + # Flush output before sudo to handle password prompts cleanly + if needs_sudo: + sys.stdout.flush() + sys.stderr.flush() + + # Use Popen for interruptibility + if needs_sudo: + process = subprocess.Popen( + ["sudo", "bash", "-c", cmd], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + else: + process = subprocess.Popen( + cmd, + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + + # Store process for interruption handling + self._current_process = process + + try: + stdout, stderr = process.communicate(timeout=timeout) + + # Check if interrupted 
def _handle_interactive_command(
    self,
    cmd: str,
    needs_sudo: bool
) -> tuple[bool, str, str]:
    """Handle interactive commands that need a TTY.

    These commands cannot be run in the background - they need user interaction.
    We'll either:
    1. Try to open in a new terminal window
    2. Or inform the user to run it manually

    Returns a (success, stdout_message, stderr_message) tuple shaped like
    the other command executors' return values.
    """
    console.print()
    console.print(f"[yellow]⚡ Interactive command detected[/yellow]")
    console.print(f"[dim] This command requires a terminal for interaction.[/dim]")
    console.print()

    full_cmd = f"sudo {cmd}" if needs_sudo else cmd

    # Candidate terminal emulators, tried in order. Each wraps the command
    # so the window stays open until the user presses Enter.
    terminal_cmds = [
        ("gnome-terminal", f'gnome-terminal -- bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'),
        ("konsole", f'konsole -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'),
        ("xterm", f'xterm -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'),
        ("x-terminal-emulator", f'x-terminal-emulator -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'),
    ]

    # Check which terminal is available
    for term_name, term_cmd in terminal_cmds:
        if shutil.which(term_name):
            console.print(f"[cyan]🖥️ Opening in new terminal window ({term_name})...[/cyan]")
            console.print(f"[dim] Command: {full_cmd}[/dim]")
            console.print()

            try:
                # Start the terminal in background
                subprocess.Popen(
                    term_cmd,
                    shell=True,
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                )
                # NOTE(review): success is reported as soon as the window is
                # spawned — the wrapped command's actual outcome is unknown
                # here. Confirm callers treat this as "launched", not "done".
                return True, f"Command opened in new {term_name} window", ""
            except Exception as e:
                console.print(f"[yellow] ⚠ Could not open terminal: {e}[/yellow]")
                break

    # Fallback: ask user to run manually
    console.print(f"[bold cyan]📋 Please run this command manually in another terminal:[/bold cyan]")
    console.print()
    console.print(f" [green]{full_cmd}[/green]")
    console.print()
    console.print(f"[dim] This command needs interactive input (TTY).[/dim]")
    console.print(f"[dim] Cortex cannot capture its output automatically.[/dim]")
    console.print()

    # Return special status indicating manual run needed.
    # "INTERACTIVE_COMMAND_MANUAL" is a sentinel presumably recognised by the
    # caller to mark a manual step — TODO confirm where it is consumed.
    return True, "INTERACTIVE_COMMAND_MANUAL", f"Interactive command - run manually: {full_cmd}"
def execute_commands_as_cortex(
    self,
    commands: list[tuple[str, str, list[str]]],
    user_query: str,
) -> DoRun:
    """Execute commands with granular error handling and auto-recovery.

    Runs in four phases: (1) conflict detection + cleanup, (2) per-command
    execution with diagnosis, login handling and auto-fix, (3) verification
    tests, (4) auto-repair of failed tests followed by a re-run of the tests.
    The completed run is persisted via ``self.db.save_run`` and returned.

    Args:
        commands: List of ``(command, purpose, protected_paths)`` tuples.
        user_query: The original natural-language request, used for file
            analysis, verification and the final LLM summary.

    Returns:
        The populated :class:`DoRun` record for this execution.
    """
    # New run record; the summary is filled in after execution completes.
    run = DoRun(
        run_id=self.db._generate_run_id(),
        summary="",
        mode=RunMode.CORTEX_EXEC,
        user_query=user_query,
        started_at=datetime.datetime.now().isoformat(),
        session_id=self.current_session_id or "",
    )
    self.current_run = run

    console.print()
    console.print("[bold cyan]🚀 Executing commands with conflict detection...[/bold cyan]")
    console.print()

    # Phase 1: Conflict Detection — collect cleanup commands from every
    # conflicting command before touching the system.
    console.print("[dim]Checking for conflicts...[/dim]")

    cleanup_commands = []
    for cmd, purpose, protected in commands:
        conflict = self._conflict_detector.check_for_conflicts(cmd, purpose)
        if conflict["has_conflict"]:
            console.print(f"[yellow] ⚠ {conflict['conflict_type']}: {conflict['suggestion']}[/yellow]")
            if conflict["cleanup_commands"]:
                cleanup_commands.extend(conflict["cleanup_commands"])

    if cleanup_commands:
        console.print("[dim]Running cleanup commands...[/dim]")
        for cleanup_cmd in cleanup_commands:
            # Cleanup always runs privileged; individual failures are ignored here.
            self._execute_single_command(cleanup_cmd, needs_sudo=True)

    console.print()

    # Union of all protected paths across the batch, shown once up front.
    all_protected = set()
    for _, _, protected in commands:
        all_protected.update(protected)

    if all_protected:
        console.print(f"[dim]📁 Protected paths involved: {', '.join(all_protected)}[/dim]")
        console.print()

    # Phase 2: Execute Commands
    from rich.panel import Panel
    from rich.text import Text

    for i, (cmd, purpose, protected) in enumerate(commands, 1):
        # Create a visually distinct panel for each command
        # NOTE(review): cmd_header is built but never printed — looks like a
        # leftover from an earlier rendering approach; confirm before removing.
        cmd_header = Text()
        cmd_header.append(f"[{i}/{len(commands)}] ", style="bold white on blue")
        cmd_header.append(f" {cmd}", style="bold cyan")

        console.print()
        console.print(Panel(
            f"[bold cyan]{cmd}[/bold cyan]\n[dim]└─ {purpose}[/dim]",
            title=f"[bold white] Command {i}/{len(commands)} [/bold white]",
            title_align="left",
            border_style="blue",
            padding=(0, 1),
        ))

        # Pre-flight: let the file analyzer veto/adjust file-touching commands.
        file_check = self._file_analyzer.check_file_exists_and_usefulness(cmd, purpose, user_query)

        if file_check["recommendations"]:
            self._file_analyzer.apply_file_recommendations(file_check["recommendations"])

        cmd_log = CommandLog(
            command=cmd,
            purpose=purpose,
            timestamp=datetime.datetime.now().isoformat(),
            status=CommandStatus.RUNNING,
        )

        start_time = time.time()
        needs_sudo = self._needs_sudo(cmd, protected)

        success, stdout, stderr = self._execute_single_command(cmd, needs_sudo)

        if not success:
            diagnosis = self._diagnoser.diagnose_error(cmd, stderr)

            # Create error panel for visual grouping
            error_info = (
                f"[bold red]⚠ {diagnosis['description']}[/bold red]\n"
                f"[dim]Type: {diagnosis['error_type']} | Category: {diagnosis.get('category', 'unknown')}[/dim]"
            )
            console.print(Panel(
                error_info,
                title="[bold red] ❌ Error Detected [/bold red]",
                title_align="left",
                border_style="red",
                padding=(0, 1),
            ))

            # Check if this is a login/credential required error
            if diagnosis.get("category") == "login_required":
                console.print(Panel(
                    "[bold cyan]🔐 Authentication required for this command[/bold cyan]",
                    border_style="cyan",
                    padding=(0, 1),
                    expand=False,
                ))

                login_success, login_msg = self._login_handler.handle_login(cmd, stderr)

                if login_success:
                    console.print(Panel(
                        f"[bold green]✓ {login_msg}[/bold green]\n[dim]Retrying command...[/dim]",
                        border_style="green",
                        padding=(0, 1),
                        expand=False,
                    ))

                    # Retry the command after successful login
                    success, stdout, stderr = self._execute_single_command(cmd, needs_sudo)

                    if success:
                        console.print(Panel(
                            "[bold green]✓ Command succeeded after authentication![/bold green]",
                            border_style="green",
                            padding=(0, 1),
                            expand=False,
                        ))
                    else:
                        console.print(Panel(
                            f"[bold yellow]Command still failed after login[/bold yellow]\n[dim]{stderr[:100]}[/dim]",
                            border_style="yellow",
                            padding=(0, 1),
                        ))
                else:
                    console.print(f"[yellow]{login_msg}[/yellow]")
            else:
                # Not a login error, proceed with regular error handling
                extra_info = []
                if diagnosis.get("extracted_path"):
                    extra_info.append(f"[dim]Path:[/dim] {diagnosis['extracted_path']}")
                if diagnosis.get("extracted_info"):
                    for key, value in diagnosis["extracted_info"].items():
                        if value:
                            extra_info.append(f"[dim]{key}:[/dim] {value}")

                if extra_info:
                    console.print(Panel(
                        "\n".join(extra_info),
                        title="[dim] Error Details [/dim]",
                        title_align="left",
                        border_style="dim",
                        padding=(0, 1),
                        expand=False,
                    ))

                fixed, fix_message, fix_commands = self._auto_fixer.auto_fix_error(
                    cmd, stderr, diagnosis, max_attempts=3
                )

                if fixed:
                    success = True
                    console.print(Panel(
                        f"[bold green]✓ Auto-fixed:[/bold green] {fix_message}",
                        title="[bold green] Fix Successful [/bold green]",
                        title_align="left",
                        border_style="green",
                        padding=(0, 1),
                        expand=False,
                    ))
                    # Re-run to refresh stdout/stderr after the fix.
                    # NOTE(review): the retry forces needs_sudo=True regardless of
                    # the original needs_sudo decision — confirm this is intended.
                    _, stdout, stderr = self._execute_single_command(cmd, needs_sudo=True)
                else:
                    fix_info = []
                    if fix_commands:
                        fix_info.append(f"[dim]Attempted:[/dim] {len(fix_commands)} fix command(s)")
                    fix_info.append(f"[bold yellow]Result:[/bold yellow] {fix_message}")
                    console.print(Panel(
                        "\n".join(fix_info),
                        title="[bold yellow] Fix Incomplete [/bold yellow]",
                        title_align="left",
                        border_style="yellow",
                        padding=(0, 1),
                    ))

        # Record outcome for this command regardless of success.
        cmd_log.duration_seconds = time.time() - start_time
        cmd_log.output = stdout
        cmd_log.error = stderr
        cmd_log.status = CommandStatus.SUCCESS if success else CommandStatus.FAILED

        run.commands.append(cmd_log)
        run.files_accessed.extend(protected)

        if success:
            console.print(Panel(
                f"[bold green]✓ Success[/bold green] [dim]({cmd_log.duration_seconds:.2f}s)[/dim]",
                border_style="green",
                padding=(0, 1),
                expand=False,
            ))
            if stdout:
                self._show_expandable_output(stdout, cmd)
        else:
            console.print(Panel(
                f"[bold red]✗ Failed[/bold red]\n[dim]{stderr[:200]}[/dim]",
                border_style="red",
                padding=(0, 1),
            ))

            # Surface suggested fixes that the auto-fixer could not apply itself.
            final_diagnosis = self._diagnoser.diagnose_error(cmd, stderr)
            if final_diagnosis["fix_commands"] and not final_diagnosis["can_auto_fix"]:
                # Create a manual intervention panel
                manual_content = [f"[bold yellow]Issue:[/bold yellow] {final_diagnosis['description']}", ""]
                manual_content.append("[bold]Suggested commands:[/bold]")
                for fix_cmd in final_diagnosis["fix_commands"]:
                    # Lines starting with '#' are explanatory, not runnable.
                    if not fix_cmd.startswith("#"):
                        manual_content.append(f" [cyan]$ {fix_cmd}[/cyan]")
                    else:
                        manual_content.append(f" [dim]{fix_cmd}[/dim]")

                console.print(Panel(
                    "\n".join(manual_content),
                    title="[bold yellow] 💡 Manual Intervention Required [/bold yellow]",
                    title_align="left",
                    border_style="yellow",
                    padding=(0, 1),
                ))

        console.print()

    # Drop any sudo grants acquired during the batch.
    self._granted_privileges = []

    # Phase 3: Verification Tests
    console.print()
    console.print(Panel(
        "[bold]Running verification tests...[/bold]",
        title="[bold cyan] 🧪 Verification Phase [/bold cyan]",
        title_align="left",
        border_style="cyan",
        padding=(0, 1),
        expand=False,
    ))
    all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query)

    # Phase 4: Auto-repair if tests failed
    if not all_tests_passed:
        console.print()
        console.print(Panel(
            "[bold yellow]Attempting to repair test failures...[/bold yellow]",
            title="[bold yellow] 🔧 Auto-Repair Phase [/bold yellow]",
            title_align="left",
            border_style="yellow",
            padding=(0, 1),
            expand=False,
        ))

        repair_success = self._handle_test_failures(test_results, run)

        if repair_success:
            console.print("[dim]Re-running verification tests...[/dim]")
            all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query)

    run.completed_at = datetime.datetime.now().isoformat()
    run.summary = self._generate_summary(run)

    if test_results:
        passed = sum(1 for t in test_results if t["passed"])
        run.summary += f" | Tests: {passed}/{len(test_results)} passed"

    self.db.save_run(run)

    # Generate LLM summary/answer
    llm_answer = self._generate_llm_answer(run, user_query)

    # Print condensed execution summary with answer
    self._print_execution_summary(run, answer=llm_answer)

    console.print()
    console.print(f"[dim]Run ID: {run.run_id}[/dim]")

    return run
def _handle_resource_conflict(
    self,
    idx: int,
    cmd: str,
    conflict: dict,
    commands_to_skip: set,
    cleanup_commands: list,
) -> bool:
    """Handle any resource conflict with user options.

    This is a GENERAL handler for all resource types:
    - Docker containers
    - Services
    - Files/directories
    - Packages
    - Ports
    - Users/groups
    - Virtual environments
    - Databases
    - Cron jobs

    Args:
        idx: Index of the conflicting command in the batch; added to
            ``commands_to_skip`` when the user's choice makes the command
            redundant.
        cmd: The conflicting command text (display only).
        conflict: Detector result dict (``resource_type``, ``resource_name``,
            ``suggestion``, ``is_active``, ``alternative_actions``, ...).
        commands_to_skip: Mutated in place — indexes to drop from the batch.
        cleanup_commands: Mutated in place — commands appended when there is
            no interactive alternative but the detector supplied cleanup.

    Returns:
        True when an alternative action was chosen and handled interactively;
        False when no alternatives existed (default cleanup path).
    """
    resource_type = conflict.get("resource_type", "resource")
    resource_name = conflict.get("resource_name", "unknown")
    # NOTE(review): conflict_type is read but never used below — confirm
    # whether it was meant to appear in the panel.
    conflict_type = conflict.get("conflict_type", "unknown")
    suggestion = conflict.get("suggestion", "")
    is_active = conflict.get("is_active", True)
    alternatives = conflict.get("alternative_actions", [])

    # Resource type icons
    icons = {
        "container": "🐳",
        "compose": "🐳",
        "service": "⚙️",
        "file": "📄",
        "directory": "📁",
        "package": "📦",
        "pip_package": "🐍",
        "npm_package": "📦",
        "port": "🔌",
        "user": "👤",
        "group": "👥",
        "venv": "🐍",
        "mysql_database": "🗄️",
        "postgres_database": "🗄️",
        "cron_job": "⏰",
    }
    icon = icons.get(resource_type, "📌")

    # Display the conflict with visual grouping
    from rich.panel import Panel

    status_text = "[bold cyan]Active[/bold cyan]" if is_active else "[dim yellow]Inactive[/dim yellow]"
    conflict_content = (
        f"{icon} [bold]{resource_type.replace('_', ' ').title()}:[/bold] '{resource_name}'\n"
        f"[dim]Status:[/dim] {status_text}\n"
        f"[dim]{suggestion}[/dim]"
    )

    console.print()
    console.print(Panel(
        conflict_content,
        title="[bold yellow] ⚠️ Resource Conflict [/bold yellow]",
        title_align="left",
        border_style="yellow",
        padding=(0, 1),
    ))

    # If there are alternatives, show them
    if alternatives:
        options_content = ["[bold]What would you like to do?[/bold]", ""]
        for j, alt in enumerate(alternatives, 1):
            options_content.append(f" {j}. {alt['description']}")

        console.print(Panel(
            "\n".join(options_content),
            border_style="dim",
            padding=(0, 1),
        ))

        from rich.prompt import Prompt
        choice = Prompt.ask(
            " Choose an option",
            choices=[str(k) for k in range(1, len(alternatives) + 1)],
            default="1"
        )

        selected = alternatives[int(choice) - 1]
        action = selected["action"]
        action_commands = selected.get("commands", [])

        # Handle different actions.
        # Branches that call commands_to_skip.add(idx) suppress the original
        # command; branches that don't let it run after their side effects.
        if action in ["use_existing", "use_different"]:
            console.print(f"[green] ✓ Using existing {resource_type} '{resource_name}'[/green]")
            commands_to_skip.add(idx)
            return True

    elif action == "start_existing":
            console.print(f"[cyan] Starting existing {resource_type}...[/cyan]")
            for start_cmd in action_commands:
                needs_sudo = start_cmd.startswith("sudo")
                success, _, stderr = self._execute_single_command(start_cmd, needs_sudo=needs_sudo)
                if success:
                    console.print(f"[green] ✓ {start_cmd}[/green]")
                else:
                    console.print(f"[red] ✗ {start_cmd}: {stderr[:50]}[/red]")
            commands_to_skip.add(idx)
            return True

    elif action in ["restart", "upgrade", "reinstall"]:
            console.print(f"[cyan] {action.title()}ing {resource_type}...[/cyan]")
            for action_cmd in action_commands:
                needs_sudo = action_cmd.startswith("sudo")
                success, _, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo)
                if success:
                    console.print(f"[green] ✓ {action_cmd}[/green]")
                else:
                    console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]")
            commands_to_skip.add(idx)
            return True

    elif action in ["recreate", "backup", "replace", "stop_existing"]:
            console.print(f"[cyan] Preparing to {action.replace('_', ' ')}...[/cyan]")
            for action_cmd in action_commands:
                needs_sudo = action_cmd.startswith("sudo")
                success, _, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo)
                if success:
                    console.print(f"[green] ✓ {action_cmd}[/green]")
                else:
                    console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]")
            # Don't skip - let the original command run after cleanup
            return True

    elif action == "modify":
            console.print(f"[cyan] Will modify existing {resource_type}[/cyan]")
            # Don't skip - let the original command run to modify
            return True

    elif action == "install_first":
            # Install a missing tool/dependency first
            console.print(f"[cyan] Installing required dependency '{resource_name}'...[/cyan]")
            all_success = True
            for action_cmd in action_commands:
                needs_sudo = action_cmd.startswith("sudo")
                success, stdout, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo)
                if success:
                    console.print(f"[green] ✓ {action_cmd}[/green]")
                else:
                    console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]")
                    all_success = False

            if all_success:
                console.print(f"[green] ✓ '{resource_name}' installed. Continuing with original command...[/green]")
                # Don't skip - run the original command now that the tool is installed
                return True
            else:
                console.print(f"[red] ✗ Failed to install '{resource_name}'[/red]")
                commands_to_skip.add(idx)
                return True

    elif action == "use_apt":
            # User chose to use apt instead of snap
            console.print(f"[cyan] Skipping snap command - use apt instead[/cyan]")
            commands_to_skip.add(idx)
            return True

    elif action == "refresh":
            # Refresh snap package
            console.print(f"[cyan] Refreshing snap package...[/cyan]")
            for action_cmd in action_commands:
                needs_sudo = action_cmd.startswith("sudo")
                success, _, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo)
                if success:
                    console.print(f"[green] ✓ {action_cmd}[/green]")
                else:
                    console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]")
            commands_to_skip.add(idx)
            return True

    # No alternatives - use default behavior (add to cleanup if available)
    if conflict.get("cleanup_commands"):
        cleanup_commands.extend(conflict["cleanup_commands"])

    return False
Continuing with original command...[/green]") + # Don't skip - run the original command now that the tool is installed + return True + else: + console.print(f"[red] ✗ Failed to install '{resource_name}'[/red]") + commands_to_skip.add(idx) + return True + + elif action == "use_apt": + # User chose to use apt instead of snap + console.print(f"[cyan] Skipping snap command - use apt instead[/cyan]") + commands_to_skip.add(idx) + return True + + elif action == "refresh": + # Refresh snap package + console.print(f"[cyan] Refreshing snap package...[/cyan]") + for action_cmd in action_commands: + needs_sudo = action_cmd.startswith("sudo") + success, _, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo) + if success: + console.print(f"[green] ✓ {action_cmd}[/green]") + else: + console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]") + commands_to_skip.add(idx) + return True + + # No alternatives - use default behavior (add to cleanup if available) + if conflict.get("cleanup_commands"): + cleanup_commands.extend(conflict["cleanup_commands"]) + + return False + + def _handle_test_failures( + self, + test_results: list[dict[str, Any]], + run: DoRun, + ) -> bool: + """Handle failed verification tests by attempting auto-repair.""" + failed_tests = [t for t in test_results if not t["passed"]] + + if not failed_tests: + return True + + console.print() + console.print("[bold yellow]🔧 Attempting to fix test failures...[/bold yellow]") + + all_fixed = True + + for test in failed_tests: + test_name = test["test"] + output = test["output"] + + console.print(f"[dim] Fixing: {test_name}[/dim]") + + if "nginx -t" in test_name: + diagnosis = self._diagnoser.diagnose_error("nginx -t", output) + fixed, msg, _ = self._auto_fixer.auto_fix_error("nginx -t", output, diagnosis, max_attempts=3) + if fixed: + console.print(f"[green] ✓ Fixed: {msg}[/green]") + else: + console.print(f"[red] ✗ Could not fix: {msg}[/red]") + all_fixed = False + + elif "apache2ctl" in test_name: 
def execute_with_task_tree(
    self,
    commands: list[tuple[str, str, list[str]]],
    user_query: str,
) -> DoRun:
    """Execute commands using the task tree system with advanced auto-repair.

    Phases: conflict detection (with per-resource user prompts and
    deduplication), task-tree execution with Ctrl+C/Ctrl+Z interruption
    support, verification tests, auto-repair, then an interactive
    follow-up session. Signal handlers are installed for the duration of
    execution and restored in the ``finally`` block.

    Args:
        commands: ``(command, purpose, protected_paths)`` tuples.
        user_query: Original natural-language request.

    Returns:
        The persisted :class:`DoRun` record (saved even when interrupted).
    """
    # Reset execution state for new run
    self._reset_execution_state()

    run = DoRun(
        run_id=self.db._generate_run_id(),
        summary="",
        mode=RunMode.CORTEX_EXEC,
        user_query=user_query,
        started_at=datetime.datetime.now().isoformat(),
        session_id=self.current_session_id or "",
    )
    self.current_run = run
    self._permission_requests_count = 0

    # One root task per command; reasoning records the protected paths.
    self._task_tree = TaskTree()
    for cmd, purpose, protected in commands:
        task = self._task_tree.add_root_task(cmd, purpose)
        task.reasoning = f"Protected paths: {', '.join(protected)}" if protected else ""

    console.print()
    console.print(Panel(
        "[bold cyan]🌳 Task Tree Execution Mode[/bold cyan]\n"
        "[dim]Commands will be executed with auto-repair capabilities.[/dim]\n"
        "[dim]Conflict detection and verification tests enabled.[/dim]\n"
        "[dim yellow]Press Ctrl+Z or Ctrl+C to stop execution at any time.[/dim yellow]",
        expand=False,
    ))
    console.print()

    # Set up signal handlers for Ctrl+Z and Ctrl+C
    self._setup_signal_handlers()

    # Phase 1: Conflict Detection - Claude-like header
    console.print("[bold blue]━━━[/bold blue] [bold]Checking for Conflicts[/bold]")

    conflicts_found = []
    cleanup_commands = []
    commands_to_skip = set()  # Track commands that should be skipped (use existing)
    # NOTE(review): commands_to_replace is never populated or read below.
    commands_to_replace = {}  # Track commands that should be replaced
    resource_decisions = {}  # Track user decisions for each resource to avoid duplicate prompts

    for i, (cmd, purpose, protected) in enumerate(commands):
        conflict = self._conflict_detector.check_for_conflicts(cmd, purpose)
        if conflict["has_conflict"]:
            conflicts_found.append((i, cmd, conflict))

    if conflicts_found:
        # Deduplicate conflicts by resource name
        unique_resources = {}
        for idx, cmd, conflict in conflicts_found:
            resource_name = conflict.get("resource_name", cmd)
            if resource_name not in unique_resources:
                unique_resources[resource_name] = []
            unique_resources[resource_name].append((idx, cmd, conflict))

        console.print(f" [yellow]●[/yellow] Found [bold]{len(unique_resources)}[/bold] unique conflict(s)")

        for resource_name, resource_conflicts in unique_resources.items():
            # Only ask once per unique resource
            first_idx, first_cmd, first_conflict = resource_conflicts[0]

            # Handle the first conflict to get user's decision
            decision = self._handle_resource_conflict(first_idx, first_cmd, first_conflict, commands_to_skip, cleanup_commands)
            resource_decisions[resource_name] = decision

            # Apply the same decision to all other commands affecting this resource
            if len(resource_conflicts) > 1:
                for idx, cmd, conflict in resource_conflicts[1:]:
                    # If the user chose to skip the first command for this
                    # resource, skip its siblings too.
                    if first_idx in commands_to_skip:
                        commands_to_skip.add(idx)

        # Run cleanup commands for non-Docker conflicts
        if cleanup_commands:
            console.print("[dim] Running cleanup commands...[/dim]")
            for cleanup_cmd in cleanup_commands:
                self._execute_single_command(cleanup_cmd, needs_sudo=True)
                console.print(f"[dim] ✓ {cleanup_cmd}[/dim]")

        # Filter out skipped commands
        if commands_to_skip:
            filtered_commands = [
                (cmd, purpose, protected)
                for i, (cmd, purpose, protected) in enumerate(commands)
                if i not in commands_to_skip
            ]
            # Update task tree to skip these tasks
            for task in self._task_tree.root_tasks:
                # Match tree tasks back to command indexes by command text.
                task_idx = next(
                    (i for i, (c, p, pr) in enumerate(commands) if c == task.command),
                    None
                )
                if task_idx in commands_to_skip:
                    task.status = CommandStatus.SKIPPED
                    task.output = "Using existing resource"
            commands = filtered_commands
    else:
        console.print(" [green]●[/green] No conflicts detected")

    console.print()

    all_protected = set()
    for _, _, protected in commands:
        all_protected.update(protected)

    if all_protected:
        console.print(f"[dim]📁 Protected paths: {', '.join(all_protected)}[/dim]")
        console.print()

    try:
        # Phase 2: Execute Commands - Claude-like header
        console.print()
        console.print("[bold blue]━━━[/bold blue] [bold]Executing Commands[/bold]")
        console.print()

        # Track remaining commands for resume functionality
        # NOTE(review): executed_tasks is filled but not read afterwards.
        executed_tasks = set()
        for i, root_task in enumerate(self._task_tree.root_tasks):
            if self._interrupted:
                # Store remaining tasks for potential continuation
                remaining_tasks = self._task_tree.root_tasks[i:]
                self._remaining_commands = [
                    (t.command, t.purpose, [])
                    for t in remaining_tasks
                    if t.status not in (CommandStatus.SUCCESS, CommandStatus.SKIPPED)
                ]
                break
            self._execute_task_node(root_task, run, commands)
            executed_tasks.add(root_task.id)

        if not self._interrupted:
            # Phase 3: Verification Tests - Claude-like header
            console.print()
            console.print("[bold blue]━━━[/bold blue] [bold]Verification[/bold]")

            all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query)

            # Phase 4: Auto-repair if tests failed
            if not all_tests_passed:
                console.print()
                console.print("[bold blue]━━━[/bold blue] [bold]Auto-Repair[/bold]")

                repair_success = self._handle_test_failures(test_results, run)

                if repair_success:
                    console.print()
                    console.print("[dim] Re-running verification tests...[/dim]")
                    all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query)
        else:
            all_tests_passed = False
            test_results = []

        run.completed_at = datetime.datetime.now().isoformat()

        if self._interrupted:
            run.summary = f"INTERRUPTED after {len(self._executed_commands)} command(s)"
        else:
            run.summary = self._generate_tree_summary(run)
            if test_results:
                passed = sum(1 for t in test_results if t["passed"])
                run.summary += f" | Tests: {passed}/{len(test_results)} passed"

        self.db.save_run(run)

        console.print()
        console.print("[bold]Task Execution Tree:[/bold]")
        self._task_tree.print_tree()

        # Generate LLM summary/answer if available
        llm_answer = None
        if not self._interrupted:
            llm_answer = self._generate_llm_answer(run, user_query)

        # Print condensed execution summary with answer
        self._print_execution_summary(run, answer=llm_answer)

        console.print()
        if self._interrupted:
            console.print(f"[dim]Run ID: {run.run_id} (interrupted)[/dim]")
        elif all_tests_passed:
            # NOTE(review): when tests failed (not interrupted) no Run ID line
            # is printed — confirm this asymmetry is intentional.
            console.print(f"[dim]Run ID: {run.run_id}[/dim]")

        if self._permission_requests_count > 1:
            console.print(f"[dim]Permission requests made: {self._permission_requests_count}[/dim]")

        # Reset interrupted flag before interactive session
        # This allows the user to continue the session even after stopping a command
        was_interrupted = self._interrupted
        self._interrupted = False

        # Always go to interactive session - even after interruption
        # User can decide what to do next (retry, skip, exit)
        self._interactive_session(run, commands, user_query, was_interrupted=was_interrupted)

        return run

    finally:
        # Always restore signal handlers
        self._restore_signal_handlers()
def _interactive_session(
    self,
    run: DoRun,
    commands: list[tuple[str, str, list[str]]],
    user_query: str,
    was_interrupted: bool = False,
) -> None:
    """Interactive session after task completion - suggest next steps.

    If was_interrupted is True, the previous command execution was stopped
    by Ctrl+Z/Ctrl+C. We still continue the session so the user can decide
    what to do next (retry, skip remaining, run different command, etc).

    Loop behaviour: a numeric reply selects a suggestion, exit keywords (or
    EOF/Ctrl+C) end the session, and anything else is treated as a natural
    language request. After each handled action the suggestion list is
    regenerated from the latest input.
    """
    import sys
    from rich.prompt import Prompt

    # Flush any pending output to ensure clean display
    sys.stdout.flush()
    sys.stderr.flush()

    # Generate context-aware suggestions based on what was done
    suggestions = self._generate_suggestions(run, commands, user_query)

    # If interrupted, add special suggestions at the beginning
    if was_interrupted:
        interrupted_suggestions = [
            {
                "label": "🔄 Retry interrupted command",
                "description": "Try running the interrupted command again",
                "type": "retry_interrupted",
            },
            {
                "label": "⏭️ Skip and continue",
                "description": "Skip the interrupted command and continue with remaining tasks",
                "type": "skip_and_continue",
            },
        ]
        suggestions = interrupted_suggestions + suggestions

    # Track context for natural language processing
    context = {
        "original_query": user_query,
        "executed_commands": [cmd for cmd, _, _ in commands],
        "session_actions": [],
        "was_interrupted": was_interrupted,
    }

    console.print()
    if was_interrupted:
        console.print("[bold yellow]━━━[/bold yellow] [bold]Execution Interrupted - What would you like to do?[/bold]")
    else:
        console.print("[bold blue]━━━[/bold blue] [bold]Next Steps[/bold]")
    console.print()

    # Display suggestions
    self._display_suggestions(suggestions)

    console.print()
    console.print("[dim]You can type any request in natural language[/dim]")
    console.print()

    # Ensure prompt is visible
    sys.stdout.flush()

    while True:
        try:
            response = Prompt.ask(
                "[bold cyan]>[/bold cyan]",
                default="exit"
            )

            response_stripped = response.strip()
            response_lower = response_stripped.lower()

            # Check for exit keywords
            if response_lower in ["exit", "quit", "done", "no", "n", "bye", "thanks", "nothing", ""]:
                console.print("[dim]👋 Session ended. Run 'cortex do history' to see past runs.[/dim]")
                break

            # Try to parse as number (for suggestion selection)
            try:
                choice = int(response_stripped)
                if suggestions and 1 <= choice <= len(suggestions):
                    suggestion = suggestions[choice - 1]
                    self._execute_suggestion(suggestion, run, user_query)
                    context["session_actions"].append(suggestion.get("label", ""))

                    # Update last query to the suggestion for context-aware follow-ups
                    suggestion_label = suggestion.get("label", "")
                    context["last_query"] = suggestion_label

                    # Continue the session with suggestions based on what was just done
                    console.print()
                    suggestions = self._generate_suggestions_for_query(suggestion_label, context)
                    self._display_suggestions(suggestions)
                    console.print()
                    continue
                elif suggestions and choice == len(suggestions) + 1:
                    # "Exit session" menu entry (numbered len+1 in the display).
                    console.print("[dim]👋 Session ended.[/dim]")
                    break
            except ValueError:
                # Not a number — fall through to natural-language handling.
                pass

            # Handle natural language request
            # (Out-of-range numbers also land here as plain text.)
            handled = self._handle_natural_language_request(
                response_stripped,
                suggestions,
                context,
                run,
                commands
            )

            if handled:
                context["session_actions"].append(response_stripped)
                # Update context with the new query for better suggestions
                context["last_query"] = response_stripped

                # Refresh suggestions based on NEW query (not combined)
                # This ensures suggestions are relevant to what user just asked
                console.print()
                suggestions = self._generate_suggestions_for_query(response_stripped, context)
                self._display_suggestions(suggestions)
                console.print()

        except (EOFError, KeyboardInterrupt):
            console.print("\n[dim]👋 Session ended.[/dim]")
            break

    # Cleanup: ensure any terminal monitors are stopped
    if self._terminal_monitor:
        self._terminal_monitor.stop()
        self._terminal_monitor = None
+ console.print() + + except (EOFError, KeyboardInterrupt): + console.print("\n[dim]👋 Session ended.[/dim]") + break + + # Cleanup: ensure any terminal monitors are stopped + if self._terminal_monitor: + self._terminal_monitor.stop() + self._terminal_monitor = None + + def _generate_suggestions_for_query(self, query: str, context: dict) -> list[dict]: + """Generate suggestions based on the current query and context. + + This generates follow-up suggestions relevant to what the user just asked/did, + not tied to the original task. + """ + suggestions = [] + query_lower = query.lower() + + # User management related queries + if any(w in query_lower for w in ["user", "locked", "password", "account", "login"]): + suggestions.append({ + "type": "info", + "icon": "👥", + "label": "List all users", + "description": "Show all system users", + "command": "cat /etc/passwd | cut -d: -f1", + "purpose": "List all users", + }) + suggestions.append({ + "type": "info", + "icon": "🔐", + "label": "Check sudo users", + "description": "Show users with sudo access", + "command": "getent group sudo", + "purpose": "List sudo group members", + }) + suggestions.append({ + "type": "action", + "icon": "🔓", + "label": "Unlock a user", + "description": "Unlock a locked user account", + "demo_type": "unlock_user", + }) + + # Service/process related queries + elif any(w in query_lower for w in ["service", "systemctl", "running", "process", "status"]): + suggestions.append({ + "type": "info", + "icon": "📊", + "label": "List running services", + "description": "Show all active services", + "command": "systemctl list-units --type=service --state=running", + "purpose": "List running services", + }) + suggestions.append({ + "type": "info", + "icon": "🔍", + "label": "Check failed services", + "description": "Show services that failed to start", + "command": "systemctl list-units --type=service --state=failed", + "purpose": "List failed services", + }) + + # Disk/storage related queries + elif any(w in 
query_lower for w in ["disk", "storage", "space", "mount", "partition"]): + suggestions.append({ + "type": "info", + "icon": "💾", + "label": "Check disk usage", + "description": "Show disk space by partition", + "command": "df -h", + "purpose": "Check disk usage", + }) + suggestions.append({ + "type": "info", + "icon": "📁", + "label": "Find large files", + "description": "Show largest files on disk", + "command": "sudo du -ah / 2>/dev/null | sort -rh | head -20", + "purpose": "Find large files", + }) + + # Network related queries + elif any(w in query_lower for w in ["network", "ip", "port", "connection", "firewall"]): + suggestions.append({ + "type": "info", + "icon": "🌐", + "label": "Show network interfaces", + "description": "Display IP addresses and interfaces", + "command": "ip addr show", + "purpose": "Show network interfaces", + }) + suggestions.append({ + "type": "info", + "icon": "🔌", + "label": "List open ports", + "description": "Show listening ports", + "command": "sudo ss -tlnp", + "purpose": "List open ports", + }) + + # Security related queries + elif any(w in query_lower for w in ["security", "audit", "log", "auth", "fail"]): + suggestions.append({ + "type": "info", + "icon": "🔒", + "label": "Check auth logs", + "description": "Show recent authentication attempts", + "command": "sudo tail -50 /var/log/auth.log", + "purpose": "Check auth logs", + }) + suggestions.append({ + "type": "info", + "icon": "⚠️", + "label": "Check failed logins", + "description": "Show failed login attempts", + "command": "sudo lastb | head -20", + "purpose": "Check failed logins", + }) + + # Package/installation related queries + elif any(w in query_lower for w in ["install", "package", "apt", "update"]): + suggestions.append({ + "type": "action", + "icon": "📦", + "label": "Update system", + "description": "Update package lists and upgrade", + "command": "sudo apt update && sudo apt upgrade -y", + "purpose": "Update system packages", + }) + suggestions.append({ + "type": 
"info", + "icon": "📋", + "label": "List installed packages", + "description": "Show recently installed packages", + "command": "apt list --installed 2>/dev/null | tail -20", + "purpose": "List installed packages", + }) + + # Default: generic helpful suggestions + if not suggestions: + suggestions.append({ + "type": "info", + "icon": "📊", + "label": "System overview", + "description": "Show system info and resource usage", + "command": "uname -a && uptime && free -h", + "purpose": "System overview", + }) + suggestions.append({ + "type": "info", + "icon": "🔍", + "label": "Check system logs", + "description": "View recent system messages", + "command": "sudo journalctl -n 20 --no-pager", + "purpose": "Check system logs", + }) + + return suggestions + + def _display_suggestions(self, suggestions: list[dict]) -> None: + """Display numbered suggestions.""" + if not suggestions: + console.print("[dim]No specific suggestions available.[/dim]") + return + + for i, suggestion in enumerate(suggestions, 1): + icon = suggestion.get("icon", "💡") + label = suggestion.get("label", "") + desc = suggestion.get("description", "") + console.print(f" [cyan]{i}.[/cyan] {icon} {label}") + if desc: + console.print(f" [dim]{desc}[/dim]") + + console.print(f" [cyan]{len(suggestions) + 1}.[/cyan] 🚪 Exit session") + + def _handle_natural_language_request( + self, + request: str, + suggestions: list[dict], + context: dict, + run: DoRun, + commands: list[tuple[str, str, list[str]]], + ) -> bool: + """Handle a natural language request from the user. + + Uses LLM if available for full understanding, falls back to pattern matching. + Returns True if the request was handled, False otherwise. 
+ """ + request_lower = request.lower() + + # Quick keyword matching for common actions (fast path) + keyword_handlers = [ + (["start", "run", "begin", "launch", "execute"], "start"), + (["setup", "configure", "config", "set up"], "setup"), + (["demo", "example", "sample", "code"], "demo"), + (["test", "verify", "check", "validate"], "test"), + ] + + # Check if request is a simple match to existing suggestions + for keywords, action_type in keyword_handlers: + if any(kw in request_lower for kw in keywords): + # Only use quick match if it's a very simple request + if len(request.split()) <= 4: + for suggestion in suggestions: + if suggestion.get("type") == action_type: + self._execute_suggestion(suggestion, run, context["original_query"]) + return True + + # Use LLM for full understanding if available + console.print() + console.print(f"[cyan]🤔 Understanding your request...[/cyan]") + + if self.llm_callback: + return self._handle_request_with_llm(request, context, run, commands) + else: + # Fall back to pattern matching + return self._handle_request_with_patterns(request, context, run) + + def _handle_request_with_llm( + self, + request: str, + context: dict, + run: DoRun, + commands: list[tuple[str, str, list[str]]], + ) -> bool: + """Handle request using LLM for full understanding.""" + try: + # Call LLM to understand the request + llm_response = self.llm_callback(request, context) + + if not llm_response or llm_response.get("response_type") == "error": + console.print(f"[yellow]⚠ Could not process request: {llm_response.get('error', 'Unknown error')}[/yellow]") + return False + + response_type = llm_response.get("response_type") + + # HARD CHECK: Filter out any raw JSON from reasoning field + reasoning = llm_response.get("reasoning", "") + if reasoning: + # Remove any JSON-like content from reasoning + import re + # If reasoning looks like JSON or contains JSON patterns, clean it + if (reasoning.strip().startswith(('{', '[', ']', '"response_type"')) or + 
re.search(r'"do_commands"\s*:', reasoning) or + re.search(r'"command"\s*:', reasoning) or + re.search(r'"requires_sudo"\s*:', reasoning)): + # Extract just the text explanation if possible + text_match = re.search(r'"reasoning"\s*:\s*"([^"]+)"', reasoning) + if text_match: + reasoning = text_match.group(1) + else: + reasoning = "Processing your request..." + llm_response["reasoning"] = reasoning + + # Handle do_commands - execute with confirmation + if response_type == "do_commands" and llm_response.get("do_commands"): + do_commands = llm_response["do_commands"] + reasoning = llm_response.get("reasoning", "") + + # Final safety check: don't print JSON-looking reasoning + if reasoning and not self._is_json_like(reasoning): + console.print() + console.print(f"[cyan]🤖 {reasoning}[/cyan]") + console.print() + + # Show commands and ask for confirmation + console.print("[bold]📋 Commands to execute:[/bold]") + for i, cmd_info in enumerate(do_commands, 1): + cmd = cmd_info.get("command", "") + purpose = cmd_info.get("purpose", "") + sudo = "🔐 " if cmd_info.get("requires_sudo") else "" + console.print(f" {i}. 
{sudo}[green]{cmd}[/green]") + if purpose: + console.print(f" [dim]{purpose}[/dim]") + console.print() + + if not Confirm.ask("Execute these commands?", default=True): + console.print("[dim]Skipped.[/dim]") + return False + + # Execute the commands + console.print() + from rich.panel import Panel + + executed_in_session = [] + for idx, cmd_info in enumerate(do_commands, 1): + cmd = cmd_info.get("command", "") + purpose = cmd_info.get("purpose", "Execute command") + needs_sudo = cmd_info.get("requires_sudo", False) or self._needs_sudo(cmd, []) + + # Create visual grouping for each command + console.print() + console.print(Panel( + f"[bold cyan]{cmd}[/bold cyan]\n[dim]└─ {purpose}[/dim]", + title=f"[bold] Command {idx}/{len(do_commands)} [/bold]", + title_align="left", + border_style="blue", + padding=(0, 1), + )) + + success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) + + if success: + console.print(Panel( + f"[bold green]✓ Success[/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + )) + if stdout: + output_preview = stdout[:300] + ('...' 
if len(stdout) > 300 else '') + console.print(f"[dim]{output_preview}[/dim]") + executed_in_session.append(cmd) + else: + console.print(Panel( + f"[bold red]✗ Failed[/bold red]\n[dim]{stderr[:150]}[/dim]", + border_style="red", + padding=(0, 1), + )) + + # Offer to diagnose and fix + if Confirm.ask("Try to auto-fix?", default=True): + diagnosis = self._diagnoser.diagnose_error(cmd, stderr) + fixed, msg, _ = self._auto_fixer.auto_fix_error(cmd, stderr, diagnosis) + if fixed: + console.print(Panel( + f"[bold green]✓ Fixed:[/bold green] {msg}", + border_style="green", + padding=(0, 1), + expand=False, + )) + executed_in_session.append(cmd) + + # Track executed commands in context for suggestion generation + if "executed_commands" not in context: + context["executed_commands"] = [] + context["executed_commands"].extend(executed_in_session) + + return True + + # Handle single command - execute directly + elif response_type == "command" and llm_response.get("command"): + cmd = llm_response["command"] + reasoning = llm_response.get("reasoning", "") + + console.print() + console.print(f"[cyan]📋 Running:[/cyan] [green]{cmd}[/green]") + if reasoning: + console.print(f" [dim]{reasoning}[/dim]") + + needs_sudo = self._needs_sudo(cmd, []) + success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) + + if success: + console.print(f"[green]✓ Success[/green]") + if stdout: + console.print(f"[dim]{stdout[:500]}{'...' 
if len(stdout) > 500 else ''}[/dim]") + else: + console.print(f"[red]✗ Failed: {stderr[:200]}[/red]") + + return True + + # Handle answer - just display it (filter raw JSON) + elif response_type == "answer" and llm_response.get("answer"): + answer = llm_response["answer"] + # Don't print raw JSON or internal processing messages + if not (self._is_json_like(answer) or + "I'm processing your request" in answer or + "I have a plan to execute" in answer): + console.print() + console.print(answer) + return True + + else: + console.print(f"[yellow]I didn't understand that. Could you rephrase?[/yellow]") + return False + + except Exception as e: + console.print(f"[yellow]⚠ Error processing request: {e}[/yellow]") + # Fall back to pattern matching + return self._handle_request_with_patterns(request, context, run) + + def _handle_request_with_patterns( + self, + request: str, + context: dict, + run: DoRun, + ) -> bool: + """Handle request using pattern matching (fallback when LLM not available).""" + # Try to generate a command from the natural language request + generated = self._generate_command_from_request(request, context) + + if generated: + cmd = generated.get("command") + purpose = generated.get("purpose", "Execute user request") + needs_confirm = generated.get("needs_confirmation", True) + + console.print() + console.print(f"[cyan]📋 I'll run this command:[/cyan]") + console.print(f" [green]{cmd}[/green]") + console.print(f" [dim]{purpose}[/dim]") + console.print() + + if needs_confirm: + if not Confirm.ask("Proceed?", default=True): + console.print("[dim]Skipped.[/dim]") + return False + + # Execute the command + needs_sudo = self._needs_sudo(cmd, []) + success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) + + if success: + console.print(f"[green]✓ Success[/green]") + if stdout: + output_preview = stdout[:500] + ('...' 
if len(stdout) > 500 else '') + console.print(f"[dim]{output_preview}[/dim]") + else: + console.print(f"[red]✗ Failed: {stderr[:200]}[/red]") + + # Offer to diagnose the error + if Confirm.ask("Would you like me to try to fix this?", default=True): + diagnosis = self._diagnoser.diagnose_error(cmd, stderr) + fixed, msg, _ = self._auto_fixer.auto_fix_error(cmd, stderr, diagnosis) + if fixed: + console.print(f"[green]✓ Fixed: {msg}[/green]") + + return True + + # Couldn't understand the request + console.print(f"[yellow]I'm not sure how to do that. Could you be more specific?[/yellow]") + console.print(f"[dim]Try something like: 'run the container', 'show me the config', or select a number.[/dim]") + return False + + def _generate_command_from_request( + self, + request: str, + context: dict, + ) -> dict | None: + """Generate a command from a natural language request.""" + request_lower = request.lower() + executed_cmds = context.get("executed_commands", []) + cmd_context = " ".join(executed_cmds).lower() + + # Pattern matching for common requests + patterns = [ + # Docker patterns + (r"run.*(?:container|image|docker)(?:.*port\s*(\d+))?", self._gen_docker_run), + (r"stop.*(?:container|docker)", self._gen_docker_stop), + (r"remove.*(?:container|docker)", self._gen_docker_remove), + (r"(?:show|list).*(?:containers?|images?)", self._gen_docker_list), + (r"logs?(?:\s+of)?(?:\s+the)?(?:\s+container)?", self._gen_docker_logs), + (r"exec.*(?:container|docker)|shell.*(?:container|docker)", self._gen_docker_exec), + + # Service patterns + (r"(?:start|restart).*(?:service|nginx|apache|postgres|mysql|redis)", self._gen_service_start), + (r"stop.*(?:service|nginx|apache|postgres|mysql|redis)", self._gen_service_stop), + (r"status.*(?:service|nginx|apache|postgres|mysql|redis)", self._gen_service_status), + + # Package patterns + (r"install\s+(.+)", self._gen_install_package), + (r"update\s+(?:packages?|system)", self._gen_update_packages), + + # File patterns + 
(r"(?:show|cat|view|read).*(?:config|file|log)(?:.*?([/\w\.\-]+))?", self._gen_show_file), + (r"edit.*(?:config|file)(?:.*?([/\w\.\-]+))?", self._gen_edit_file), + + # Info patterns + (r"(?:check|show|what).*(?:version|status)", self._gen_check_version), + (r"(?:how|where).*(?:connect|access|use)", self._gen_show_connection_info), + ] + + import re + for pattern, handler in patterns: + match = re.search(pattern, request_lower) + if match: + return handler(request, match, context) + + # Use LLM if available to generate command + if self.llm_callback: + return self._llm_generate_command(request, context) + + return None + + # Command generators + def _gen_docker_run(self, request: str, match, context: dict) -> dict: + # Find the image from context + executed = context.get("executed_commands", []) + image = "your-image" + for cmd in executed: + if "docker pull" in cmd: + image = cmd.split("docker pull")[-1].strip() + break + + # Check for port in request + port = match.group(1) if match.lastindex and match.group(1) else "8080" + container_name = image.split("/")[-1].split(":")[0] + + return { + "command": f"docker run -d --name {container_name} -p {port}:{port} {image}", + "purpose": f"Run {image} container on port {port}", + "needs_confirmation": True, + } + + def _gen_docker_stop(self, request: str, match, context: dict) -> dict: + return { + "command": "docker ps -q | xargs -r docker stop", + "purpose": "Stop all running containers", + "needs_confirmation": True, + } + + def _gen_docker_remove(self, request: str, match, context: dict) -> dict: + return { + "command": "docker ps -aq | xargs -r docker rm", + "purpose": "Remove all containers", + "needs_confirmation": True, + } + + def _gen_docker_list(self, request: str, match, context: dict) -> dict: + if "image" in request.lower(): + return {"command": "docker images", "purpose": "List Docker images", "needs_confirmation": False} + return {"command": "docker ps -a", "purpose": "List all containers", 
"needs_confirmation": False} + + def _gen_docker_logs(self, request: str, match, context: dict) -> dict: + return { + "command": "docker logs $(docker ps -lq) --tail 50", + "purpose": "Show logs of the most recent container", + "needs_confirmation": False, + } + + def _gen_docker_exec(self, request: str, match, context: dict) -> dict: + return { + "command": "docker exec -it $(docker ps -lq) /bin/sh", + "purpose": "Open shell in the most recent container", + "needs_confirmation": True, + } + + def _gen_service_start(self, request: str, match, context: dict) -> dict: + # Extract service name + services = ["nginx", "apache2", "postgresql", "mysql", "redis", "docker"] + service = "nginx" # default + for svc in services: + if svc in request.lower(): + service = svc + break + + if "restart" in request.lower(): + return {"command": f"sudo systemctl restart {service}", "purpose": f"Restart {service}", "needs_confirmation": True} + return {"command": f"sudo systemctl start {service}", "purpose": f"Start {service}", "needs_confirmation": True} + + def _gen_service_stop(self, request: str, match, context: dict) -> dict: + services = ["nginx", "apache2", "postgresql", "mysql", "redis", "docker"] + service = "nginx" + for svc in services: + if svc in request.lower(): + service = svc + break + return {"command": f"sudo systemctl stop {service}", "purpose": f"Stop {service}", "needs_confirmation": True} + + def _gen_service_status(self, request: str, match, context: dict) -> dict: + services = ["nginx", "apache2", "postgresql", "mysql", "redis", "docker"] + service = "nginx" + for svc in services: + if svc in request.lower(): + service = svc + break + return {"command": f"systemctl status {service}", "purpose": f"Check {service} status", "needs_confirmation": False} + + def _gen_install_package(self, request: str, match, context: dict) -> dict: + package = match.group(1).strip() if match.group(1) else "package-name" + # Clean up common words + package = package.replace("please", 
"").replace("the", "").replace("package", "").strip() + return { + "command": f"sudo apt install -y {package}", + "purpose": f"Install {package}", + "needs_confirmation": True, + } + + def _gen_update_packages(self, request: str, match, context: dict) -> dict: + return { + "command": "sudo apt update && sudo apt upgrade -y", + "purpose": "Update all packages", + "needs_confirmation": True, + } + + def _gen_show_file(self, request: str, match, context: dict) -> dict: + # Try to extract file path or use common config locations + file_path = match.group(1) if match.lastindex and match.group(1) else None + + if not file_path: + if "nginx" in request.lower(): + file_path = "/etc/nginx/nginx.conf" + elif "apache" in request.lower(): + file_path = "/etc/apache2/apache2.conf" + elif "postgres" in request.lower(): + file_path = "/etc/postgresql/*/main/postgresql.conf" + else: + file_path = "/etc/hosts" + + return {"command": f"cat {file_path}", "purpose": f"Show {file_path}", "needs_confirmation": False} + + def _gen_edit_file(self, request: str, match, context: dict) -> dict: + file_path = match.group(1) if match.lastindex and match.group(1) else "/etc/hosts" + return { + "command": f"sudo nano {file_path}", + "purpose": f"Edit {file_path}", + "needs_confirmation": True, + } + + def _gen_check_version(self, request: str, match, context: dict) -> dict: + # Try to determine what to check version of + tools = { + "docker": "docker --version", + "node": "node --version && npm --version", + "python": "python3 --version && pip3 --version", + "nginx": "nginx -v", + "postgres": "psql --version", + } + + for tool, cmd in tools.items(): + if tool in request.lower(): + return {"command": cmd, "purpose": f"Check {tool} version", "needs_confirmation": False} + + # Default: show multiple versions + return { + "command": "docker --version; node --version 2>/dev/null; python3 --version", + "purpose": "Check installed tool versions", + "needs_confirmation": False, + } + + def 
    def _gen_show_connection_info(self, request: str, match, context: dict) -> dict:
        """Show how to connect to whatever was installed earlier this session."""
        executed = context.get("executed_commands", [])

        # Check what was installed to provide relevant connection info
        if any("ollama" in cmd for cmd in executed):
            return {
                "command": "echo 'Ollama API: http://localhost:11434' && curl -s http://localhost:11434/api/tags 2>/dev/null | head -5",
                "purpose": "Show Ollama connection info",
                "needs_confirmation": False,
            }
        elif any("postgres" in cmd for cmd in executed):
            return {
                "command": "echo 'PostgreSQL: psql -U postgres -h localhost' && sudo -u postgres psql -c '\\conninfo'",
                "purpose": "Show PostgreSQL connection info",
                "needs_confirmation": False,
            }
        elif any("nginx" in cmd for cmd in executed):
            return {
                "command": "echo 'Nginx: http://localhost:80' && curl -I http://localhost 2>/dev/null | head -3",
                "purpose": "Show Nginx connection info",
                "needs_confirmation": False,
            }

        # Fallback: just show what is listening.
        return {
            "command": "ss -tlnp | head -20",
            "purpose": "Show listening ports and services",
            "needs_confirmation": False,
        }

    def _llm_generate_command(self, request: str, context: dict) -> dict | None:
        """Use LLM to generate a command from the request.

        Returns a command dict (always requiring confirmation) or None when
        there is no callback, the callback fails, or it returns no command.

        NOTE(review): here llm_callback is invoked with a single prompt
        string, while _handle_request_with_llm calls it as
        llm_callback(request, context) — confirm the callback supports both
        calling conventions.
        """
        if not self.llm_callback:
            return None

        try:
            prompt = f"""Given this context:
- User originally asked: {context.get('original_query', 'N/A')}
- Commands executed: {', '.join(context.get('executed_commands', [])[:5])}
- Previous session actions: {', '.join(context.get('session_actions', [])[:3])}

The user now asks: "{request}"

Generate a single Linux command to fulfill this request.
Respond with JSON: {{"command": "...", "purpose": "..."}}
If you cannot generate a safe command, respond with: {{"error": "reason"}}"""

            result = self.llm_callback(prompt)
            if result and isinstance(result, dict):
                if "command" in result:
                    return {
                        "command": result["command"],
                        "purpose": result.get("purpose", "Execute user request"),
                        "needs_confirmation": True,
                    }
        except Exception:
            # NOTE(review): deliberate best-effort — any callback failure is
            # swallowed and treated as "no command"; consider logging it.
            pass

        return None

    def _generate_suggestions(
        self,
        run: DoRun,
        commands: list[tuple[str, str, list[str]]],
        user_query: str,
    ) -> list[dict]:
        """Generate context-aware suggestions based on what was installed/configured."""
        suggestions = []

        # Analyze what was done
        executed_cmds = [cmd for cmd, _, _ in commands]
        cmd_str = " ".join(executed_cmds).lower()
        query_lower = user_query.lower()

        # Docker-related suggestions
        if "docker" in cmd_str or "docker" in query_lower:
            if "pull" in cmd_str:
                # Suggest running the container
                for cmd, _, _ in commands:
                    if "docker pull" in cmd:
                        image = cmd.split("docker pull")[-1].strip()
                        suggestions.append({
                            "type": "start",
                            "icon": "🚀",
                            "label": f"Start the container",
                            "description": f"Run {image} in a container",
                            "command": f"docker run -d --name {image.split('/')[-1].split(':')[0]} {image}",
                            "purpose": f"Start {image} container",
                        })
                        suggestions.append({
                            "type": "demo",
                            "icon": "📝",
                            "label": "Show demo usage",
                            "description": f"Example docker-compose and run commands",
                            "demo_type": "docker",
                            "image": image,
                        })
                        break

        # Ollama/Model runner suggestions
        if "ollama" in cmd_str or "ollama" in query_lower or "model" in query_lower:
            suggestions.append({
                "type": "start",
                "icon": "🚀",
                "label": "Start Ollama server",
                "description": "Run Ollama in the background",
                "command": "docker run -d --name ollama -p 11434:11434 -v ollama:/root/.ollama ollama/ollama",
                "purpose": "Start Ollama server container",
            })
            suggestions.append({
                "type": "setup",
                "icon": "⚙️",
"⚙️", + "label": "Pull a model", + "description": "Download a model like llama2, mistral, or codellama", + "command": "docker exec ollama ollama pull llama2", + "purpose": "Download llama2 model", + }) + suggestions.append({ + "type": "demo", + "icon": "📝", + "label": "Show API demo", + "description": "Example curl commands and Python code", + "demo_type": "ollama", + }) + suggestions.append({ + "type": "test", + "icon": "🧪", + "label": "Test the installation", + "description": "Verify Ollama is running correctly", + "command": "curl http://localhost:11434/api/tags", + "purpose": "Check Ollama API", + }) + + # Nginx suggestions + if "nginx" in cmd_str or "nginx" in query_lower: + suggestions.append({ + "type": "start", + "icon": "🚀", + "label": "Start Nginx", + "description": "Start the Nginx web server", + "command": "sudo systemctl start nginx", + "purpose": "Start Nginx service", + }) + suggestions.append({ + "type": "setup", + "icon": "⚙️", + "label": "Configure a site", + "description": "Set up a new virtual host", + "demo_type": "nginx_config", + }) + suggestions.append({ + "type": "test", + "icon": "🧪", + "label": "Test configuration", + "description": "Verify Nginx config is valid", + "command": "sudo nginx -t", + "purpose": "Test Nginx configuration", + }) + + # PostgreSQL suggestions + if "postgres" in cmd_str or "postgresql" in query_lower: + suggestions.append({ + "type": "start", + "icon": "🚀", + "label": "Start PostgreSQL", + "description": "Start the database server", + "command": "sudo systemctl start postgresql", + "purpose": "Start PostgreSQL service", + }) + suggestions.append({ + "type": "setup", + "icon": "⚙️", + "label": "Create a database", + "description": "Create a new database and user", + "demo_type": "postgres_setup", + }) + suggestions.append({ + "type": "test", + "icon": "🧪", + "label": "Test connection", + "description": "Verify PostgreSQL is accessible", + "command": "sudo -u postgres psql -c '\\l'", + "purpose": "List PostgreSQL 
databases", + }) + + # Node.js/npm suggestions + if "node" in cmd_str or "npm" in cmd_str or "nodejs" in query_lower: + suggestions.append({ + "type": "demo", + "icon": "📝", + "label": "Show starter code", + "description": "Example Express.js server", + "demo_type": "nodejs", + }) + suggestions.append({ + "type": "test", + "icon": "🧪", + "label": "Verify installation", + "description": "Check Node.js and npm versions", + "command": "node --version && npm --version", + "purpose": "Check Node.js installation", + }) + + # Python/pip suggestions + if "python" in cmd_str or "pip" in cmd_str: + suggestions.append({ + "type": "demo", + "icon": "📝", + "label": "Show example code", + "description": "Example Python usage", + "demo_type": "python", + }) + suggestions.append({ + "type": "test", + "icon": "🧪", + "label": "Test import", + "description": "Verify packages are importable", + "demo_type": "python_test", + }) + + # Generic suggestions if nothing specific matched + if not suggestions: + # Add a generic test suggestion + suggestions.append({ + "type": "test", + "icon": "🧪", + "label": "Run a quick test", + "description": "Verify the installation works", + "demo_type": "generic_test", + }) + + return suggestions[:5] # Limit to 5 suggestions + + def _execute_suggestion( + self, + suggestion: dict, + run: DoRun, + user_query: str, + ) -> None: + """Execute a suggestion.""" + suggestion_type = suggestion.get("type") + + if suggestion_type == "retry_interrupted": + # Retry the command that was interrupted + if self._interrupted_command: + console.print() + console.print(f"[cyan]🔄 Retrying:[/cyan] {self._interrupted_command}") + console.print() + + needs_sudo = "sudo" in self._interrupted_command or self._needs_sudo(self._interrupted_command, []) + success, stdout, stderr = self._execute_single_command( + self._interrupted_command, + needs_sudo=needs_sudo + ) + + if success: + console.print(f"[green]✓ Success[/green]") + if stdout: + console.print(f"[dim]{stdout[:500]}{'...' 
                    self._interrupted_command = None  # Clear after successful retry
                else:
                    console.print(f"[red]✗ Failed: {stderr[:200]}[/red]")
            else:
                console.print("[yellow]No interrupted command to retry.[/yellow]")
        elif suggestion_type == "skip_and_continue":
            # Skip the interrupted command and continue with remaining
            console.print()
            console.print("[cyan]⏭️ Skipping interrupted command and continuing...[/cyan]")
            self._interrupted_command = None

            if self._remaining_commands:
                console.print(f"[dim]Remaining commands: {len(self._remaining_commands)}[/dim]")
                for cmd, purpose, protected in self._remaining_commands:
                    console.print(f"[dim]  • {cmd[:60]}{'...' if len(cmd) > 60 else ''}[/dim]")
                console.print()
                console.print("[dim]Use 'continue all' to execute remaining commands, or type a new request.[/dim]")
            else:
                console.print("[dim]No remaining commands to execute.[/dim]")
        elif suggestion_type == "demo":
            self._show_demo(suggestion.get("demo_type", "generic"), suggestion)
        elif suggestion_type == "test":
            # Show test commands based on what was installed
            self._show_test_commands(run, user_query)
        elif "command" in suggestion:
            console.print()
            console.print(f"[cyan]Executing:[/cyan] {suggestion['command']}")
            console.print()

            # NOTE(review): unlike the retry path above, this only checks for a
            # literal "sudo" prefix and skips the _needs_sudo heuristic.
            needs_sudo = "sudo" in suggestion["command"]
            success, stdout, stderr = self._execute_single_command(
                suggestion["command"],
                needs_sudo=needs_sudo
            )

            if success:
                console.print(f"[green]✓ Success[/green]")
                if stdout:
                    console.print(f"[dim]{stdout[:500]}{'...' if len(stdout) > 500 else ''}[/dim]")
            else:
                console.print(f"[red]✗ Failed: {stderr[:200]}[/red]")
        elif "manual_commands" in suggestion:
            # Show manual commands
            console.print()
            console.print("[bold cyan]📋 Manual Commands:[/bold cyan]")
            for cmd in suggestion["manual_commands"]:
                console.print(f"  [green]$ {cmd}[/green]")
            console.print()
            console.print("[dim]Copy and run these commands in your terminal.[/dim]")
        else:
            console.print("[yellow]No specific action available for this suggestion.[/yellow]")

    def _show_test_commands(self, run: DoRun, user_query: str) -> None:
        """Show test commands based on what was installed/configured."""
        from rich.panel import Panel

        console.print()
        console.print("[bold cyan]🧪 Quick Test Commands[/bold cyan]")
        console.print()

        test_commands = []
        query_lower = user_query.lower()

        # Detect what was installed and suggest appropriate tests
        executed_cmds = [c.command.lower() for c in run.commands if c.status.value == "success"]
        all_cmds_str = " ".join(executed_cmds)

        # Web server tests
        if "apache" in all_cmds_str or "apache2" in query_lower:
            test_commands.extend([
                ("Check Apache status", "systemctl status apache2"),
                ("Test Apache config", "sudo apache2ctl -t"),
                ("View in browser", "curl -I http://localhost"),
            ])

        if "nginx" in all_cmds_str or "nginx" in query_lower:
            test_commands.extend([
                ("Check Nginx status", "systemctl status nginx"),
                ("Test Nginx config", "sudo nginx -t"),
                ("View in browser", "curl -I http://localhost"),
            ])

        # Database tests
        if "mysql" in all_cmds_str or "mysql" in query_lower:
            test_commands.extend([
                ("Check MySQL status", "systemctl status mysql"),
                ("Test MySQL connection", "sudo mysql -e 'SELECT VERSION();'"),
            ])

        if "postgresql" in all_cmds_str or "postgres" in query_lower:
            test_commands.extend([
                ("Check PostgreSQL status", "systemctl status postgresql"),
                ("Test PostgreSQL", "sudo -u postgres psql -c 'SELECT version();'"),
            ])
        # Docker tests
        if "docker" in all_cmds_str or "docker" in query_lower:
            test_commands.extend([
                ("Check Docker status", "systemctl status docker"),
                ("List containers", "docker ps -a"),
                ("Test Docker", "docker run hello-world"),
            ])

        # PHP tests
        if "php" in all_cmds_str or "php" in query_lower or "lamp" in query_lower:
            test_commands.extend([
                ("Check PHP version", "php -v"),
                ("Test PHP info", "php -i | head -20"),
            ])

        # Node.js tests
        if "node" in all_cmds_str or "nodejs" in query_lower:
            test_commands.extend([
                ("Check Node version", "node -v"),
                ("Check npm version", "npm -v"),
            ])

        # Python tests
        if "python" in all_cmds_str or "python" in query_lower:
            test_commands.extend([
                ("Check Python version", "python3 --version"),
                ("Check pip version", "pip3 --version"),
            ])

        # Generic service tests
        if not test_commands:
            # Try to extract service names from commands
            for cmd_log in run.commands:
                if "systemctl" in cmd_log.command and cmd_log.status.value == "success":
                    import re
                    match = re.search(r'systemctl\s+(?:start|enable|restart)\s+(\S+)', cmd_log.command)
                    if match:
                        service = match.group(1)
                        test_commands.append((f"Check {service} status", f"systemctl status {service}"))

        if not test_commands:
            test_commands = [
                ("Check system status", "systemctl --failed"),
                ("View recent logs", "journalctl -n 20 --no-pager"),
            ]

        # Display test commands
        for i, (desc, cmd) in enumerate(test_commands[:6], 1):  # Limit to 6
            console.print(f"  [bold]{i}.[/bold] {desc}")
            console.print(f"     [green]$ {cmd}[/green]")
            console.print()

        console.print("[dim]Copy and run these commands to verify your installation.[/dim]")
        console.print()

        # Offer to run the first test
        try:
            # NOTE(review): builtin input() does not render rich markup — the
            # literal "[dim]...[/dim]" text is shown to the user; consider
            # Confirm.ask or console.input instead.
            response = input("[dim]Run first test? [y/N]: [/dim]").strip().lower()
            if response in ['y', 'yes']:
                if test_commands:
                    desc, cmd = test_commands[0]
                    console.print()
                    console.print(f"[cyan]Running:[/cyan] {cmd}")
                    needs_sudo = cmd.strip().startswith("sudo")
                    success, stdout, stderr = self._execute_single_command(cmd, needs_sudo=needs_sudo)
                    if success:
                        console.print(f"[green]✓ {desc} - Passed[/green]")
                        if stdout:
                            console.print(Panel(stdout[:500], title="[dim]Output[/dim]", border_style="dim"))
                    else:
                        console.print(f"[red]✗ {desc} - Failed[/red]")
                        if stderr:
                            console.print(f"[dim red]{stderr[:200]}[/dim red]")
        except (EOFError, KeyboardInterrupt):
            # User aborted the prompt — treat as "no".
            pass

    def _show_demo(self, demo_type: str, suggestion: dict) -> None:
        """Show demo code/commands for a specific type."""
        console.print()

        if demo_type == "docker":
            image = suggestion.get("image", "your-image")
            console.print("[bold cyan]📝 Docker Usage Examples[/bold cyan]")
            console.print()
            console.print("[dim]# Run container in foreground:[/dim]")
            console.print(f"[green]docker run -it {image}[/green]")
            console.print()
            console.print("[dim]# Run container in background:[/dim]")
            console.print(f"[green]docker run -d --name myapp {image}[/green]")
            console.print()
            console.print("[dim]# Run with port mapping:[/dim]")
            console.print(f"[green]docker run -d -p 8080:8080 {image}[/green]")
            console.print()
            console.print("[dim]# Run with volume mount:[/dim]")
            console.print(f"[green]docker run -d -v /host/path:/container/path {image}[/green]")

        elif demo_type == "ollama":
            console.print("[bold cyan]📝 Ollama API Examples[/bold cyan]")
            console.print()
            console.print("[dim]# List available models:[/dim]")
            console.print("[green]curl http://localhost:11434/api/tags[/green]")
            console.print()
            console.print("[dim]# Generate text:[/dim]")
            console.print('''[green]curl http://localhost:11434/api/generate -d '{
  "model": "llama2",
  "prompt": "Hello, how are you?"
}'[/green]''')
            console.print()
            console.print("[dim]# Python example:[/dim]")
            console.print('''[green]import requests

response = requests.post('http://localhost:11434/api/generate',
    json={
        'model': 'llama2',
        'prompt': 'Explain quantum computing in simple terms',
        'stream': False
    })
print(response.json()['response'])[/green]''')

        elif demo_type == "nginx_config":
            console.print("[bold cyan]📝 Nginx Configuration Example[/bold cyan]")
            console.print()
            console.print("[dim]# Create a new site config:[/dim]")
            console.print("[green]sudo nano /etc/nginx/sites-available/mysite[/green]")
            console.print()
            console.print("[dim]# Example config:[/dim]")
            console.print('''[green]server {
    listen 80;
    server_name example.com;

    location / {
        proxy_pass http://localhost:3000;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection 'upgrade';
        proxy_set_header Host $host;
    }
}[/green]''')
            console.print()
            console.print("[dim]# Enable the site:[/dim]")
            console.print("[green]sudo ln -s /etc/nginx/sites-available/mysite /etc/nginx/sites-enabled/[/green]")
            console.print("[green]sudo nginx -t && sudo systemctl reload nginx[/green]")

        elif demo_type == "postgres_setup":
            console.print("[bold cyan]📝 PostgreSQL Setup Example[/bold cyan]")
            console.print()
            console.print("[dim]# Create a new user and database:[/dim]")
            console.print("[green]sudo -u postgres createuser --interactive myuser[/green]")
            console.print("[green]sudo -u postgres createdb mydb -O myuser[/green]")
            console.print()
            console.print("[dim]# Connect to the database:[/dim]")
            console.print("[green]psql -U myuser -d mydb[/green]")
            console.print()
            console.print("[dim]# Python connection example:[/dim]")
            console.print('''[green]import psycopg2

conn = psycopg2.connect(
    dbname="mydb",
    user="myuser",
    password="mypassword",
    host="localhost"
)
cursor = conn.cursor()
cursor.execute("SELECT version();")
print(cursor.fetchone())[/green]''')

        elif demo_type == "nodejs":
            console.print("[bold cyan]📝 Node.js Example[/bold cyan]")
            console.print()
            console.print("[dim]# Create a simple Express server:[/dim]")
            console.print('''[green]// server.js
const express = require('express');
const app = express();

app.get('/', (req, res) => {
  res.json({ message: 'Hello from Node.js!' });
});

app.listen(3000, () => {
  console.log('Server running on http://localhost:3000');
});[/green]''')
            console.print()
            console.print("[dim]# Run it:[/dim]")
            console.print("[green]npm init -y && npm install express && node server.js[/green]")

        elif demo_type == "python":
            console.print("[bold cyan]📝 Python Example[/bold cyan]")
            console.print()
            console.print("[dim]# Simple HTTP server:[/dim]")
            console.print("[green]python3 -m http.server 8000[/green]")
            console.print()
            console.print("[dim]# Flask web app:[/dim]")
            console.print('''[green]from flask import Flask
app = Flask(__name__)

@app.route('/')
def hello():
    return {'message': 'Hello from Python!'}

if __name__ == '__main__':
    app.run(debug=True)[/green]''')

        else:
            console.print("[dim]No specific demo available. Check the documentation for usage examples.[/dim]")

        console.print()

    def _execute_task_node(
        self,
        task: TaskNode,
        run: DoRun,
        original_commands: list[tuple[str, str, list[str]]],
        depth: int = 0,
    ):
        """Execute a single task node with auto-repair capabilities."""
        indent = "  " * depth
        task_num = f"[{task.task_type.value.upper()}]"

        # Check if task was marked as skipped (e.g., using existing resource)
        if task.status == CommandStatus.SKIPPED:
            # Claude-like skipped output
            console.print(f"{indent}[dim]○[/dim] [cyan]{task.command[:65]}{'...' if len(task.command) > 65 else ''}[/cyan]")
if len(task.command) > 65 else ''}[/cyan]") + console.print(f"{indent} [dim italic]↳ Skipped: {task.output or 'Using existing resource'}[/dim italic]") + + # Log the skipped command + cmd_log = CommandLog( + command=task.command, + purpose=task.purpose, + timestamp=datetime.datetime.now().isoformat(), + status=CommandStatus.SKIPPED, + output=task.output or "Using existing resource", + ) + run.commands.append(cmd_log) + return + + # Claude-like command output + console.print(f"{indent}[bold cyan]●[/bold cyan] [bold]{task.command[:65]}{'...' if len(task.command) > 65 else ''}[/bold]") + console.print(f"{indent} [dim italic]↳ {task.purpose}[/dim italic]") + + protected_paths = [] + user_query = run.user_query if run else "" + for cmd, _, protected in original_commands: + if cmd == task.command: + protected_paths = protected + break + + file_check = self._file_analyzer.check_file_exists_and_usefulness(task.command, task.purpose, user_query) + + if file_check["recommendations"]: + self._file_analyzer.apply_file_recommendations(file_check["recommendations"]) + + task.status = CommandStatus.RUNNING + start_time = time.time() + + needs_sudo = self._needs_sudo(task.command, protected_paths) + success, stdout, stderr = self._execute_single_command(task.command, needs_sudo) + + task.output = stdout + task.error = stderr + task.duration_seconds = time.time() - start_time + + # Check if command was interrupted by Ctrl+Z/Ctrl+C + if self._interrupted: + task.status = CommandStatus.INTERRUPTED + cmd_log = CommandLog( + command=task.command, + purpose=task.purpose, + timestamp=datetime.datetime.now().isoformat(), + status=CommandStatus.INTERRUPTED, + output=stdout, + error="Command interrupted by user (Ctrl+Z/Ctrl+C)", + duration_seconds=task.duration_seconds, + ) + console.print(f"{indent} [yellow]⚠[/yellow] [dim]Interrupted ({task.duration_seconds:.2f}s)[/dim]") + run.commands.append(cmd_log) + return + + cmd_log = CommandLog( + command=task.command, + purpose=task.purpose, + 
timestamp=datetime.datetime.now().isoformat(),
+            status=CommandStatus.SUCCESS if success else CommandStatus.FAILED,
+            output=stdout,
+            error=stderr,
+            duration_seconds=task.duration_seconds,
+        )
+
+        if success:
+            task.status = CommandStatus.SUCCESS
+            # Claude-like success output
+            console.print(f"{indent}  [green]✓[/green] [dim]Done ({task.duration_seconds:.2f}s)[/dim]")
+            if stdout:
+                output_preview = stdout[:100] + ('...' if len(stdout) > 100 else '')
+                console.print(f"{indent}  [dim]{output_preview}[/dim]")
+            console.print()
+            run.commands.append(cmd_log)
+            return
+
+        task.status = CommandStatus.NEEDS_REPAIR
+        diagnosis = self._diagnoser.diagnose_error(task.command, stderr)
+        task.failure_reason = diagnosis.get("description", "Unknown error")
+
+        # Claude-like error output
+        console.print(f"{indent}  [red]✗[/red] [bold red]{diagnosis['error_type']}[/bold red]")
+        console.print(f"{indent}  [dim]{diagnosis['description'][:80]}{'...' if len(diagnosis['description']) > 80 else ''}[/dim]")
+
+        # Check if this is a login/credential required error
+        if diagnosis.get("category") == "login_required":
+            console.print(f"{indent}[cyan]  🔐 Authentication required[/cyan]")
+
+            login_success, login_msg = self._login_handler.handle_login(task.command, stderr)
+
+            if login_success:
+                console.print(f"{indent}[green]  ✓ {login_msg}[/green]")
+                console.print(f"{indent}[cyan]  Retrying command...[/cyan]")
+
+                # Retry the command
+                needs_sudo = self._needs_sudo(task.command, [])
+                success, new_stdout, new_stderr = self._execute_single_command(task.command, needs_sudo)
+
+                if success:
+                    task.status = CommandStatus.SUCCESS
+                    task.reasoning = "Succeeded after authentication"
+                    cmd_log.status = CommandStatus.SUCCESS
+                    cmd_log.output = new_stdout[:500] if new_stdout else ""  # FIX: was `cmd_log.stdout` — CommandLog has no `stdout` field (see constructor above: the field is `output`), so the retry output was silently dropped
+                    console.print(f"{indent}[green]  ✓ Command succeeded after authentication![/green]")
+                    run.commands.append(cmd_log)
+                    return
+                else:
+                    # Still failed after login
+                    stderr = new_stderr
+                    diagnosis =
self._diagnoser.diagnose_error(task.command, stderr) + console.print(f"{indent}[yellow] Command still failed: {stderr[:100]}[/yellow]") + else: + console.print(f"{indent}[yellow] {login_msg}[/yellow]") + + if diagnosis.get("extracted_path"): + console.print(f"{indent}[dim] Path: {diagnosis['extracted_path']}[/dim]") + + # Handle timeout errors specially - don't blindly retry + if diagnosis.get("category") == "timeout" or "timed out" in stderr.lower(): + console.print(f"{indent}[yellow] ⏱️ This operation timed out[/yellow]") + + # Check if it's a docker pull - those might still be running + if "docker pull" in task.command.lower(): + console.print(f"{indent}[cyan] ℹ️ Docker pull may still be downloading in background[/cyan]") + console.print(f"{indent}[dim] Check with: docker images | grep [/dim]") + console.print(f"{indent}[dim] Or retry with: docker pull --timeout=0 [/dim]") + elif "apt" in task.command.lower(): + console.print(f"{indent}[cyan] ℹ️ Package installation timed out[/cyan]") + console.print(f"{indent}[dim] Check apt status: sudo dpkg --configure -a[/dim]") + console.print(f"{indent}[dim] Then retry the command[/dim]") + else: + console.print(f"{indent}[cyan] ℹ️ You can retry this command manually[/cyan]") + + # Mark as needing manual intervention, not auto-fix + task.status = CommandStatus.NEEDS_REPAIR + task.failure_reason = "Operation timed out - may need manual retry" + cmd_log.status = CommandStatus.FAILED + cmd_log.error = stderr + run.commands.append(cmd_log) + return + + if task.repair_attempts < task.max_repair_attempts: + import sys + task.repair_attempts += 1 + console.print(f"{indent}[cyan] 🔧 Auto-fix attempt {task.repair_attempts}/{task.max_repair_attempts}[/cyan]") + + # Flush output before auto-fix to ensure clean display after sudo prompts + sys.stdout.flush() + + fixed, fix_message, fix_commands = self._auto_fixer.auto_fix_error( + task.command, stderr, diagnosis, max_attempts=3 + ) + + for fix_cmd in fix_commands: + repair_task = 
self._task_tree.add_repair_task( + parent=task, + command=fix_cmd, + purpose=f"Auto-fix: {diagnosis['error_type']}", + reasoning=fix_message, + ) + repair_task.status = CommandStatus.SUCCESS + + if fixed: + task.status = CommandStatus.SUCCESS + task.reasoning = f"Auto-fixed: {fix_message}" + console.print(f"{indent}[green] ✓ {fix_message}[/green]") + cmd_log.status = CommandStatus.SUCCESS + run.commands.append(cmd_log) + return + else: + console.print(f"{indent}[yellow] Auto-fix incomplete: {fix_message}[/yellow]") + + task.status = CommandStatus.FAILED + task.reasoning = self._generate_task_failure_reasoning(task, diagnosis) + + error_type = diagnosis.get("error_type", "unknown") + + # Check if this is a "soft failure" that shouldn't warrant manual intervention + # These are cases where a tool/command simply isn't available and that's OK + soft_failure_types = { + "command_not_found", # Tool not installed + "not_found", # File/command doesn't exist + "no_such_command", + "unable_to_locate_package", # Package doesn't exist in repos + } + + # Also check for patterns in the error message that indicate optional tools + optional_tool_patterns = [ + "sensors", # lm-sensors - optional hardware monitoring + "snap", # snapd - optional package manager + "flatpak", # optional package manager + "docker", # optional if not needed + "podman", # optional container runtime + "nmap", # optional network scanner + "htop", # optional system monitor + "iotop", # optional I/O monitor + "iftop", # optional network monitor + ] + + cmd_base = task.command.split()[0] if task.command else "" + is_optional_tool = any(pattern in cmd_base.lower() for pattern in optional_tool_patterns) + is_soft_failure = error_type in soft_failure_types and is_optional_tool + + if is_soft_failure: + # Mark as skipped instead of failed - this is an optional tool that's not available + task.status = CommandStatus.SKIPPED + task.reasoning = f"Tool '{cmd_base}' not available (optional)" + 
console.print(f"{indent}[yellow] ○ Skipped: {cmd_base} not available (optional tool)[/yellow]") + console.print(f"{indent}[dim] This tool provides additional info but isn't required[/dim]") + cmd_log.status = CommandStatus.SKIPPED + else: + console.print(f"{indent}[red] ✗ Failed: {diagnosis['description'][:100]}[/red]") + console.print(f"{indent}[dim] Reasoning: {task.reasoning}[/dim]") + + # Only offer manual intervention for errors that could actually be fixed manually + # Don't offer for missing commands/packages that auto-fix couldn't resolve + should_offer_manual = ( + diagnosis.get("fix_commands") or stderr + ) and error_type not in {"command_not_found", "not_found", "unable_to_locate_package"} + + if should_offer_manual: + console.print(f"\n{indent}[yellow]💡 Manual intervention available[/yellow]") + + suggested_cmds = diagnosis.get("fix_commands", [f"sudo {task.command}"]) + console.print(f"{indent}[dim] Suggested commands:[/dim]") + for cmd in suggested_cmds[:3]: + console.print(f"{indent}[cyan] $ {cmd}[/cyan]") + + if Confirm.ask(f"{indent}Run manually while Cortex monitors?", default=False): + manual_success = self._supervise_manual_intervention_for_task( + task, suggested_cmds, run + ) + if manual_success: + task.status = CommandStatus.SUCCESS + task.reasoning = "Completed via monitored manual intervention" + cmd_log.status = CommandStatus.SUCCESS + + cmd_log.status = task.status + run.commands.append(cmd_log) + + def _supervise_manual_intervention_for_task( + self, + task: TaskNode, + suggested_commands: list[str], + run: DoRun, + ) -> bool: + """Supervise manual intervention for a specific task with terminal monitoring.""" + from rich.panel import Panel + from rich.prompt import Prompt + + # If no suggested commands provided, use the task command with sudo + if not suggested_commands: + if task and task.command: + # Add sudo if not already present + cmd = task.command + if not cmd.strip().startswith("sudo"): + cmd = f"sudo {cmd}" + suggested_commands 
= [cmd] + + # Claude-like manual intervention UI + console.print() + console.print("[bold blue]━━━[/bold blue] [bold]Manual Intervention[/bold]") + console.print() + + # Show the task context + if task and task.purpose: + console.print(f"[bold]Task:[/bold] {task.purpose}") + console.print() + + console.print("[dim]Run these commands in another terminal:[/dim]") + console.print() + + # Show commands in a clear box + if suggested_commands: + from rich.panel import Panel + cmd_text = "\n".join(f" {i}. {cmd}" for i, cmd in enumerate(suggested_commands, 1)) + console.print(Panel( + cmd_text, + title="[bold cyan]📋 Commands to Run[/bold cyan]", + border_style="cyan", + padding=(0, 1), + )) + else: + console.print(" [yellow]⚠ No specific commands - check the task above[/yellow]") + + console.print() + + # Track expected commands for matching + self._expected_manual_commands = suggested_commands.copy() if suggested_commands else [] + self._completed_manual_commands: list[str] = [] + + # Start terminal monitoring with detailed output + self._terminal_monitor = TerminalMonitor( + notification_callback=lambda title, msg: self._send_notification(title, msg) + ) + self._terminal_monitor.start(expected_commands=suggested_commands) + + console.print() + console.print("[dim]Type 'done' when finished, 'help' for tips, or 'cancel' to abort[/dim]") + console.print() + + try: + while True: + try: + user_input = Prompt.ask("[cyan]Status[/cyan]", default="done").strip().lower() + except (EOFError, KeyboardInterrupt): + console.print("\n[yellow]Manual intervention cancelled[/yellow]") + return False + + # Handle natural language responses + if user_input in ["done", "finished", "complete", "completed", "success", "worked", "yes", "y"]: + # Show observed commands and check for matches + observed = self._terminal_monitor.get_observed_commands() + matched_commands = [] + unmatched_commands = [] + + if observed: + console.print(f"\n[cyan]📊 Observed {len(observed)} command(s):[/cyan]") + for 
obs in observed[-5:]: + obs_cmd = obs['command'] + is_matched = False + + # Check if this matches any expected command + for expected in self._expected_manual_commands: + if self._commands_match(obs_cmd, expected): + matched_commands.append(obs_cmd) + self._completed_manual_commands.append(expected) + console.print(f" • {obs_cmd[:60]}... [green]✓[/green]") + is_matched = True + break + + if not is_matched: + unmatched_commands.append(obs_cmd) + console.print(f" • {obs_cmd[:60]}... [yellow]?[/yellow]") + + # Check if expected commands were actually run + if self._expected_manual_commands and not matched_commands: + console.print() + console.print("[yellow]⚠ None of the expected commands were detected.[/yellow]") + console.print("[dim]Expected:[/dim]") + for cmd in self._expected_manual_commands[:3]: + console.print(f" [cyan]$ {cmd}[/cyan]") + console.print() + + # Send notification with correct commands + self._send_notification( + "⚠️ Cortex: Expected Commands", + f"Run: {self._expected_manual_commands[0][:50]}..." + ) + + console.print("[dim]Type 'done' again to confirm, or run the expected commands first.[/dim]") + continue # Don't mark as success yet - let user try again + + # Check if any observed commands had errors (check last few) + has_errors = False + if observed: + for obs in observed[-3:]: + if obs.get('has_error') or obs.get('status') == 'failed': + has_errors = True + console.print("[yellow]⚠ Some commands may have failed. 
Please verify.[/yellow]") + break + + if has_errors and not user_input in ["yes", "y", "worked", "success"]: + console.print("[dim]Type 'success' to confirm it worked anyway.[/dim]") + continue + + console.print("[green]✓ Manual step completed successfully[/green]") + + if self._task_tree: + verify_task = self._task_tree.add_verify_task( + parent=task, + command="# Manual verification", + purpose="User confirmed manual intervention success", + ) + verify_task.status = CommandStatus.SUCCESS + + # Mark matched commands as completed so they're not re-executed + if matched_commands: + task.manual_commands_completed = matched_commands + + return True + + elif user_input in ["help", "?", "hint", "tips"]: + console.print() + console.print("[bold]💡 Manual Intervention Tips:[/bold]") + console.print(" • Use [cyan]sudo[/cyan] if you see 'Permission denied'") + console.print(" • Use [cyan]sudo su -[/cyan] to become root") + console.print(" • Check paths with [cyan]ls -la [/cyan]") + console.print(" • Check services: [cyan]systemctl status [/cyan]") + console.print(" • View logs: [cyan]journalctl -u -n 50[/cyan]") + console.print() + + elif user_input in ["cancel", "abort", "quit", "exit", "no", "n"]: + console.print("[yellow]Manual intervention cancelled[/yellow]") + return False + + elif user_input in ["failed", "error", "problem", "issue"]: + console.print() + error_desc = Prompt.ask("[yellow]What error did you encounter?[/yellow]") + error_lower = error_desc.lower() + + # Provide contextual help based on error description + if "permission" in error_lower or "denied" in error_lower: + console.print("\n[cyan]💡 Try running with sudo:[/cyan]") + for cmd in suggested_commands[:2]: + if not cmd.startswith("sudo"): + console.print(f" [green]sudo {cmd}[/green]") + elif "not found" in error_lower or "no such" in error_lower: + console.print("\n[cyan]💡 Check if path/command exists:[/cyan]") + console.print(" [green]which [/green]") + console.print(" [green]ls -la [/green]") + elif 
"service" in error_lower or "systemctl" in error_lower: + console.print("\n[cyan]💡 Service troubleshooting:[/cyan]") + console.print(" [green]sudo systemctl status [/green]") + console.print(" [green]sudo journalctl -u -n 50[/green]") + else: + console.print("\n[cyan]💡 General debugging:[/cyan]") + console.print(" • Check the error message carefully") + console.print(" • Try running with sudo") + console.print(" • Check if all required packages are installed") + + console.print() + console.print("[dim]Type 'done' when fixed, or 'cancel' to abort[/dim]") + + else: + # Any other input - show status + observed = self._terminal_monitor.get_observed_commands() + console.print(f"[dim]Still monitoring... ({len(observed)} commands observed)[/dim]") + console.print("[dim]Type 'done' when finished, 'help' for tips[/dim]") + + except KeyboardInterrupt: + console.print("\n[yellow]Manual intervention cancelled[/yellow]") + return False + finally: + if self._terminal_monitor: + observed = self._terminal_monitor.stop() + # Log observed commands to run + for obs in observed: + run.commands.append(CommandLog( + command=obs["command"], + purpose=f"Manual execution ({obs['source']})", + timestamp=obs["timestamp"], + status=CommandStatus.SUCCESS, + )) + self._terminal_monitor = None + + # Clear tracking + self._expected_manual_commands = [] + + def _commands_match(self, observed: str, expected: str) -> bool: + """Check if an observed command matches an expected command. 
+ + Handles variations like: + - With/without sudo + - Different whitespace + - Same command with different args still counts + """ + # Normalize commands + obs_normalized = observed.strip().lower() + exp_normalized = expected.strip().lower() + + # Remove sudo prefix for comparison + if obs_normalized.startswith("sudo "): + obs_normalized = obs_normalized[5:].strip() + if exp_normalized.startswith("sudo "): + exp_normalized = exp_normalized[5:].strip() + + # Exact match + if obs_normalized == exp_normalized: + return True + + obs_parts = obs_normalized.split() + exp_parts = exp_normalized.split() + + # Check for service management commands first (need full match including service name) + service_commands = ["systemctl", "service"] + for svc_cmd in service_commands: + if svc_cmd in obs_normalized and svc_cmd in exp_normalized: + # Extract action and service name + obs_action = None + exp_action = None + obs_service = None + exp_service = None + + for i, part in enumerate(obs_parts): + if part in ["restart", "start", "stop", "reload", "status", "enable", "disable"]: + obs_action = part + # Service name is usually the next word + if i + 1 < len(obs_parts): + obs_service = obs_parts[i + 1] + break + + for i, part in enumerate(exp_parts): + if part in ["restart", "start", "stop", "reload", "status", "enable", "disable"]: + exp_action = part + if i + 1 < len(exp_parts): + exp_service = exp_parts[i + 1] + break + + if obs_action and exp_action and obs_service and exp_service: + if obs_action == exp_action and obs_service == exp_service: + return True + else: + return False # Different action or service + + # For non-service commands, check if first 2-3 words match + if len(obs_parts) >= 2 and len(exp_parts) >= 2: + # Skip if either is a service command (handled above) + if obs_parts[0] not in ["systemctl", "service"] and exp_parts[0] not in ["systemctl", "service"]: + # Compare first two words (command and subcommand) + if obs_parts[:2] == exp_parts[:2]: + return True + + 
return False + + def get_completed_manual_commands(self) -> list[str]: + """Get list of commands completed during manual intervention.""" + return getattr(self, '_completed_manual_commands', []) + + def _generate_task_failure_reasoning( + self, + task: TaskNode, + diagnosis: dict, + ) -> str: + """Generate detailed reasoning for why a task failed.""" + parts = [] + + parts.append(f"Error: {diagnosis.get('error_type', 'unknown')}") + + if task.repair_attempts > 0: + parts.append(f"Repair attempts: {task.repair_attempts} (all failed)") + + if diagnosis.get("extracted_path"): + parts.append(f"Problem path: {diagnosis['extracted_path']}") + + error_type = diagnosis.get("error_type", "") + if "permission" in error_type.lower(): + parts.append("Root cause: Insufficient file system permissions") + elif "not_found" in error_type.lower(): + parts.append("Root cause: Required file or directory does not exist") + elif "service" in error_type.lower(): + parts.append("Root cause: System service issue") + + if diagnosis.get("fix_commands"): + parts.append(f"Suggested fix: {diagnosis['fix_commands'][0][:50]}...") + + return " | ".join(parts) + + def _generate_tree_summary(self, run: DoRun) -> str: + """Generate a summary from the task tree execution.""" + if not self._task_tree: + return self._generate_summary(run) + + summary = self._task_tree.get_summary() + + total = sum(summary.values()) + success = summary.get("success", 0) + failed = summary.get("failed", 0) + repaired = summary.get("needs_repair", 0) + + parts = [ + f"Total tasks: {total}", + f"Successful: {success}", + f"Failed: {failed}", + ] + + if repaired > 0: + parts.append(f"Repair attempted: {repaired}") + + if self._permission_requests_count > 1: + parts.append(f"Permission requests: {self._permission_requests_count}") + + return " | ".join(parts) + + def provide_manual_instructions( + self, + commands: list[tuple[str, str, list[str]]], + user_query: str, + ) -> DoRun: + """Provide instructions for manual 
execution and monitor progress.""" + run = DoRun( + run_id=self.db._generate_run_id(), + summary="", + mode=RunMode.USER_MANUAL, + user_query=user_query, + started_at=datetime.datetime.now().isoformat(), + session_id=self.current_session_id or "", + ) + self.current_run = run + + console.print() + console.print(Panel( + "[bold cyan]📋 Manual Execution Instructions[/bold cyan]", + expand=False, + )) + console.print() + + cwd = os.getcwd() + console.print(f"[bold]1. Open a new terminal and navigate to:[/bold]") + console.print(f" [cyan]cd {cwd}[/cyan]") + console.print() + + console.print(f"[bold]2. Execute the following commands in order:[/bold]") + console.print() + + for i, (cmd, purpose, protected) in enumerate(commands, 1): + console.print(f" [bold yellow]Step {i}:[/bold yellow] {purpose}") + needs_sudo = self._needs_sudo(cmd, protected) + + if protected: + console.print(f" [red]⚠️ Accesses protected paths: {', '.join(protected)}[/red]") + + if needs_sudo and not cmd.strip().startswith("sudo"): + console.print(f" [cyan]sudo {cmd}[/cyan]") + else: + console.print(f" [cyan]{cmd}[/cyan]") + console.print() + + run.commands.append(CommandLog( + command=cmd, + purpose=purpose, + timestamp=datetime.datetime.now().isoformat(), + status=CommandStatus.PENDING, + )) + + console.print("[bold]3. Once done, return to this terminal and press Enter.[/bold]") + console.print() + + monitor = TerminalMonitor( + notification_callback=lambda title, msg: self._send_notification(title, msg, "normal") + ) + + expected_commands = [cmd for cmd, _, _ in commands] + monitor.start_monitoring(expected_commands) + + console.print("[dim]🔍 Monitoring terminal activity... 
(press Enter when done)[/dim]") + + try: + input() + except (EOFError, KeyboardInterrupt): + pass + + observed = monitor.stop_monitoring() + + # Add observed commands to the run + for obs in observed: + run.commands.append(CommandLog( + command=obs["command"], + purpose="User-executed command", + timestamp=obs["timestamp"], + status=CommandStatus.SUCCESS, + )) + + run.completed_at = datetime.datetime.now().isoformat() + run.summary = self._generate_summary(run) + + self.db.save_run(run) + + # Generate LLM summary/answer + llm_answer = self._generate_llm_answer(run, user_query) + + # Print condensed execution summary with answer + self._print_execution_summary(run, answer=llm_answer) + + console.print() + console.print(f"[dim]Run ID: {run.run_id}[/dim]") + + return run + + def _generate_summary(self, run: DoRun) -> str: + """Generate a summary of what was done in the run.""" + successful = sum(1 for c in run.commands if c.status == CommandStatus.SUCCESS) + failed = sum(1 for c in run.commands if c.status == CommandStatus.FAILED) + + mode_str = "automated" if run.mode == RunMode.CORTEX_EXEC else "manual" + + if failed == 0: + return f"Successfully executed {successful} commands ({mode_str}) for: {run.user_query[:50]}" + else: + return f"Executed {successful} commands with {failed} failures ({mode_str}) for: {run.user_query[:50]}" + + def _generate_llm_answer(self, run: DoRun, user_query: str) -> str | None: + """Generate an LLM-based answer/summary after command execution.""" + if not self.llm_callback: + return None + + # Collect command outputs + command_results = [] + for cmd in run.commands: + status = "✓" if cmd.status == CommandStatus.SUCCESS else "✗" if cmd.status == CommandStatus.FAILED else "○" + result = { + "command": cmd.command, + "purpose": cmd.purpose, + "status": status, + "output": (cmd.output[:500] if cmd.output else "")[:500], # Limit output size + } + if cmd.error: + result["error"] = cmd.error[:200] + command_results.append(result) + + # Build 
prompt for LLM + prompt = f"""The user asked: "{user_query}" + +The following commands were executed: +""" + for i, result in enumerate(command_results, 1): + prompt += f"\n{i}. [{result['status']}] {result['command']}" + prompt += f"\n Purpose: {result['purpose']}" + if result.get('output'): + # Only include meaningful output, not empty or whitespace-only + output_preview = result['output'].strip()[:200] + if output_preview: + prompt += f"\n Output: {output_preview}" + if result.get('error'): + prompt += f"\n Error: {result['error']}" + + prompt += """ + +Based on the above execution results, provide a helpful summary/answer for the user. +Focus on: +1. What was accomplished +2. Any issues encountered and their impact +3. Key findings or results from the commands +4. Any recommendations for next steps + +Keep the response concise (2-4 paragraphs max). Do NOT include JSON in your response. +Respond directly with the answer text only.""" + + try: + from rich.console import Console + from rich.status import Status + + console = Console() + with Status("[cyan]Generating summary...[/cyan]", spinner="dots"): + result = self.llm_callback(prompt) + + if result: + # Handle different response formats + if isinstance(result, dict): + # Extract answer from various possible keys + answer = result.get("answer") or result.get("response") or result.get("text") or "" + if not answer and "reasoning" in result: + answer = result.get("reasoning", "") + elif isinstance(result, str): + answer = result + else: + return None + + # Clean the answer + answer = answer.strip() + + # Filter out JSON-like responses + if answer.startswith('{') or answer.startswith('['): + return None + + return answer if answer else None + except Exception as e: + # Silently fail - summary is optional + import logging + logging.debug(f"LLM summary generation failed: {e}") + return None + + return None + + def _print_execution_summary(self, run: DoRun, answer: str | None = None): + """Print a condensed execution 
summary with improved visual design.""" + from rich.panel import Panel + from rich.table import Table + from rich.text import Text + from rich import box + + # Count statuses + successful = [c for c in run.commands if c.status == CommandStatus.SUCCESS] + failed = [c for c in run.commands if c.status == CommandStatus.FAILED] + skipped = [c for c in run.commands if c.status == CommandStatus.SKIPPED] + interrupted = [c for c in run.commands if c.status == CommandStatus.INTERRUPTED] + + total = len(run.commands) + + # Build status header + console.print() + + # Create a status bar + if total > 0: + status_text = Text() + status_text.append(" ") + if successful: + status_text.append(f"✓ {len(successful)} ", style="bold green") + if failed: + status_text.append(f"✗ {len(failed)} ", style="bold red") + if skipped: + status_text.append(f"○ {len(skipped)} ", style="bold yellow") + if interrupted: + status_text.append(f"⚠ {len(interrupted)} ", style="bold yellow") + + # Calculate success rate + success_rate = (len(successful) / total * 100) if total > 0 else 0 + status_text.append(f" ({success_rate:.0f}% success)", style="dim") + + console.print(Panel( + status_text, + title="[bold white on blue] 📊 Execution Status [/bold white on blue]", + title_align="left", + border_style="blue", + padding=(0, 1), + expand=False, + )) + + # Create a table for detailed results + if successful or failed or skipped: + result_table = Table( + show_header=True, + header_style="bold", + box=box.SIMPLE, + padding=(0, 1), + expand=True, + ) + result_table.add_column("Status", width=8, justify="center") + result_table.add_column("Action", style="white") + + # Add successful commands + for cmd in successful[:4]: + purpose = cmd.purpose[:60] + "..." 
if len(cmd.purpose) > 60 else cmd.purpose + result_table.add_row("[green]✓ Done[/green]", purpose) + if len(successful) > 4: + result_table.add_row("[dim]...[/dim]", f"[dim]and {len(successful) - 4} more completed[/dim]") + + # Add failed commands + for cmd in failed[:2]: + error_short = (cmd.error[:40] + "...") if cmd.error and len(cmd.error) > 40 else (cmd.error or "Unknown") + result_table.add_row("[red]✗ Failed[/red]", f"{cmd.command[:30]}... - {error_short}") + + # Add skipped commands + for cmd in skipped[:2]: + purpose = cmd.purpose[:50] + "..." if len(cmd.purpose) > 50 else cmd.purpose + result_table.add_row("[yellow]○ Skip[/yellow]", purpose) + + console.print(Panel( + result_table, + title="[bold] 📋 Details [/bold]", + title_align="left", + border_style="dim", + padding=(0, 0), + )) + + # Answer section (for questions) - make it prominent + if answer: + # Clean the answer - remove any JSON-like content that might have leaked + clean_answer = answer + if clean_answer.startswith('{') or '{"' in clean_answer[:50]: + # Looks like JSON leaked through, try to extract readable parts + import re + # Try to extract just the answer field if present + answer_match = re.search(r'"answer"\s*:\s*"([^"]*)"', clean_answer) + if answer_match: + clean_answer = answer_match.group(1) + + # Truncate very long answers + if len(clean_answer) > 500: + display_answer = clean_answer[:500] + "\n\n[dim]... 
(truncated)[/dim]" + else: + display_answer = clean_answer + + console.print(Panel( + display_answer, + title="[bold white on green] 💡 Answer [/bold white on green]", + title_align="left", + border_style="green", + padding=(1, 2), + )) + + def get_run_history(self, limit: int = 20) -> list[DoRun]: + """Get recent do run history.""" + return self.db.get_recent_runs(limit) + + def get_run(self, run_id: str) -> DoRun | None: + """Get a specific run by ID.""" + return self.db.get_run(run_id) + + # Expose diagnosis and auto-fix methods for external use + def _diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: + """Diagnose a command failure.""" + return self._diagnoser.diagnose_error(cmd, stderr) + + def _auto_fix_error( + self, + cmd: str, + stderr: str, + diagnosis: dict[str, Any], + max_attempts: int = 5, + ) -> tuple[bool, str, list[str]]: + """Auto-fix an error.""" + return self._auto_fixer.auto_fix_error(cmd, stderr, diagnosis, max_attempts) + + def _check_for_conflicts(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for conflicts.""" + return self._conflict_detector.check_for_conflicts(cmd, purpose) + + def _run_verification_tests( + self, + commands_executed: list[CommandLog], + user_query: str, + ) -> tuple[bool, list[dict[str, Any]]]: + """Run verification tests.""" + return self._verification_runner.run_verification_tests(commands_executed, user_query) + + def _check_file_exists_and_usefulness( + self, + cmd: str, + purpose: str, + user_query: str, + ) -> dict[str, Any]: + """Check file existence and usefulness.""" + return self._file_analyzer.check_file_exists_and_usefulness(cmd, purpose, user_query) + + def _analyze_file_usefulness( + self, + content: str, + purpose: str, + user_query: str, + ) -> dict[str, Any]: + """Analyze file usefulness.""" + return self._file_analyzer.analyze_file_usefulness(content, purpose, user_query) + + +def setup_cortex_user() -> bool: + """Setup the cortex user if it doesn't exist.""" + handler = 
DoHandler() + return handler.setup_cortex_user() + + +def get_do_handler() -> DoHandler: + """Get a DoHandler instance.""" + return DoHandler() + diff --git a/cortex/do_runner/managers.py b/cortex/do_runner/managers.py new file mode 100644 index 000000000..762f9dabf --- /dev/null +++ b/cortex/do_runner/managers.py @@ -0,0 +1,287 @@ +"""User and path management for the Do Runner module.""" + +import json +import os +import pwd +import subprocess +from pathlib import Path + +from rich.console import Console + +console = Console() + + +class ProtectedPathsManager: + """Manages the list of protected files and folders requiring user authentication.""" + + SYSTEM_PROTECTED_PATHS: set[str] = { + # System configuration + "/etc", + "/etc/passwd", + "/etc/shadow", + "/etc/sudoers", + "/etc/sudoers.d", + "/etc/ssh", + "/etc/ssl", + "/etc/pam.d", + "/etc/security", + "/etc/cron.d", + "/etc/cron.daily", + "/etc/crontab", + "/etc/systemd", + "/etc/init.d", + # Boot and kernel + "/boot", + "/boot/grub", + # System binaries + "/usr/bin", + "/usr/sbin", + "/sbin", + "/bin", + # Root directory + "/root", + # System libraries + "/lib", + "/lib64", + "/usr/lib", + # Var system data + "/var/log", + "/var/lib/apt", + "/var/lib/dpkg", + # Proc and sys (virtual filesystems) + "/proc", + "/sys", + } + + USER_PROTECTED_PATHS: set[str] = set() + + def __init__(self): + self.config_file = Path.home() / ".cortex" / "protected_paths.json" + self._ensure_config_dir() + self._load_user_paths() + + def _ensure_config_dir(self): + """Ensure the config directory exists.""" + try: + self.config_file.parent.mkdir(parents=True, exist_ok=True) + except OSError: + self.config_file = Path("/tmp") / ".cortex" / "protected_paths.json" + self.config_file.parent.mkdir(parents=True, exist_ok=True) + + def _load_user_paths(self): + """Load user-configured protected paths.""" + if self.config_file.exists(): + try: + with open(self.config_file) as f: + data = json.load(f) + self.USER_PROTECTED_PATHS = 
set(data.get("paths", [])) + except (json.JSONDecodeError, OSError): + pass + + def _save_user_paths(self): + """Save user-configured protected paths.""" + try: + self.config_file.parent.mkdir(parents=True, exist_ok=True) + with open(self.config_file, "w") as f: + json.dump({"paths": list(self.USER_PROTECTED_PATHS)}, f, indent=2) + except OSError as e: + console.print(f"[yellow]Warning: Could not save protected paths: {e}[/yellow]") + + def add_protected_path(self, path: str) -> bool: + """Add a path to user-protected paths.""" + self.USER_PROTECTED_PATHS.add(path) + self._save_user_paths() + return True + + def remove_protected_path(self, path: str) -> bool: + """Remove a path from user-protected paths.""" + if path in self.USER_PROTECTED_PATHS: + self.USER_PROTECTED_PATHS.discard(path) + self._save_user_paths() + return True + return False + + def is_protected(self, path: str) -> bool: + """Check if a path requires authentication for access.""" + path = os.path.abspath(path) + all_protected = self.SYSTEM_PROTECTED_PATHS | self.USER_PROTECTED_PATHS + + if path in all_protected: + return True + + for protected in all_protected: + if path.startswith(protected + "/") or path == protected: + return True + + return False + + def get_all_protected(self) -> list[str]: + """Get all protected paths.""" + return sorted(self.SYSTEM_PROTECTED_PATHS | self.USER_PROTECTED_PATHS) + + +class CortexUserManager: + """Manages the cortex system user for privilege-limited execution.""" + + CORTEX_USER = "cortex" + CORTEX_GROUP = "cortex" + + @classmethod + def user_exists(cls) -> bool: + """Check if the cortex user exists.""" + try: + pwd.getpwnam(cls.CORTEX_USER) + return True + except KeyError: + return False + + @classmethod + def create_user(cls) -> tuple[bool, str]: + """Create the cortex user with basic privileges.""" + if cls.user_exists(): + return True, "Cortex user already exists" + + try: + subprocess.run( + ["sudo", "groupadd", "-f", cls.CORTEX_GROUP], + check=True, + 
capture_output=True, + ) + + subprocess.run( + [ + "sudo", "useradd", + "-r", + "-g", cls.CORTEX_GROUP, + "-d", "/var/lib/cortex", + "-s", "/bin/bash", + "-m", + cls.CORTEX_USER, + ], + check=True, + capture_output=True, + ) + + subprocess.run( + ["sudo", "mkdir", "-p", "/var/lib/cortex/workspace"], + check=True, + capture_output=True, + ) + subprocess.run( + ["sudo", "chown", "-R", f"{cls.CORTEX_USER}:{cls.CORTEX_GROUP}", "/var/lib/cortex"], + check=True, + capture_output=True, + ) + + return True, "Cortex user created successfully" + + except subprocess.CalledProcessError as e: + return False, f"Failed to create cortex user: {e.stderr.decode() if e.stderr else str(e)}" + + @classmethod + def grant_privilege(cls, file_path: str, mode: str = "rw") -> tuple[bool, str]: + """Grant cortex user privilege to access a specific file.""" + if not cls.user_exists(): + return False, "Cortex user does not exist. Run setup first." + + try: + acl_mode = "" + if "r" in mode: + acl_mode += "r" + if "w" in mode: + acl_mode += "w" + if "x" in mode: + acl_mode += "x" + + if not acl_mode: + acl_mode = "r" + + subprocess.run( + ["sudo", "setfacl", "-m", f"u:{cls.CORTEX_USER}:{acl_mode}", file_path], + check=True, + capture_output=True, + ) + + return True, f"Granted {acl_mode} access to {file_path}" + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.decode() if e.stderr else str(e) + if "setfacl" in error_msg or "not found" in error_msg.lower(): + return cls._grant_privilege_chmod(file_path, mode) + return False, f"Failed to grant privilege: {error_msg}" + + @classmethod + def _grant_privilege_chmod(cls, file_path: str, mode: str) -> tuple[bool, str]: + """Fallback privilege granting using chmod.""" + try: + chmod_mode = "" + if "r" in mode: + chmod_mode = "o+r" + if "w" in mode: + chmod_mode = "o+rw" if chmod_mode else "o+w" + if "x" in mode: + chmod_mode = chmod_mode + "x" if chmod_mode else "o+x" + + subprocess.run( + ["sudo", "chmod", chmod_mode, file_path], + 
check=True, + capture_output=True, + ) + return True, f"Granted {mode} access to {file_path} (chmod fallback)" + + except subprocess.CalledProcessError as e: + return False, f"Failed to grant privilege: {e.stderr.decode() if e.stderr else str(e)}" + + @classmethod + def revoke_privilege(cls, file_path: str) -> tuple[bool, str]: + """Revoke cortex user's privilege from a specific file.""" + try: + subprocess.run( + ["sudo", "setfacl", "-x", f"u:{cls.CORTEX_USER}", file_path], + check=True, + capture_output=True, + ) + return True, f"Revoked access to {file_path}" + + except subprocess.CalledProcessError as e: + error_msg = e.stderr.decode() if e.stderr else str(e) + if "setfacl" in error_msg or "not found" in error_msg.lower(): + return cls._revoke_privilege_chmod(file_path) + return False, f"Failed to revoke privilege: {error_msg}" + + @classmethod + def _revoke_privilege_chmod(cls, file_path: str) -> tuple[bool, str]: + """Fallback privilege revocation using chmod.""" + try: + subprocess.run( + ["sudo", "chmod", "o-rwx", file_path], + check=True, + capture_output=True, + ) + return True, f"Revoked access to {file_path} (chmod fallback)" + except subprocess.CalledProcessError as e: + return False, f"Failed to revoke privilege: {e.stderr.decode() if e.stderr else str(e)}" + + @classmethod + def run_as_cortex(cls, command: str, timeout: int = 60) -> tuple[bool, str, str]: + """Execute a command as the cortex user.""" + if not cls.user_exists(): + return False, "", "Cortex user does not exist" + + try: + result = subprocess.run( + ["sudo", "-u", cls.CORTEX_USER, "bash", "-c", command], + capture_output=True, + text=True, + timeout=timeout, + ) + return ( + result.returncode == 0, + result.stdout.strip(), + result.stderr.strip(), + ) + except subprocess.TimeoutExpired: + return False, "", f"Command timed out after {timeout} seconds" + except Exception as e: + return False, "", str(e) + diff --git a/cortex/do_runner/models.py b/cortex/do_runner/models.py new file mode 
100644 index 000000000..6f1081b75 --- /dev/null +++ b/cortex/do_runner/models.py @@ -0,0 +1,352 @@ +"""Data models and enums for the Do Runner module.""" + +from dataclasses import dataclass, field +from enum import Enum +from typing import Any + +from rich.console import Console + +console = Console() + + +class CommandStatus(str, Enum): + """Status of a command execution.""" + PENDING = "pending" + RUNNING = "running" + SUCCESS = "success" + FAILED = "failed" + SKIPPED = "skipped" + NEEDS_REPAIR = "needs_repair" + INTERRUPTED = "interrupted" # Command stopped by Ctrl+Z/Ctrl+C + + +class RunMode(str, Enum): + """Mode of execution for a do run.""" + CORTEX_EXEC = "cortex_exec" + USER_MANUAL = "user_manual" + + +class TaskType(str, Enum): + """Type of task in the task tree.""" + COMMAND = "command" + DIAGNOSTIC = "diagnostic" + REPAIR = "repair" + VERIFY = "verify" + ALTERNATIVE = "alternative" + + +@dataclass +class TaskNode: + """A node in the task tree representing a command or action.""" + id: str + task_type: TaskType + command: str + purpose: str + status: CommandStatus = CommandStatus.PENDING + + # Execution results + output: str = "" + error: str = "" + duration_seconds: float = 0.0 + + # Tree structure + parent_id: str | None = None + children: list["TaskNode"] = field(default_factory=list) + + # Repair context + failure_reason: str = "" + repair_attempts: int = 0 + max_repair_attempts: int = 3 + + # Reasoning + reasoning: str = "" + + def to_dict(self) -> dict[str, Any]: + return { + "id": self.id, + "task_type": self.task_type.value, + "command": self.command, + "purpose": self.purpose, + "status": self.status.value, + "output": self.output, + "error": self.error, + "duration_seconds": self.duration_seconds, + "parent_id": self.parent_id, + "children": [c.to_dict() for c in self.children], + "failure_reason": self.failure_reason, + "repair_attempts": self.repair_attempts, + "reasoning": self.reasoning, + } + + def add_child(self, child: "TaskNode"): + 
"""Add a child task.""" + child.parent_id = self.id + self.children.append(child) + + def get_depth(self) -> int: + """Get the depth of this node in the tree.""" + depth = 0 + node = self + while node.parent_id: + depth += 1 + node = node + return depth + + +class TaskTree: + """A tree structure for managing commands with auto-repair capabilities.""" + + def __init__(self): + self.root_tasks: list[TaskNode] = [] + self._task_counter = 0 + self._all_tasks: dict[str, TaskNode] = {} + + def _generate_task_id(self, prefix: str = "task") -> str: + """Generate a unique task ID.""" + self._task_counter += 1 + return f"{prefix}_{self._task_counter}" + + def add_root_task( + self, + command: str, + purpose: str, + task_type: TaskType = TaskType.COMMAND, + ) -> TaskNode: + """Add a root-level task.""" + task = TaskNode( + id=self._generate_task_id(task_type.value), + task_type=task_type, + command=command, + purpose=purpose, + ) + self.root_tasks.append(task) + self._all_tasks[task.id] = task + return task + + def add_repair_task( + self, + parent: TaskNode, + command: str, + purpose: str, + reasoning: str = "", + ) -> TaskNode: + """Add a repair sub-task to a failed task.""" + task = TaskNode( + id=self._generate_task_id("repair"), + task_type=TaskType.REPAIR, + command=command, + purpose=purpose, + reasoning=reasoning, + ) + parent.add_child(task) + self._all_tasks[task.id] = task + return task + + def add_diagnostic_task( + self, + parent: TaskNode, + command: str, + purpose: str, + ) -> TaskNode: + """Add a diagnostic sub-task to investigate a failure.""" + task = TaskNode( + id=self._generate_task_id("diag"), + task_type=TaskType.DIAGNOSTIC, + command=command, + purpose=purpose, + ) + parent.add_child(task) + self._all_tasks[task.id] = task + return task + + def add_verify_task( + self, + parent: TaskNode, + command: str, + purpose: str, + ) -> TaskNode: + """Add a verification task after a repair.""" + task = TaskNode( + id=self._generate_task_id("verify"), + 
task_type=TaskType.VERIFY, + command=command, + purpose=purpose, + ) + parent.add_child(task) + self._all_tasks[task.id] = task + return task + + def add_alternative_task( + self, + parent: TaskNode, + command: str, + purpose: str, + reasoning: str = "", + ) -> TaskNode: + """Add an alternative approach when the original fails.""" + task = TaskNode( + id=self._generate_task_id("alt"), + task_type=TaskType.ALTERNATIVE, + command=command, + purpose=purpose, + reasoning=reasoning, + ) + parent.add_child(task) + self._all_tasks[task.id] = task + return task + + def get_task(self, task_id: str) -> TaskNode | None: + """Get a task by ID.""" + return self._all_tasks.get(task_id) + + def get_pending_tasks(self) -> list[TaskNode]: + """Get all pending tasks in order.""" + pending = [] + for root in self.root_tasks: + self._collect_pending(root, pending) + return pending + + def _collect_pending(self, node: TaskNode, pending: list[TaskNode]): + """Recursively collect pending tasks.""" + if node.status == CommandStatus.PENDING: + pending.append(node) + for child in node.children: + self._collect_pending(child, pending) + + def get_failed_tasks(self) -> list[TaskNode]: + """Get all failed tasks.""" + return [t for t in self._all_tasks.values() if t.status == CommandStatus.FAILED] + + def get_summary(self) -> dict[str, int]: + """Get a summary of task statuses.""" + summary = {status.value: 0 for status in CommandStatus} + for task in self._all_tasks.values(): + summary[task.status.value] += 1 + return summary + + def to_dict(self) -> dict[str, Any]: + """Convert tree to dictionary.""" + return { + "root_tasks": [t.to_dict() for t in self.root_tasks], + "summary": self.get_summary(), + } + + def print_tree(self, indent: str = ""): + """Print the task tree structure.""" + for i, root in enumerate(self.root_tasks): + is_last = i == len(self.root_tasks) - 1 + self._print_node(root, indent, is_last) + + def _print_node(self, node: TaskNode, indent: str, is_last: bool): + """Print a 
single node with its children.""" + status_icons = { + CommandStatus.PENDING: "[dim]○[/dim]", + CommandStatus.RUNNING: "[cyan]◐[/cyan]", + CommandStatus.SUCCESS: "[green]✓[/green]", + CommandStatus.FAILED: "[red]✗[/red]", + CommandStatus.SKIPPED: "[yellow]○[/yellow]", + CommandStatus.NEEDS_REPAIR: "[yellow]⚡[/yellow]", + } + + type_colors = { + TaskType.COMMAND: "white", + TaskType.DIAGNOSTIC: "cyan", + TaskType.REPAIR: "yellow", + TaskType.VERIFY: "blue", + TaskType.ALTERNATIVE: "magenta", + } + + icon = status_icons.get(node.status, "?") + color = type_colors.get(node.task_type, "white") + prefix = "└── " if is_last else "├── " + + console.print(f"{indent}{prefix}{icon} [{color}][{node.task_type.value}][/{color}] {node.command[:50]}...") + + if node.reasoning: + console.print(f"{indent}{' ' if is_last else '│ '}[dim]Reason: {node.reasoning}[/dim]") + + child_indent = indent + (" " if is_last else "│ ") + for j, child in enumerate(node.children): + self._print_node(child, child_indent, j == len(node.children) - 1) + + +@dataclass +class CommandLog: + """Log entry for a single command execution.""" + command: str + purpose: str + timestamp: str + status: CommandStatus + output: str = "" + error: str = "" + duration_seconds: float = 0.0 + useful: bool = True + + def to_dict(self) -> dict[str, Any]: + return { + "command": self.command, + "purpose": self.purpose, + "timestamp": self.timestamp, + "status": self.status.value, + "output": self.output, + "error": self.error, + "duration_seconds": self.duration_seconds, + "useful": self.useful, + } + + @classmethod + def from_dict(cls, data: dict[str, Any]) -> "CommandLog": + return cls( + command=data["command"], + purpose=data["purpose"], + timestamp=data["timestamp"], + status=CommandStatus(data["status"]), + output=data.get("output", ""), + error=data.get("error", ""), + duration_seconds=data.get("duration_seconds", 0.0), + useful=data.get("useful", True), + ) + + +@dataclass +class DoRun: + """Represents a complete 
do run session.""" + run_id: str + summary: str + mode: RunMode + commands: list[CommandLog] = field(default_factory=list) + started_at: str = "" + completed_at: str = "" + user_query: str = "" + files_accessed: list[str] = field(default_factory=list) + privileges_granted: list[str] = field(default_factory=list) + session_id: str = "" + + def to_dict(self) -> dict[str, Any]: + return { + "run_id": self.run_id, + "summary": self.summary, + "mode": self.mode.value, + "commands": [cmd.to_dict() for cmd in self.commands], + "started_at": self.started_at, + "completed_at": self.completed_at, + "user_query": self.user_query, + "files_accessed": self.files_accessed, + "privileges_granted": self.privileges_granted, + "session_id": self.session_id, + } + + def get_commands_log_string(self) -> str: + """Get all commands as a formatted string for storage.""" + lines = [] + for cmd in self.commands: + lines.append(f"[{cmd.timestamp}] [{cmd.status.value.upper()}] {cmd.command}") + lines.append(f" Purpose: {cmd.purpose}") + if cmd.output: + lines.append(f" Output: {cmd.output[:500]}...") + if cmd.error: + lines.append(f" Error: {cmd.error}") + lines.append(f" Duration: {cmd.duration_seconds:.2f}s | Useful: {cmd.useful}") + lines.append("") + return "\n".join(lines) + diff --git a/cortex/do_runner/terminal.py b/cortex/do_runner/terminal.py new file mode 100644 index 000000000..47593f6eb --- /dev/null +++ b/cortex/do_runner/terminal.py @@ -0,0 +1,2351 @@ +"""Terminal monitoring for the manual execution flow.""" + +import datetime +import json +import os +import re +import subprocess +import threading +import time +from pathlib import Path +from typing import Any, Callable + +from rich.console import Console + +console = Console() + + +class ClaudeLLM: + """Claude LLM client using the LLMRouter for intelligent error analysis.""" + + def __init__(self): + self._router = None + self._available: bool | None = None + + def _get_router(self): + """Lazy initialize the router.""" + if 
self._router is None: + try: + from cortex.llm_router import LLMRouter, TaskType + self._router = LLMRouter() + self._task_type = TaskType + except Exception: + self._router = False # Mark as failed + return self._router if self._router else None + + def is_available(self) -> bool: + """Check if Claude API is available.""" + if self._available is not None: + return self._available + + router = self._get_router() + self._available = router is not None and router.claude_client is not None + return self._available + + def analyze_error(self, command: str, error_output: str, max_tokens: int = 300) -> dict | None: + """Analyze an error using Claude and return diagnosis with solution.""" + router = self._get_router() + if not router: + return None + + try: + messages = [ + { + "role": "system", + "content": """You are a Linux system debugging expert. Analyze the command error and provide: +1. Root cause (1 sentence) +2. Solution (1-2 specific commands to fix it) + +IMPORTANT: Do NOT suggest commands that require sudo/root privileges, as they cannot be auto-executed. +Only suggest commands that can run as a regular user, such as: +- Checking status (docker ps, systemctl status --user, etc.) +- User-level config fixes +- Environment variable exports +- File operations in user directories + +If the ONLY fix requires sudo, explain what needs to be done but prefix the command with "# MANUAL: " + +Be concise. 
Output format: +CAUSE: +FIX: +FIX: """ + }, + { + "role": "user", + "content": f"Command: {command}\n\nError:\n{error_output[:500]}" + } + ] + + response = router.complete( + messages=messages, + task_type=self._task_type.ERROR_DEBUGGING, + max_tokens=max_tokens, + temperature=0.3, + ) + + # Parse response + content = response.content + result = { + "cause": "", + "fixes": [], + "raw": content + } + + for line in content.split("\n"): + line = line.strip() + if line.upper().startswith("CAUSE:"): + result["cause"] = line[6:].strip() + elif line.upper().startswith("FIX:"): + fix = line[4:].strip() + if fix and not fix.startswith("#"): + result["fixes"].append(fix) + + return result + + except Exception as e: + console.print(f"[dim]Claude analysis error: {e}[/dim]") + return None + + +class LocalLLM: + """Local LLM client using Ollama with Mistral (fallback).""" + + def __init__(self, model: str = "mistral"): + self.model = model + self._available: bool | None = None + + def is_available(self) -> bool: + """Check if Ollama with the model is available.""" + if self._available is not None: + return self._available + + try: + result = subprocess.run( + ["ollama", "list"], + capture_output=True, text=True, timeout=5 + ) + self._available = result.returncode == 0 and self.model in result.stdout + if not self._available: + # Try to check if ollama is running at least + result = subprocess.run( + ["curl", "-s", "http://localhost:11434/api/tags"], + capture_output=True, text=True, timeout=5 + ) + if result.returncode == 0: + self._available = self.model in result.stdout + except (subprocess.TimeoutExpired, FileNotFoundError, Exception): + self._available = False + + return self._available + + def analyze(self, prompt: str, max_tokens: int = 200, timeout: int = 10) -> str | None: + """Call the local LLM for analysis.""" + if not self.is_available(): + return None + + try: + import urllib.request + import urllib.error + + # Use Ollama API directly via urllib (faster than curl 
subprocess) + data = json.dumps({ + "model": self.model, + "prompt": prompt, + "stream": False, + "options": { + "num_predict": max_tokens, + "temperature": 0.3, + } + }).encode('utf-8') + + req = urllib.request.Request( + "http://localhost:11434/api/generate", + data=data, + headers={"Content-Type": "application/json"} + ) + + with urllib.request.urlopen(req, timeout=timeout) as response: + result = json.loads(response.read().decode('utf-8')) + return result.get("response", "").strip() + + except (urllib.error.URLError, json.JSONDecodeError, TimeoutError, Exception): + pass + + return None + + +class TerminalMonitor: + """ + Monitors terminal commands for the manual execution flow. + + Monitors ALL terminal sources by default: + - Bash history file (~/.bash_history) + - Zsh history file (~/.zsh_history) + - Fish history file (~/.local/share/fish/fish_history) + - ALL Cursor terminal files (all projects) + - External terminal output files + """ + + def __init__(self, notification_callback: Callable[[str, str], None] | None = None, use_llm: bool = True): + self.notification_callback = notification_callback + self._monitoring = False + self._monitor_thread: threading.Thread | None = None + self._commands_observed: list[dict[str, Any]] = [] + self._lock = threading.Lock() + self._cursor_terminals_dirs: list[Path] = [] + self._expected_commands: list[str] = [] + self._shell_history_files: list[Path] = [] + self._output_buffer: list[dict[str, Any]] = [] # Buffer for terminal output + self._show_live_output = True # Whether to print live output + + # Claude LLM for intelligent error analysis (primary) + self._use_llm = use_llm + self._claude: ClaudeLLM | None = None + self._llm: LocalLLM | None = None # Fallback + if use_llm: + self._claude = ClaudeLLM() + self._llm = LocalLLM(model="mistral") # Keep as fallback + + # Context for LLM + self._session_context: list[str] = [] # Recent commands for context + + # Use existing auto-fix architecture + from 
cortex.do_runner.diagnosis import ErrorDiagnoser, AutoFixer + self._diagnoser = ErrorDiagnoser() + self._auto_fixer = AutoFixer(llm_callback=self._llm_for_autofix if use_llm else None) + + # Notification manager for desktop notifications + self.notifier = self._create_notifier() + + # Discover all terminal sources + self._discover_terminal_sources() + + def _create_notifier(self): + """Create notification manager for desktop notifications.""" + try: + from cortex.notification_manager import NotificationManager + return NotificationManager() + except ImportError: + return None + + def _llm_for_autofix(self, prompt: str) -> dict: + """LLM callback for the AutoFixer.""" + if not self._llm or not self._llm.is_available(): + return {} + + result = self._llm.analyze(prompt, max_tokens=200, timeout=15) + if result: + return {"response": result, "fix_commands": []} + return {} + + def _discover_terminal_sources(self, verbose: bool = False): + """Discover all available terminal sources to monitor.""" + home = Path.home() + + # Reset lists + self._shell_history_files = [] + self._cursor_terminals_dirs = [] + + # Shell history files + shell_histories = [ + home / ".bash_history", # Bash + home / ".zsh_history", # Zsh + home / ".history", # Generic + home / ".sh_history", # Sh + home / ".local" / "share" / "fish" / "fish_history", # Fish + home / ".ksh_history", # Korn shell + home / ".tcsh_history", # Tcsh + ] + + for hist_file in shell_histories: + if hist_file.exists(): + self._shell_history_files.append(hist_file) + if verbose: + console.print(f"[dim]📝 Monitoring: {hist_file}[/dim]") + + # Find ALL Cursor terminal directories (all projects) + cursor_base = home / ".cursor" / "projects" + if cursor_base.exists(): + for project_dir in cursor_base.iterdir(): + if project_dir.is_dir(): + terminals_path = project_dir / "terminals" + if terminals_path.exists(): + self._cursor_terminals_dirs.append(terminals_path) + if verbose: + console.print(f"[dim]🖥️ Monitoring Cursor 
terminals: {terminals_path.parent.name}[/dim]") + + # Also check for tmux/screen panes + self._tmux_available = self._check_command_exists("tmux") + self._screen_available = self._check_command_exists("screen") + + if verbose: + if self._tmux_available: + console.print("[dim]📺 Tmux detected - will monitor tmux panes[/dim]") + if self._screen_available: + console.print("[dim]📺 Screen detected - will monitor screen sessions[/dim]") + + def _check_command_exists(self, cmd: str) -> bool: + """Check if a command exists in PATH.""" + import shutil + return shutil.which(cmd) is not None + + def start(self, verbose: bool = True, show_live: bool = True, expected_commands: list[str] | None = None): + """Start monitoring terminal for commands.""" + self.start_monitoring(expected_commands=expected_commands, verbose=verbose, show_live=show_live) + + def _is_service_running(self) -> bool: + """Check if the Cortex Watch systemd service is running.""" + try: + result = subprocess.run( + ["systemctl", "--user", "is-active", "cortex-watch.service"], + capture_output=True, text=True, timeout=3 + ) + return result.stdout.strip() == "active" + except Exception: + return False + + def start_monitoring(self, expected_commands: list[str] | None = None, verbose: bool = True, show_live: bool = True, clear_old_logs: bool = True): + """Start monitoring ALL terminal sources for commands.""" + self._monitoring = True + self._expected_commands = expected_commands or [] + self._show_live_output = show_live + self._output_buffer = [] + self._session_context = [] + + # Mark this terminal as the Cortex terminal so watch hook won't log its commands + os.environ["CORTEX_TERMINAL"] = "1" + + # Record the monitoring start time to filter out old commands + self._monitoring_start_time = datetime.datetime.now() + + # Always clear old watch log to start fresh - this prevents reading old session commands + watch_file = self.get_watch_file_path() + if watch_file.exists(): + # Truncate the file to clear old 
commands from previous sessions + watch_file.write_text("") + + # Also record starting positions for bash/zsh history files + self._history_start_positions: dict[str, int] = {} + for hist_file in [Path.home() / ".bash_history", Path.home() / ".zsh_history"]: + if hist_file.exists(): + self._history_start_positions[str(hist_file)] = hist_file.stat().st_size + + # Re-discover sources in case new terminals opened + self._discover_terminal_sources(verbose=verbose) + + # Check LLM availability + llm_status = "" + if self._llm and self._use_llm: + if self._llm.is_available(): + llm_status = "\n[green]🤖 AI Analysis: Mistral (local) - Active[/green]" + else: + llm_status = "\n[yellow]🤖 AI Analysis: Mistral not available (install with: ollama pull mistral)[/yellow]" + + if verbose: + from rich.panel import Panel + + watch_file = self.get_watch_file_path() + source_file = Path.home() / ".cortex" / "watch_hook.sh" + + # Check if systemd service is running (best option) + service_running = self._is_service_running() + + # Check if auto-watch is already set up + bashrc = Path.home() / ".bashrc" + hook_installed = False + if bashrc.exists() and "Cortex Terminal Watch Hook" in bashrc.read_text(): + hook_installed = True + + # If service is running, we don't need the hook + if service_running: + setup_info = "[bold green]✓ Cortex Watch Service is running[/bold green]\n" \ + "[dim]All terminal activity is being monitored automatically![/dim]" + else: + # Not using the service, need to set up hooks + if not hook_installed: + # Auto-install the hook to .bashrc + self.setup_auto_watch(permanent=True) + hook_installed = True # Now installed + + # Ensure source file exists + self.setup_auto_watch(permanent=False) + + # Create a super short activation command + short_cmd = f"source {source_file}" + + # Try to copy to clipboard + clipboard_copied = False + try: + # Try xclip first, then xsel + for clip_cmd in [["xclip", "-selection", "clipboard"], ["xsel", "--clipboard", "--input"]]: + 
try: + proc = subprocess.run(clip_cmd, input=short_cmd.encode(), capture_output=True, timeout=2) + if proc.returncode == 0: + clipboard_copied = True + break + except (FileNotFoundError, subprocess.TimeoutExpired): + continue + except Exception: + pass + + if hook_installed: + clipboard_msg = "[green]📋 Copied to clipboard![/green] " if clipboard_copied else "" + setup_info = "[green]✓ Terminal watch hook is installed in .bashrc[/green]\n" \ + "[dim](New terminals will auto-activate)[/dim]\n\n" \ + f"[bold yellow]For EXISTING terminals, paste this:[/bold yellow]\n" \ + f"[bold cyan]{short_cmd}[/bold cyan]\n" \ + f"{clipboard_msg}\n" \ + "[dim]Or type [/dim][green]cortex watch --install --service[/green][dim] for automatic monitoring![/dim]" + + # Send desktop notification with the command + try: + msg = f"Paste in your OTHER terminal:\n\n{short_cmd}" + if clipboard_copied: + msg += "\n\n(Already copied to clipboard!)" + subprocess.run([ + "notify-send", + "--urgency=critical", + "--icon=dialog-warning", + "--expire-time=15000", + "⚠️ Cortex: Activate Terminal Watching", + msg + ], capture_output=True, timeout=2) + except Exception: + pass + else: + setup_info = f"[bold yellow]⚠ For real-time monitoring in OTHER terminals:[/bold yellow]\n\n" \ + f"[bold cyan]{short_cmd}[/bold cyan]\n\n" \ + "[dim]Or install the watch service: [/dim][green]cortex watch --install --service[/green]" + + console.print() + console.print(Panel( + "[bold cyan]🔍 Terminal Monitoring Active[/bold cyan]\n\n" + f"Watching {len(self._shell_history_files)} shell history files\n" + f"Watching {len(self._cursor_terminals_dirs)} Cursor terminal directories\n" + + ("Watching Tmux panes\n" if self._tmux_available else "") + + llm_status + + "\n\n" + setup_info, + title="[bold green]Live Terminal Monitor[/bold green]", + border_style="green", + )) + console.print() + console.print("[dim]─" * 60 + "[/dim]") + console.print("[bold]📡 Live Terminal Feed:[/bold]") + console.print("[dim]─" * 60 + "[/dim]") + 
console.print("[dim]Waiting for commands from other terminals...[/dim]") + console.print() + + self._monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True) + self._monitor_thread.start() + + def stop_monitoring(self) -> list[dict[str, Any]]: + """Stop monitoring and return observed commands.""" + self._monitoring = False + if self._monitor_thread: + self._monitor_thread.join(timeout=2) + self._monitor_thread = None + + with self._lock: + result = list(self._commands_observed) + return result + + def stop(self) -> list[dict[str, Any]]: + """Stop monitoring terminal.""" + return self.stop_monitoring() + + def get_observed_commands(self) -> list[dict[str, Any]]: + """Get all observed commands so far.""" + with self._lock: + return list(self._commands_observed) + + def test_monitoring(self): + """Test that monitoring is working by showing what files are being watched.""" + console.print("\n[bold cyan]🔍 Terminal Monitoring Test[/bold cyan]\n") + + # Check shell history files + console.print("[bold]Shell History Files:[/bold]") + for hist_file in self._shell_history_files: + exists = hist_file.exists() + size = hist_file.stat().st_size if exists else 0 + status = "[green]✓[/green]" if exists else "[red]✗[/red]" + console.print(f" {status} {hist_file} ({size} bytes)") + + # Check Cursor terminal directories + console.print("\n[bold]Cursor Terminal Directories:[/bold]") + for terminals_dir in self._cursor_terminals_dirs: + if terminals_dir.exists(): + files = list(terminals_dir.glob("*.txt")) + console.print(f" [green]✓[/green] {terminals_dir} ({len(files)} files)") + for f in files[:5]: # Show first 5 + size = f.stat().st_size + console.print(f" - {f.name} ({size} bytes)") + if len(files) > 5: + console.print(f" ... 
and {len(files) - 5} more") + else: + console.print(f" [red]✗[/red] {terminals_dir} (not found)") + + # Check tmux + console.print("\n[bold]Other Sources:[/bold]") + console.print(f" Tmux: {'[green]✓ available[/green]' if self._tmux_available else '[dim]not available[/dim]'}") + console.print(f" Screen: {'[green]✓ available[/green]' if self._screen_available else '[dim]not available[/dim]'}") + + console.print("\n[yellow]Tip: For bash history to update in real-time, run in your terminal:[/yellow]") + console.print("[green]export PROMPT_COMMAND='history -a'[/green]") + console.print() + + def inject_test_command(self, command: str, source: str = "test"): + """Inject a test command to verify the display is working.""" + self._process_observed_command(command, source) + + def get_watch_file_path(self) -> Path: + """Get the path to the cortex watch file.""" + return Path.home() / ".cortex" / "terminal_watch.log" + + def setup_terminal_hook(self) -> str: + """Generate a bash command to set up real-time terminal watching. + + Returns the command the user should run in their terminal. 
+ """ + watch_file = self.get_watch_file_path() + watch_file.parent.mkdir(parents=True, exist_ok=True) + + # Create a bash function that logs commands + hook_command = f''' +# Cortex Terminal Hook - paste this in your terminal: +export CORTEX_WATCH_FILE="{watch_file}" +export PROMPT_COMMAND='history -a; echo "$(date +%H:%M:%S) $(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" >> "$CORTEX_WATCH_FILE"' +echo "✓ Cortex is now watching this terminal" +''' + return hook_command.strip() + + def print_setup_instructions(self): + """Print instructions for setting up real-time terminal watching.""" + from rich.panel import Panel + + watch_file = self.get_watch_file_path() + + console.print() + console.print(Panel( + "[bold yellow]⚠ For real-time terminal monitoring, run this in your OTHER terminal:[/bold yellow]\n\n" + f"[green]export PROMPT_COMMAND='history -a; echo \"$(date +%H:%M:%S) $(history 1 | sed \"s/^[ ]*[0-9]*[ ]*//\")\" >> {watch_file}'[/green]\n\n" + "[dim]This makes bash write commands immediately so Cortex can see them.[/dim]", + title="[cyan]Setup Required[/cyan]", + border_style="yellow", + )) + console.print() + + def setup_system_wide_watch(self) -> tuple[bool, str]: + """ + Install the terminal watch hook system-wide in /etc/profile.d/. + + This makes the hook active for ALL users and ALL new terminals automatically. + Requires sudo. 
+ + Returns: + Tuple of (success, message) + """ + import subprocess + + watch_file = self.get_watch_file_path() + profile_script = "/etc/profile.d/cortex-watch.sh" + + # The system-wide hook script + hook_content = f'''#!/bin/bash +# Cortex Terminal Watch Hook - System Wide +# Installed by: cortex do watch --system +# This enables real-time terminal command monitoring for Cortex AI + +# Only run in interactive shells +[[ $- != *i* ]] && return + +# Skip if already set up or if this is the Cortex terminal +[[ -n "$CORTEX_TERMINAL" ]] && return +[[ -n "$__CORTEX_WATCH_ACTIVE" ]] && return +export __CORTEX_WATCH_ACTIVE=1 + +# Watch file location (user-specific) +CORTEX_WATCH_FILE="$HOME/.cortex/terminal_watch.log" +mkdir -p "$HOME/.cortex" 2>/dev/null + +__cortex_last_histnum="" +__cortex_log_cmd() {{ + local histnum="$(history 1 2>/dev/null | awk '{{print $1}}')" + [[ "$histnum" == "$__cortex_last_histnum" ]] && return + __cortex_last_histnum="$histnum" + + local cmd="$(history 1 2>/dev/null | sed "s/^[ ]*[0-9]*[ ]*//")" + [[ -z "${{cmd// /}}" ]] && return + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *"watch_hook"* ]] && return + + echo "$cmd" >> "$CORTEX_WATCH_FILE" 2>/dev/null +}} + +# Add to PROMPT_COMMAND (preserve existing) +if [[ -z "$PROMPT_COMMAND" ]]; then + export PROMPT_COMMAND='history -a; __cortex_log_cmd' +else + export PROMPT_COMMAND="${{PROMPT_COMMAND}}; __cortex_log_cmd" +fi +''' + + try: + # Write to a temp file first + import tempfile + with tempfile.NamedTemporaryFile(mode='w', suffix='.sh', delete=False) as f: + f.write(hook_content) + temp_file = f.name + + # Use sudo to copy to /etc/profile.d/ + result = subprocess.run( + ["sudo", "cp", temp_file, profile_script], + capture_output=True, + text=True, + timeout=30 + ) + + if result.returncode != 0: + return False, f"Failed to install: {result.stderr}" + + # Make it executable + subprocess.run( + ["sudo", "chmod", "+x", profile_script], + capture_output=True, + timeout=10 + ) + + # Clean up 
temp file + Path(temp_file).unlink(missing_ok=True) + + return True, f"✓ Installed system-wide to {profile_script}\n" \ + "All NEW terminals will automatically have Cortex watching enabled.\n" \ + "For current terminals, run: source /etc/profile.d/cortex-watch.sh" + + except subprocess.TimeoutExpired: + return False, "Timeout waiting for sudo" + except Exception as e: + return False, f"Error: {e}" + + def uninstall_system_wide_watch(self) -> tuple[bool, str]: + """Remove the system-wide terminal watch hook.""" + import subprocess + + profile_script = "/etc/profile.d/cortex-watch.sh" + + try: + if not Path(profile_script).exists(): + return True, "System-wide hook not installed" + + result = subprocess.run( + ["sudo", "rm", profile_script], + capture_output=True, + text=True, + timeout=30 + ) + + if result.returncode != 0: + return False, f"Failed to remove: {result.stderr}" + + return True, f"✓ Removed {profile_script}" + + except Exception as e: + return False, f"Error: {e}" + + def is_system_wide_installed(self) -> bool: + """Check if system-wide hook is installed.""" + return Path("/etc/profile.d/cortex-watch.sh").exists() + + def setup_auto_watch(self, permanent: bool = True) -> tuple[bool, str]: + """ + Set up automatic terminal watching for new and existing terminals. 
+ + Args: + permanent: If True, adds the hook to ~/.bashrc for future terminals + + Returns: + Tuple of (success, message) + """ + watch_file = self.get_watch_file_path() + watch_file.parent.mkdir(parents=True, exist_ok=True) + + # The hook command - excludes cortex commands and source commands + # Uses a function to filter out Cortex terminal commands + # Added: tracks last logged command and history number to avoid duplicates + hook_line = f''' +__cortex_last_histnum="" +__cortex_log_cmd() {{ + # Get current history number + local histnum="$(history 1 | awk '{{print $1}}')" + # Skip if same as last logged (prevents duplicate on terminal init) + [[ "$histnum" == "$__cortex_last_histnum" ]] && return + __cortex_last_histnum="$histnum" + + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" + # Skip empty or whitespace-only commands + [[ -z "${{cmd// /}}" ]] && return + # Skip if this is the cortex terminal or cortex-related commands + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *"source"*".cortex"* ]] && return + [[ "$cmd" == *"watch_hook"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && return + # Include terminal ID (TTY) in the log - format: TTY|COMMAND + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo "${{tty_name:-unknown}}|$cmd" >> {watch_file} +}} +export PROMPT_COMMAND='history -a; __cortex_log_cmd' +''' + marker = "# Cortex Terminal Watch Hook" + + bashrc = Path.home() / ".bashrc" + zshrc = Path.home() / ".zshrc" + + added_to = [] + + if permanent: + # Add to .bashrc if it exists and doesn't already have the hook + if bashrc.exists(): + content = bashrc.read_text() + if marker not in content: + # Add hook AND a short alias for easy activation + alias_line = f'\nalias cw="source {watch_file.parent}/watch_hook.sh" # Quick Cortex watch activation\n' + with open(bashrc, "a") as f: + f.write(f"\n{marker}\n{hook_line}\n{alias_line}") + added_to.append(".bashrc") + else: + added_to.append(".bashrc (already configured)") + + # Add to 
.zshrc if it exists + if zshrc.exists(): + content = zshrc.read_text() + if marker not in content: + # Zsh uses precmd instead of PROMPT_COMMAND + # Added tracking to avoid duplicates + zsh_hook = f''' +{marker} +typeset -g __cortex_last_cmd="" +cortex_watch_hook() {{ + local cmd="$(fc -ln -1 | sed 's/^[[:space:]]*//')" + [[ -z "$cmd" ]] && return + [[ "$cmd" == "$__cortex_last_cmd" ]] && return + __cortex_last_cmd="$cmd" + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *".cortex"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && return + # Include terminal ID (TTY) in the log - format: TTY|COMMAND + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo "${{tty_name:-unknown}}|$cmd" >> {watch_file} +}} +precmd_functions+=(cortex_watch_hook) +''' + with open(zshrc, "a") as f: + f.write(zsh_hook) + added_to.append(".zshrc") + else: + added_to.append(".zshrc (already configured)") + + # Create a source file for existing terminals + source_file = Path.home() / ".cortex" / "watch_hook.sh" + source_file.write_text(f'''#!/bin/bash +{marker} +{hook_line} +echo "✓ Cortex is now watching this terminal" +''') + # Make the hook sourceable/executable (single chmod; a duplicated call was removed) + source_file.chmod(0o755) + + if added_to: + msg = f"Added to: {', '.join(added_to)}\n" + msg += f"For existing terminals, run: source {source_file}" + return True, msg + else: + return True, f"Source file created: {source_file}\nRun: source {source_file}" + + def remove_auto_watch(self) -> tuple[bool, str]: + """Remove the automatic terminal watching hook from shell configs.""" + marker = "# Cortex Terminal Watch Hook" + removed_from = [] + + for rc_file in [Path.home() / ".bashrc", Path.home() / ".zshrc"]: + if rc_file.exists(): + content = rc_file.read_text() + if marker in content: + # Remove the hook section + lines = content.split("\n") + new_lines = [] + skip_until_blank = False + + for line in lines: + if marker in line: + skip_until_blank = True + continue + if skip_until_blank: + if line.strip() == "" or
line.startswith("export PROMPT") or line.startswith("cortex_watch") or line.startswith("precmd_functions"): + continue + if line.startswith("}"): + continue + skip_until_blank = False + new_lines.append(line) + + rc_file.write_text("\n".join(new_lines)) + removed_from.append(rc_file.name) + + # Remove source file + source_file = Path.home() / ".cortex" / "watch_hook.sh" + if source_file.exists(): + source_file.unlink() + removed_from.append("watch_hook.sh") + + if removed_from: + return True, f"Removed from: {', '.join(removed_from)}" + return True, "No hooks found to remove" + + def broadcast_hook_to_terminals(self) -> int: + """ + Attempt to set up the hook in all running bash terminals. + Uses various methods to inject the hook. + + Returns the number of terminals that were set up. + """ + watch_file = self.get_watch_file_path() + hook_cmd = f'export PROMPT_COMMAND=\'history -a; echo "$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" >> {watch_file}\'' + + count = 0 + + # Method 1: Write to all pts devices (requires proper permissions) + try: + pts_dir = Path("/dev/pts") + if pts_dir.exists(): + for pts in pts_dir.iterdir(): + if pts.name.isdigit(): + try: + # This usually requires the same user + with open(pts, "w") as f: + f.write(f"\n# Cortex: Setting up terminal watch...\n") + f.write(f"source ~/.cortex/watch_hook.sh\n") + count += 1 + except (PermissionError, OSError): + pass + except Exception: + pass + + return count + + def _monitor_loop(self): + """Monitor loop that watches ALL terminal sources for activity.""" + file_positions: dict[str, int] = {} + last_check_time: dict[str, float] = {} + + # Cortex watch file (real-time if user sets up the hook) + watch_file = self.get_watch_file_path() + + # Ensure watch file directory exists + watch_file.parent.mkdir(parents=True, exist_ok=True) + + # Initialize positions for all shell history files - start at END to only see NEW commands + for hist_file in self._shell_history_files: + if hist_file.exists(): + try: + 
file_positions[str(hist_file)] = hist_file.stat().st_size + last_check_time[str(hist_file)] = time.time() + except OSError: + pass + + # Initialize watch file position - ALWAYS start from END of existing content + # This ensures we only see commands written AFTER monitoring starts + if watch_file.exists(): + try: + # Start from current end position (skip ALL existing content) + file_positions[str(watch_file)] = watch_file.stat().st_size + except OSError: + file_positions[str(watch_file)] = 0 + else: + # File doesn't exist yet - will be created, start from 0 + file_positions[str(watch_file)] = 0 + + # Initialize positions for all Cursor terminal files + for terminals_dir in self._cursor_terminals_dirs: + if terminals_dir.exists(): + for term_file in terminals_dir.glob("*.txt"): + try: + file_positions[str(term_file)] = term_file.stat().st_size + except OSError: + pass + # Also check for ext-*.txt files (external terminals) + for term_file in terminals_dir.glob("ext-*.txt"): + try: + file_positions[str(term_file)] = term_file.stat().st_size + except OSError: + pass + + check_count = 0 + while self._monitoring: + time.sleep(0.2) # Check very frequently (5 times per second) + check_count += 1 + + # Check Cortex watch file FIRST (this is the real-time one) + if watch_file.exists(): + self._check_watch_file(watch_file, file_positions) + + # Check all shell history files + for hist_file in self._shell_history_files: + if hist_file.exists(): + shell_name = hist_file.stem.replace("_history", "").replace(".", "") + self._check_file_for_new_commands( + hist_file, file_positions, source=f"{shell_name}_history" + ) + + # Check ALL Cursor terminal directories (these update in real-time!) 
+ for terminals_dir in self._cursor_terminals_dirs: + if terminals_dir.exists(): + project_name = terminals_dir.parent.name + + # IDE terminals - check ALL txt files + for term_file in terminals_dir.glob("*.txt"): + if not term_file.name.startswith("ext-"): + self._check_file_for_new_commands( + term_file, file_positions, + source=f"cursor:{project_name}:{term_file.stem}" + ) + + # External terminals (iTerm, gnome-terminal, etc.) + for term_file in terminals_dir.glob("ext-*.txt"): + self._check_file_for_new_commands( + term_file, file_positions, + source=f"external:{project_name}:{term_file.stem}" + ) + + # Check tmux panes if available (every 5 checks = 1 second) + if self._tmux_available and check_count % 5 == 0: + self._check_tmux_panes() + + # Periodically show we're still monitoring (every 30 seconds) + if check_count % 150 == 0 and self._show_live_output: + console.print(f"[dim]... still monitoring ({len(self._commands_observed)} commands observed so far)[/dim]") + + def _is_cortex_terminal_command(self, command: str) -> bool: + """Check if a command is from the Cortex terminal itself (should be ignored). + + This should be very conservative - only filter out commands that are + DEFINITELY from Cortex's own terminal, not user commands. + """ + cmd_lower = command.lower().strip() + + # Only filter out commands that are clearly from Cortex terminal + cortex_patterns = [ + "cortex ask", + "cortex watch", + "cortex do ", + "cortex info", + "source ~/.cortex/watch_hook", # Setting up the watch hook + ".cortex/watch_hook", + ] + + for pattern in cortex_patterns: + if pattern in cmd_lower: + return True + + # Check if command starts with "cortex " (the CLI) + if cmd_lower.startswith("cortex "): + return True + + # Don't filter out general commands - let them through! 
+ return False + + def _check_watch_file(self, watch_file: Path, positions: dict[str, int]): + """Check the Cortex watch file for new commands (real-time).""" + try: + current_size = watch_file.stat().st_size + key = str(watch_file) + + # Initialize position if not set + # Start from 0 because we clear the file when monitoring starts + # This ensures we capture all commands written after monitoring begins + if key not in positions: + positions[key] = 0 # Start from beginning since file was cleared + + # If file is smaller than our position (was truncated), reset + if current_size < positions[key]: + positions[key] = 0 + + if current_size > positions[key]: + with open(watch_file) as f: + f.seek(positions[key]) + new_content = f.read() + + # Parse watch file - each line is a command + for line in new_content.split("\n"): + line = line.strip() + if not line: + continue + + # Skip very short lines or common noise + if len(line) < 2: + continue + + # Skip if we've already seen this exact command recently + if hasattr(self, '_recent_watch_commands'): + if line in self._recent_watch_commands: + continue + else: + self._recent_watch_commands = [] + + # Keep track of recent commands to avoid duplicates + self._recent_watch_commands.append(line) + if len(self._recent_watch_commands) > 20: + self._recent_watch_commands.pop(0) + + # Handle format with timestamp: "HH:MM:SS command" + if re.match(r'^\d{2}:\d{2}:\d{2}\s+', line): + parts = line.split(" ", 1) + if len(parts) == 2 and parts[1].strip(): + self._process_observed_command(parts[1].strip(), "live_terminal") + else: + # Plain command + self._process_observed_command(line, "live_terminal") + + positions[key] = current_size + + except OSError: + pass + + def _check_tmux_panes(self): + """Check tmux panes for recent commands.""" + import subprocess + try: + # Get list of tmux sessions + result = subprocess.run( + ["tmux", "list-panes", "-a", "-F", "#{pane_id}:#{pane_current_command}"], + capture_output=True, text=True, 
timeout=1 + ) + if result.returncode == 0: + for line in result.stdout.strip().split("\n"): + if ":" in line: + pane_id, cmd = line.split(":", 1) + if cmd and cmd not in ["bash", "zsh", "fish", "sh"]: + self._process_observed_command(cmd, source=f"tmux:{pane_id}") + except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError): + pass + + def _check_file_for_new_commands( + self, + file_path: Path, + positions: dict[str, int], + source: str, + ): + """Check a file for new commands and process them.""" + try: + current_size = file_path.stat().st_size + key = str(file_path) + + if key not in positions: + positions[key] = current_size + return + + if current_size > positions[key]: + with open(file_path) as f: + f.seek(positions[key]) + new_content = f.read() + + # For Cursor terminals, also extract output + if "cursor" in source or "external" in source: + self._process_terminal_content(new_content, source) + else: + new_commands = self._extract_commands_from_content(new_content, source) + for cmd in new_commands: + self._process_observed_command(cmd, source) + + positions[key] = current_size + + except OSError: + pass + + def _process_terminal_content(self, content: str, source: str): + """Process terminal content including commands and their output.""" + lines = content.split("\n") + current_command = None + output_lines = [] + + for line in lines: + line_stripped = line.strip() + if not line_stripped: + continue + + # Check if this is a command line (has prompt) + is_command = False + for pattern in [ + r"^\$ (.+)$", + r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+\$ (.+)$", + r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+# (.+)$", + r"^\(.*\)\s*\$ (.+)$", + ]: + match = re.match(pattern, line_stripped) + if match: + # Save previous command with its output + if current_command: + self._process_observed_command_with_output( + current_command, "\n".join(output_lines), source + ) + + current_command = match.group(1).strip() + output_lines = [] + is_command = True + break 
+ + if not is_command and current_command: + # This is output from the current command + output_lines.append(line_stripped) + + # Process the last command + if current_command: + self._process_observed_command_with_output( + current_command, "\n".join(output_lines), source + ) + + def _process_observed_command_with_output(self, command: str, output: str, source: str): + """Process a command with its output for better feedback.""" + # First process the command normally + self._process_observed_command(command, source) + + if not self._show_live_output: + return + + # Then show relevant output if there is any + if output and len(output) > 5: + # Check for errors in output + error_patterns = [ + (r"error:", "Error detected"), + (r"Error:", "Error detected"), + (r"ERROR", "Error detected"), + (r"failed", "Operation failed"), + (r"Failed", "Operation failed"), + (r"permission denied", "Permission denied"), + (r"Permission denied", "Permission denied"), + (r"not found", "Not found"), + (r"No such file", "File not found"), + (r"command not found", "Command not found"), + (r"Cannot connect", "Connection failed"), + (r"Connection refused", "Connection refused"), + (r"Unable to", "Operation failed"), + (r"denied", "Access denied"), + (r"Denied", "Access denied"), + (r"timed out", "Timeout"), + (r"timeout", "Timeout"), + (r"fatal:", "Fatal error"), + (r"FATAL", "Fatal error"), + (r"panic", "Panic"), + (r"segfault", "Crash"), + (r"Segmentation fault", "Crash"), + (r"killed", "Process killed"), + (r"Killed", "Process killed"), + (r"cannot", "Cannot complete"), + (r"Could not", "Could not complete"), + (r"Invalid", "Invalid input"), + (r"Conflict", "Conflict detected"), + (r"\[emerg\]", "Config error"), + (r"\[error\]", "Error"), + (r"\[crit\]", "Critical error"), + (r"\[alert\]", "Alert"), + (r"syntax error", "Syntax error"), + (r"unknown directive", "Unknown directive"), + (r"unexpected", "Unexpected error"), + ] + + for pattern, msg in error_patterns: + if re.search(pattern, 
output, re.IGNORECASE): + # Show error in bordered panel + from rich.panel import Panel + from rich.text import Text + + output_preview = output[:200] + "..." if len(output) > 200 else output + + error_text = Text() + error_text.append(f"✗ {msg}\n\n", style="bold red") + for line in output_preview.split('\n')[:3]: + if line.strip(): + error_text.append(f" {line.strip()[:80]}\n", style="dim") + + console.print() + console.print(Panel( + error_text, + title="[red bold]Error[/red bold]", + border_style="red", + padding=(0, 1), + )) + + # Get AI-powered help + self._provide_error_help(command, output) + break + else: + # Show success indicator for commands that completed + if "✓" in output or "success" in output.lower() or "complete" in output.lower(): + console.print(f"[green] ✓ Command completed successfully[/green]") + elif len(output.strip()) > 0: + # Show a preview of the output + output_lines = [l for l in output.split("\n") if l.strip()][:3] + if output_lines: + console.print(f"[dim] Output: {output_lines[0][:60]}{'...' 
if len(output_lines[0]) > 60 else ''}[/dim]") + + def _provide_error_help(self, command: str, output: str): + """Provide contextual help for errors using Claude LLM and send solutions via notifications.""" + from rich.panel import Panel + from rich.table import Table + import subprocess + + console.print() + + # First, try Claude for intelligent analysis + claude_analysis = None + if self._claude and self._use_llm and self._claude.is_available(): + claude_analysis = self._claude.analyze_error(command, output) + + # Also use the existing ErrorDiagnoser for pattern-based analysis + diagnosis = self._diagnoser.diagnose_error(command, output) + + error_type = diagnosis.get("error_type", "unknown") + category = diagnosis.get("category", "unknown") + description = diagnosis.get("description", output[:200]) + fix_commands = diagnosis.get("fix_commands", []) + can_auto_fix = diagnosis.get("can_auto_fix", False) + fix_strategy = diagnosis.get("fix_strategy", "") + extracted_info = diagnosis.get("extracted_info", {}) + + # If Claude provided analysis, use it to enhance diagnosis + if claude_analysis: + cause = claude_analysis.get("cause", "") + claude_fixes = claude_analysis.get("fixes", []) + + # Show Claude's analysis in bordered panel + if cause or claude_fixes: + from rich.panel import Panel + from rich.text import Text + + analysis_text = Text() + if cause: + analysis_text.append("Cause: ", style="bold cyan") + analysis_text.append(f"{cause}\n\n", style="white") + if claude_fixes: + analysis_text.append("Solution:\n", style="bold green") + for fix in claude_fixes[:3]: + analysis_text.append(f" $ {fix}\n", style="green") + + console.print() + console.print(Panel( + analysis_text, + title="[cyan bold]🤖 Claude Analysis[/cyan bold]", + border_style="cyan", + padding=(0, 1), + )) + + # Send notification with Claude's solution + if cause or claude_fixes: + notif_title = f"🔧 Cortex: {error_type if error_type != 'unknown' else 'Error'}" + notif_body = cause[:100] if cause else 
description[:100] + if claude_fixes: + notif_body += f"\n\nFix: {claude_fixes[0]}" + self._send_solution_notification(notif_title, notif_body) + + # Use Claude's fixes if pattern-based analysis didn't find any + if not fix_commands and claude_fixes: + fix_commands = claude_fixes + can_auto_fix = True + + # Show diagnosis in panel (only if no Claude analysis) + if not claude_analysis: + from rich.panel import Panel + from rich.text import Text + from rich.table import Table + + diag_table = Table(show_header=False, box=None, padding=(0, 1)) + diag_table.add_column("Key", style="dim") + diag_table.add_column("Value", style="bold") + + diag_table.add_row("Type", error_type) + diag_table.add_row("Category", category) + if can_auto_fix: + diag_table.add_row("Auto-Fix", f"[green]● Yes[/green] [dim]({fix_strategy})[/dim]" if fix_strategy else "[green]● Yes[/green]") + else: + diag_table.add_row("Auto-Fix", "[red]○ No[/red]") + + console.print() + console.print(Panel( + diag_table, + title="[yellow bold]Diagnosis[/yellow bold]", + border_style="yellow", + padding=(0, 1), + )) + + # If auto-fix is possible, attempt to run the fix commands + if can_auto_fix and fix_commands: + actionable_commands = [c for c in fix_commands if not c.startswith("#")] + + if actionable_commands: + # Auto-fix with progress bar + from rich.panel import Panel + from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn + + console.print() + console.print(Panel( + f"[bold]Running {len(actionable_commands)} fix command(s)...[/bold]", + title="[green bold]🔧 Auto-Fix[/green bold]", + border_style="green", + padding=(0, 1), + )) + + # Send notification that we're fixing the command + self._notify_fixing_command(command, actionable_commands[0]) + + # Run the fix commands + fix_success = self._run_auto_fix_commands(actionable_commands, command, error_type) + + if fix_success: + # Success in bordered panel + from rich.panel import Panel + console.print() + console.print(Panel( + 
f"[green]✓[/green] Auto-fix completed!\n\n[dim]Retry:[/dim] [cyan]{command}[/cyan]", + title="[green bold]Success[/green bold]", + border_style="green", + padding=(0, 1), + )) + + # Send success notification + self._send_fix_success_notification(command, error_type) + else: + pass # Sudo commands shown separately + + console.print() + return + + # Show fix commands in bordered panel if we can't auto-fix + if fix_commands and not claude_analysis: + from rich.panel import Panel + from rich.text import Text + + fix_text = Text() + for cmd in fix_commands[:3]: + if not cmd.startswith("#"): + fix_text.append(f" $ {cmd}\n", style="green") + + console.print() + console.print(Panel( + fix_text, + title="[bold]Manual Fix[/bold]", + border_style="blue", + padding=(0, 1), + )) + + # If error is unknown and no Claude, use local LLM + if error_type == "unknown" and not claude_analysis and self._llm and self._use_llm and self._llm.is_available(): + llm_help = self._llm_analyze_error(command, output) + if llm_help: + console.print() + console.print(f"[dim]{llm_help}[/dim]") + + # Try to extract fix command from LLM response + llm_fix = self._extract_fix_from_llm(llm_help) + if llm_fix: + console.print() + console.print(f"[bold green]💡 AI Suggested Fix:[/bold green] [cyan]{llm_fix}[/cyan]") + + # Attempt to run the LLM suggested fix + if self._is_safe_fix_command(llm_fix): + console.print("[dim]Attempting AI-suggested fix...[/dim]") + self._run_auto_fix_commands([llm_fix], command, "ai_suggested") + + # Build notification message + notification_msg = "" + if fix_commands: + actionable = [c for c in fix_commands if not c.startswith("#")] + if actionable: + notification_msg = f"Manual fix needed: {actionable[0][:50]}" + else: + notification_msg = description[:100] + else: + notification_msg = description[:100] + + # Send desktop notification + self._send_error_notification(command, notification_msg, error_type, can_auto_fix) + + console.print() + + def _run_auto_fix_commands(self, 
commands: list[str], original_command: str, error_type: str) -> bool: + """Run auto-fix commands with progress bar and return True if successful.""" + import subprocess + from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn + from rich.panel import Panel + from rich.table import Table + + all_success = True + sudo_commands_pending = [] + results = [] + + # Break down && commands into individual commands + expanded_commands = [] + for cmd in commands[:3]: + if cmd.startswith("#"): + continue + # Split by && but preserve the individual commands + if " && " in cmd: + parts = [p.strip() for p in cmd.split(" && ") if p.strip()] + expanded_commands.extend(parts) + else: + expanded_commands.append(cmd) + + actionable = expanded_commands + + # Show each command being run with Rich Status (no raw ANSI codes) + from rich.status import Status + + for i, fix_cmd in enumerate(actionable, 1): + # Check if this needs sudo + needs_sudo = fix_cmd.strip().startswith("sudo ") + + if needs_sudo: + try: + check_sudo = subprocess.run( + ["sudo", "-n", "true"], + capture_output=True, + timeout=5 + ) + + if check_sudo.returncode != 0: + sudo_commands_pending.append(fix_cmd) + results.append((fix_cmd, "sudo", None)) + console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {fix_cmd[:55]}... [dim](needs sudo)[/dim]") + continue + except Exception: + sudo_commands_pending.append(fix_cmd) + results.append((fix_cmd, "sudo", None)) + console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {fix_cmd[:55]}... [dim](needs sudo)[/dim]") + continue + + # Run command with status spinner + cmd_display = fix_cmd[:55] + "..." 
if len(fix_cmd) > 55 else fix_cmd + + try: + with Status(f"[cyan]{cmd_display}[/cyan]", console=console, spinner="dots"): + result = subprocess.run( + fix_cmd, + shell=True, + capture_output=True, + text=True, + timeout=60 + ) + + if result.returncode == 0: + results.append((fix_cmd, "success", None)) + console.print(f" [dim][{i}/{len(actionable)}][/dim] [green]✓[/green] {cmd_display}") + else: + if "password" in (result.stderr or "").lower() or "terminal is required" in (result.stderr or "").lower(): + sudo_commands_pending.append(fix_cmd) + results.append((fix_cmd, "sudo", None)) + console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {cmd_display} [dim](needs sudo)[/dim]") + else: + results.append((fix_cmd, "failed", result.stderr[:60] if result.stderr else "failed")) + all_success = False + console.print(f" [dim][{i}/{len(actionable)}][/dim] [red]✗[/red] {cmd_display}") + console.print(f" [dim red]{result.stderr[:80] if result.stderr else 'Command failed'}[/dim red]") + break + + except subprocess.TimeoutExpired: + results.append((fix_cmd, "timeout", None)) + all_success = False + console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]⏱[/yellow] {cmd_display} [dim](timeout)[/dim]") + break + except Exception as e: + results.append((fix_cmd, "error", str(e)[:50])) + all_success = False + console.print(f" [dim][{i}/{len(actionable)}][/dim] [red]✗[/red] {cmd_display}") + break + + # Show summary line + success_count = sum(1 for _, s, _ in results if s == "success") + if success_count > 0 and success_count == len([r for r in results if r[1] != "sudo"]): + console.print(f"\n [green]✓ All {success_count} command(s) completed[/green]") + + # Show sudo commands in bordered panel + if sudo_commands_pending: + from rich.panel import Panel + from rich.text import Text + + sudo_text = Text() + sudo_text.append("Run these commands manually:\n\n", style="dim") + for cmd in sudo_commands_pending: + sudo_text.append(f" $ {cmd}\n", style="green") + + 
console.print() + console.print(Panel( + sudo_text, + title="[yellow bold]🔐 Sudo Required[/yellow bold]", + border_style="yellow", + padding=(0, 1), + )) + + # Send notification about pending sudo commands + self._send_sudo_pending_notification(sudo_commands_pending) + + # Still consider it a partial success if we need manual sudo + return len(sudo_commands_pending) < len([c for c in commands if not c.startswith("#")]) + + return all_success + + def _send_sudo_pending_notification(self, commands: list[str]): + """Send notification about pending sudo commands.""" + try: + import subprocess + + cmd_preview = commands[0][:40] + "..." if len(commands[0]) > 40 else commands[0] + + subprocess.run([ + "notify-send", + "--urgency=normal", + "--icon=dialog-password", + "🔐 Cortex: Sudo required", + f"Run in your terminal:\n{cmd_preview}" + ], capture_output=True, timeout=2) + + except Exception: + pass + + def _extract_fix_from_llm(self, llm_response: str) -> str | None: + """Extract a fix command from LLM response.""" + import re + + # Look for commands in common formats + patterns = [ + r'`([^`]+)`', # Backtick enclosed + r'^\$ (.+)$', # Shell prompt format + r'^sudo (.+)$', # Sudo commands + r'run[:\s]+([^\n]+)', # "run: command" format + r'try[:\s]+([^\n]+)', # "try: command" format + ] + + for pattern in patterns: + matches = re.findall(pattern, llm_response, re.MULTILINE | re.IGNORECASE) + for match in matches: + cmd = match.strip() + if cmd and len(cmd) > 3 and self._is_safe_fix_command(cmd): + return cmd + + return None + + def _is_safe_fix_command(self, command: str) -> bool: + """Check if a fix command is safe to run automatically.""" + cmd_lower = command.lower().strip() + + # Dangerous commands we should never auto-run + dangerous_patterns = [ + "rm -rf /", + "rm -rf ~", + "rm -rf *", + "> /dev/", + "mkfs", + "dd if=", + "chmod -R 777 /", + "chmod 777 /", + ":(){:|:&};:", # Fork bomb + "wget|sh", + "curl|sh", + "curl|bash", + "wget|bash", + ] + + for pattern in 
dangerous_patterns: + if pattern in cmd_lower: + return False + + # Safe fix command patterns + safe_patterns = [ + "sudo systemctl", + "sudo service", + "sudo apt", + "sudo apt-get", + "apt-cache", + "systemctl status", + "sudo nginx -t", + "sudo nginx -s reload", + "docker start", + "docker restart", + "pip install", + "npm install", + "sudo chmod", + "sudo chown", + "mkdir -p", + "touch", + ] + + for pattern in safe_patterns: + if cmd_lower.startswith(pattern): + return True + + # Allow sudo commands for common safe operations + if cmd_lower.startswith("sudo "): + rest = cmd_lower[5:].strip() + safe_sudo = ["systemctl", "service", "apt", "apt-get", "nginx", "chmod", "chown", "mkdir"] + if any(rest.startswith(s) for s in safe_sudo): + return True + + return False + + def _send_fix_success_notification(self, command: str, error_type: str): + """Send a desktop notification that the fix was successful.""" + try: + import subprocess + + cmd_short = command[:30] + "..." if len(command) > 30 else command + + subprocess.run([ + "notify-send", + "--urgency=normal", + "--icon=dialog-information", + f"✅ Cortex: Fixed {error_type}", + f"Auto-fix successful! You can now retry:\n{cmd_short}" + ], capture_output=True, timeout=2) + + except Exception: + pass + + def _send_solution_notification(self, title: str, body: str): + """Send a desktop notification with the solution from Claude.""" + try: + import subprocess + + # Use notify-send with high priority + subprocess.run([ + "notify-send", + "--urgency=critical", + "--icon=dialog-information", + "--expire-time=15000", # 15 seconds + title, + body + ], capture_output=True, timeout=2) + + except Exception: + pass + + def _send_error_notification(self, command: str, solution: str, error_type: str = "", can_auto_fix: bool = False): + """Send a desktop notification with the error solution.""" + try: + # Try to use notify-send (standard on Ubuntu) + import subprocess + + # Truncate for notification + cmd_short = command[:30] + "..." 
if len(command) > 30 else command + solution_short = solution[:150] + "..." if len(solution) > 150 else solution + + # Build title with error type + if error_type and error_type != "unknown": + title = f"🔧 Cortex: {error_type}" + else: + title = f"🔧 Cortex: Error detected" + + # Add auto-fix indicator + if can_auto_fix: + body = f"✓ Auto-fixable\n\n{solution_short}" + icon = "dialog-information" + else: + body = solution_short + icon = "dialog-warning" + + # Send notification + subprocess.run([ + "notify-send", + "--urgency=normal", + f"--icon={icon}", + title, + body + ], capture_output=True, timeout=2) + + except (FileNotFoundError, subprocess.TimeoutExpired, Exception): + # notify-send not available or failed, try callback + if self.notification_callback: + self.notification_callback(f"Error in: {command[:30]}", solution[:100]) + + def _llm_analyze_error(self, command: str, error_output: str) -> str | None: + """Use local LLM to analyze an error and provide a fix.""" + if not self._llm: + return None + + # Build context from recent commands + context = "" + if self._session_context: + context = "Recent commands:\n" + "\n".join(self._session_context[-5:]) + "\n\n" + + prompt = f"""You are a Linux expert. A user ran a command and got an error. +Provide a brief, actionable fix (2-3 sentences max). + +IMPORTANT: Do NOT suggest sudo commands - they cannot be auto-executed. +Only suggest non-sudo commands. If sudo is required, say "requires manual sudo" instead. 
+ +{context}Command: {command} + +Error output: +{error_output[:500]} + +Fix (be specific, give the exact non-sudo command to run):""" + + try: + result = self._llm.analyze(prompt, max_tokens=150, timeout=10) + if result: + return result.strip() + except Exception: + pass + + return None + + def analyze_session_intent(self) -> str | None: + """Use LLM to analyze what the user is trying to accomplish based on their commands.""" + if not self._llm or not self._llm.is_available(): + return None + + if len(self._session_context) < 2: + return None + + prompt = f"""Based on these terminal commands, what is the user trying to accomplish? +Give a brief summary (1 sentence max). + +Commands: +{chr(10).join(self._session_context[-5:])} + +The user is trying to:""" + + try: + result = self._llm.analyze(prompt, max_tokens=50, timeout=15) + if result: + result = result.strip() + # Take only first sentence + if ". " in result: + result = result.split(". ")[0] + "." + return result + except Exception: + pass + + return None + + def get_next_step_suggestion(self) -> str | None: + """Use LLM to suggest the next logical step based on recent commands.""" + if not self._llm or not self._llm.is_available(): + return None + + if len(self._session_context) < 1: + return None + + prompt = f"""Based on these terminal commands, what single command should the user run next? +Respond with ONLY the command, nothing else. 
+ +Recent commands: +{chr(10).join(self._session_context[-5:])} + +Next command:""" + + try: + result = self._llm.analyze(prompt, max_tokens=30, timeout=15) + if result: + # Clean up - extract just the command + result = result.strip() + # Remove common prefixes + for prefix in ["$", "Run:", "Try:", "Next:", "Command:", "`"]: + if result.lower().startswith(prefix.lower()): + result = result[len(prefix):].strip() + result = result.rstrip("`") + return result.split("\n")[0].strip() + except Exception: + pass + + return None + + def get_collected_context(self) -> str: + """Get a formatted summary of all collected terminal context.""" + with self._lock: + if not self._commands_observed: + return "No commands observed yet." + + lines = ["[bold]📋 Collected Terminal Context:[/bold]", ""] + + for i, obs in enumerate(self._commands_observed, 1): + timestamp = obs.get("timestamp", "")[:19] + source = obs.get("source", "unknown") + command = obs.get("command", "") + + lines.append(f"{i}. [{timestamp}] ({source})") + lines.append(f" $ {command}") + lines.append("") + + return "\n".join(lines) + + def print_collected_context(self): + """Print a summary of all collected terminal context with AI analysis.""" + from rich.panel import Panel + + with self._lock: + if not self._commands_observed: + console.print("[dim]No commands observed yet.[/dim]") + return + + console.print() + console.print(Panel( + f"[bold]Collected {len(self._commands_observed)} command(s) from other terminals[/bold]", + title="[cyan]📋 Terminal Context Summary[/cyan]", + border_style="cyan", + )) + + for i, obs in enumerate(self._commands_observed[-10:], 1): # Show last 10 + timestamp = obs.get("timestamp", "")[:19].split("T")[-1] if "T" in obs.get("timestamp", "") else obs.get("timestamp", "")[:8] + source = obs.get("source", "unknown") + command = obs.get("command", "") + + # Shorten source name + if ":" in source: + source = source.split(":")[-1] + + console.print(f" [dim]{timestamp}[/dim] 
[cyan]{source:12}[/cyan] [white]{command[:50]}{'...' if len(command) > 50 else ''}[/white]") + + if len(self._commands_observed) > 10: + console.print(f" [dim]... and {len(self._commands_observed) - 10} more commands[/dim]") + + # Add AI analysis if available + if self._llm and self._use_llm and self._llm.is_available() and len(self._session_context) >= 2: + console.print() + console.print("[bold magenta]🤖 AI Analysis:[/bold magenta]") + + # Analyze intent + intent = self.analyze_session_intent() + if intent: + console.print(f"[white] Intent: {intent}[/white]") + + # Suggest next step + next_step = self.get_next_step_suggestion() + if next_step: + console.print(f"[green] Suggested next: {next_step}[/green]") + + console.print() + + def _extract_commands_from_content(self, content: str, source: str) -> list[str]: + """Extract commands from terminal content based on source type.""" + commands = [] + + # Shell history files - each line is a command + if "_history" in source or "history" in source: + for line in content.strip().split("\n"): + line = line.strip() + if not line: + continue + # Skip timestamps in zsh extended history format + if line.startswith(":"): + # Format: : timestamp:0;command + if ";" in line: + cmd = line.split(";", 1)[1] + if cmd: + commands.append(cmd) + # Skip fish history format markers + elif line.startswith("- cmd:"): + cmd = line[6:].strip() + if cmd: + commands.append(cmd) + elif not line.startswith("when:"): + commands.append(line) + else: + # Terminal output - look for command prompts + for line in content.split("\n"): + line = line.strip() + if not line: + continue + + # Various prompt patterns + prompt_patterns = [ + r"^\$ (.+)$", # Simple $ prompt + r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+\$ (.+)$", # user@host:path$ cmd + r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+# (.+)$", # root prompt + r"^>>> (.+)$", # Python REPL + r"^\(.*\)\s*\$ (.+)$", # (venv) $ cmd + r"^➜\s+.+\s+(.+)$", # Oh-my-zsh prompt + r"^❯ (.+)$", # Starship prompt + r"^▶ (.+)$", 
# Another prompt style + r"^\[.*\]\$ (.+)$", # [dir]$ cmd + r"^% (.+)$", # % prompt (zsh default) + ] + + for pattern in prompt_patterns: + match = re.match(pattern, line) + if match: + cmd = match.group(1).strip() + if cmd: + commands.append(cmd) + break + + return commands + + def _process_observed_command(self, command: str, source: str = "unknown"): + """Process an observed command and notify about issues with real-time feedback.""" + # Skip empty or very short commands + if not command or len(command.strip()) < 2: + return + + command = command.strip() + + # Skip commands from the Cortex terminal itself + if self._is_cortex_terminal_command(command): + return + + # Skip common shell built-ins that aren't interesting (only if standalone) + skip_commands = ["cd", "ls", "pwd", "clear", "exit", "history", "fg", "bg", "jobs", "alias"] + parts = command.split() + cmd_base = parts[0] if parts else "" + + # Also handle sudo prefix + if cmd_base == "sudo" and len(parts) > 1: + cmd_base = parts[1] + + # Only skip if it's JUST the command with no args + if cmd_base in skip_commands and len(parts) == 1: + return + + # Skip if it looks like a partial command or just an argument + if not any(c.isalpha() for c in cmd_base): + return + + # Avoid duplicates within short time window + with self._lock: + recent = [c for c in self._commands_observed + if c["command"] == command + and (datetime.datetime.now() - datetime.datetime.fromisoformat(c["timestamp"])).seconds < 5] + if recent: + return + + self._commands_observed.append({ + "command": command, + "timestamp": datetime.datetime.now().isoformat(), + "source": source, + "has_error": False, # Will be updated if error is detected + "status": "pending", # pending, success, failed + }) + + # Add to session context for LLM + self._session_context.append(f"$ {command}") + # Keep only last 10 commands for context + if len(self._session_context) > 10: + self._session_context = self._session_context[-10:] + + # Real-time feedback with 
visual emphasis + self._show_realtime_feedback(command, source) + + # For live terminal commands, proactively check the result + if source == "live_terminal": + self._check_command_result(command) + + # Check for issues and provide help + issues = self._check_command_issues(command) + if issues: + from rich.panel import Panel + console.print(Panel( + f"[bold yellow]⚠ Issue:[/bold yellow] {issues}", + border_style="yellow", + padding=(0, 1), + expand=False, + )) + if self.notification_callback: + self.notification_callback(f"Cortex: Issue detected", issues) + + # Check if command matches expected commands + if self._expected_commands: + matched = self._check_command_match(command) + from rich.panel import Panel + if matched: + console.print(Panel( + "[bold green]✓ Matches expected command[/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + )) + else: + # User ran a DIFFERENT command than expected + console.print(Panel( + "[bold yellow]⚠ Not in expected commands[/bold yellow]", + border_style="yellow", + padding=(0, 1), + expand=False, + )) + # Send notification with the correct command(s) + self._notify_wrong_command(command) + + def _check_command_match(self, command: str) -> bool: + """Check if a command matches any expected command.""" + if not self._expected_commands: + return True # No expected commands means anything goes + + cmd_normalized = command.strip().lower() + # Remove sudo prefix for comparison + if cmd_normalized.startswith("sudo "): + cmd_normalized = cmd_normalized[5:].strip() + + for expected in self._expected_commands: + exp_normalized = expected.strip().lower() + if exp_normalized.startswith("sudo "): + exp_normalized = exp_normalized[5:].strip() + + # Check for exact match or if command contains the expected command + if cmd_normalized == exp_normalized: + return True + if exp_normalized in cmd_normalized: + return True + if cmd_normalized in exp_normalized: + return True + + # Check if first words match (e.g., "systemctl 
restart nginx" vs "systemctl restart nginx.service") + cmd_parts = cmd_normalized.split() + exp_parts = exp_normalized.split() + if len(cmd_parts) >= 2 and len(exp_parts) >= 2: + if cmd_parts[0] == exp_parts[0] and cmd_parts[1] == exp_parts[1]: + return True + + return False + + def _notify_wrong_command(self, wrong_command: str): + """Send desktop notification when user runs wrong command.""" + if not self._expected_commands: + return + + # Find the most relevant expected command + correct_cmd = self._expected_commands[0] if self._expected_commands else None + + if correct_cmd: + title = "⚠️ Cortex: Wrong Command" + body = f"You ran: {wrong_command[:40]}...\n\nExpected: {correct_cmd}" + + try: + import subprocess + subprocess.run([ + "notify-send", + "--urgency=critical", + "--icon=dialog-warning", + "--expire-time=10000", + title, + body + ], capture_output=True, timeout=2) + except Exception: + pass + + # Also show in console + console.print(f" [bold yellow]📢 Expected command:[/bold yellow] [cyan]{correct_cmd}[/cyan]") + + def _notify_fixing_command(self, original_cmd: str, fix_cmd: str): + """Send notification that Cortex is fixing a command error.""" + title = "🔧 Cortex: Fixing Error" + body = f"Command failed: {original_cmd[:30]}...\n\nFix: {fix_cmd}" + + try: + import subprocess + subprocess.run([ + "notify-send", + "--urgency=normal", + "--icon=dialog-information", + "--expire-time=8000", + title, + body + ], capture_output=True, timeout=2) + except Exception: + pass + + def _check_command_result(self, command: str): + """Proactively check if a command succeeded by running verification commands.""" + import subprocess + import time + + # Wait a moment for the command to complete + time.sleep(0.5) + + cmd_lower = command.lower().strip() + check_cmd = None + error_output = None + + # Determine what check to run based on the command + if "systemctl" in cmd_lower: + # Extract service name + parts = command.split() + service_name = None + for i, p in 
enumerate(parts): + if p in ["start", "stop", "restart", "reload", "enable", "disable"]: + if i + 1 < len(parts): + service_name = parts[i + 1] + break + + if service_name: + check_cmd = f"systemctl status {service_name} 2>&1 | head -5" + + elif "service" in cmd_lower and "status" not in cmd_lower: + # Extract service name for service command + parts = command.split() + if len(parts) >= 3: + service_name = parts[1] if parts[0] != "sudo" else parts[2] + check_cmd = f"service {service_name} status 2>&1 | head -5" + + elif "docker" in cmd_lower: + if "run" in cmd_lower or "start" in cmd_lower: + # Get container name if present + parts = command.split() + container_name = None + for i, p in enumerate(parts): + if p == "--name" and i + 1 < len(parts): + container_name = parts[i + 1] + break + + if container_name: + check_cmd = f"docker ps -f name={container_name} --format '{{{{.Status}}}}' 2>&1" + else: + check_cmd = "docker ps -l --format '{{.Status}} {{.Names}}' 2>&1" + elif "stop" in cmd_lower or "rm" in cmd_lower: + check_cmd = "docker ps -a -l --format '{{.Status}} {{.Names}}' 2>&1" + + elif "nginx" in cmd_lower and "-t" in cmd_lower: + check_cmd = "nginx -t 2>&1" + + elif "apt" in cmd_lower or "apt-get" in cmd_lower: + # Check for recent apt errors + check_cmd = "tail -3 /var/log/apt/term.log 2>/dev/null || echo 'ok'" + + # Run the check command if we have one + if check_cmd: + try: + result = subprocess.run( + check_cmd, + shell=True, + capture_output=True, + text=True, + timeout=5 + ) + + output = result.stdout + result.stderr + + # Check for error indicators in the output + error_indicators = [ + "failed", "error", "not found", "inactive", "dead", + "could not", "unable", "denied", "cannot", "exited", + "not running", "not loaded" + ] + + has_error = any(ind in output.lower() for ind in error_indicators) + + if has_error or result.returncode != 0: + error_output = output + + except (subprocess.TimeoutExpired, Exception): + pass + + # If we found an error, mark 
the command and process it with auto-fix + if error_output: + console.print(f" [dim]checking...[/dim]") + # Mark this command as having an error + with self._lock: + for obs in self._commands_observed: + if obs["command"] == command: + obs["has_error"] = True + obs["status"] = "failed" + break + self._process_observed_command_with_output(command, error_output, "live_terminal_check") + else: + # Mark as success if check passed + with self._lock: + for obs in self._commands_observed: + if obs["command"] == command and obs["status"] == "pending": + obs["status"] = "success" + break + + def _show_realtime_feedback(self, command: str, source: str): + """Show real-time visual feedback for detected commands.""" + if not self._show_live_output: + return + + from rich.panel import Panel + from rich.text import Text + + # Source icons and labels + source_info = { + "cursor": ("🖥️", "Cursor IDE", "cyan"), + "external": ("🌐", "External Terminal", "blue"), + "tmux": ("📺", "Tmux", "magenta"), + "bash": ("📝", "Bash", "green"), + "zsh": ("📝", "Zsh", "green"), + "fish": ("🐟", "Fish", "yellow"), + } + + # Determine source type + icon, label, color = "📝", "Terminal", "white" + for key, (i, l, c) in source_info.items(): + if key in source.lower(): + icon, label, color = i, l, c + break + + # Categorize command + cmd_category = self._categorize_command(command) + category_icons = { + "docker": "🐳", + "git": "📦", + "apt": "📦", + "pip": "🐍", + "npm": "📦", + "systemctl": "⚙️", + "service": "⚙️", + "sudo": "🔐", + "ssh": "🔗", + "curl": "🌐", + "wget": "⬇️", + "mkdir": "📁", + "rm": "🗑️", + "cp": "📋", + "mv": "📋", + "cat": "📄", + "vim": "📝", + "nano": "📝", + "nginx": "🌐", + "python": "🐍", + "node": "📗", + } + cmd_icon = category_icons.get(cmd_category, "▶") + + # Format timestamp + timestamp = datetime.datetime.now().strftime("%H:%M:%S") + + # Store in buffer for later reference + self._output_buffer.append({ + "timestamp": timestamp, + "source": source, + "label": label, + "icon": icon, + 
"color": color, + "command": command, + "cmd_icon": cmd_icon, + }) + + # Print real-time feedback with bordered section + analysis = self._analyze_command(command) + + from rich.panel import Panel + from rich.text import Text + + # Build command display + cmd_text = Text() + cmd_text.append(f"{cmd_icon} ", style="bold") + cmd_text.append(command, style="bold white") + if analysis: + cmd_text.append(f"\n {analysis}", style="dim italic") + + console.print() + console.print(Panel( + cmd_text, + title=f"[dim]{timestamp}[/dim]", + title_align="right", + border_style="blue", + padding=(0, 1), + )) + + def _categorize_command(self, command: str) -> str: + """Categorize a command by its base command.""" + cmd_parts = command.split() + if not cmd_parts: + return "unknown" + + base = cmd_parts[0] + if base == "sudo" and len(cmd_parts) > 1: + base = cmd_parts[1] + + return base.lower() + + def _analyze_command(self, command: str) -> str | None: + """Analyze a command and return a brief description using LLM or patterns.""" + cmd_lower = command.lower() + + # First try pattern matching for speed + patterns = [ + (r"docker run", "Starting a Docker container"), + (r"docker pull", "Pulling a Docker image"), + (r"docker ps", "Listing Docker containers"), + (r"docker exec", "Executing command in container"), + (r"docker build", "Building Docker image"), + (r"docker stop", "Stopping container"), + (r"docker rm", "Removing container"), + (r"git clone", "Cloning a repository"), + (r"git pull", "Pulling latest changes"), + (r"git push", "Pushing changes"), + (r"git commit", "Committing changes"), + (r"git status", "Checking repository status"), + (r"apt install", "Installing package via apt"), + (r"apt update", "Updating package list"), + (r"pip install", "Installing Python package"), + (r"npm install", "Installing Node.js package"), + (r"systemctl start", "Starting a service"), + (r"systemctl stop", "Stopping a service"), + (r"systemctl restart", "Restarting a service"), + 
(r"systemctl status", "Checking service status"), + (r"nginx -t", "Testing Nginx configuration"), + (r"curl", "Making HTTP request"), + (r"wget", "Downloading file"), + (r"ssh", "SSH connection"), + (r"mkdir", "Creating directory"), + (r"rm -rf", "Removing files/directories recursively"), + (r"cp ", "Copying files"), + (r"mv ", "Moving/renaming files"), + (r"chmod", "Changing file permissions"), + (r"chown", "Changing file ownership"), + ] + + for pattern, description in patterns: + if re.search(pattern, cmd_lower): + return description + + # Use LLM for unknown commands + if self._llm and self._use_llm and self._llm.is_available(): + return self._llm_analyze_command(command) + + return None + + def _llm_analyze_command(self, command: str) -> str | None: + """Use local LLM to analyze a command.""" + if not self._llm: + return None + + prompt = f"""Analyze this Linux command and respond with ONLY a brief description (max 10 words) of what it does: + +Command: {command} + +Brief description:""" + + try: + result = self._llm.analyze(prompt, max_tokens=30, timeout=5) + if result: + # Clean up the response + result = result.strip().strip('"').strip("'") + # Take only first line + result = result.split("\n")[0].strip() + # Limit length + if len(result) > 60: + result = result[:57] + "..." 
+ return result + except Exception: + pass + + return None + + def _check_command_issues(self, command: str) -> str | None: + """Check if a command has potential issues and return a warning.""" + issues = [] + + if any(p in command for p in ["/etc/", "/var/", "/usr/"]): + if not command.startswith("sudo") and not command.startswith("cat"): + issues.append("May need sudo for system files") + + if "rm -rf /" in command: + issues.append("DANGER: Destructive command detected!") + + typo_checks = { + "sudp": "sudo", + "suod": "sudo", + "cta": "cat", + "mdir": "mkdir", + "mkidr": "mkdir", + } + for typo, correct in typo_checks.items(): + if command.startswith(typo + " "): + issues.append(f"Typo? Did you mean '{correct}'?") + + return "; ".join(issues) if issues else None + diff --git a/cortex/do_runner/verification.py b/cortex/do_runner/verification.py new file mode 100644 index 000000000..c13a3c040 --- /dev/null +++ b/cortex/do_runner/verification.py @@ -0,0 +1,1050 @@ +"""Verification and conflict detection for the Do Runner module.""" + +import os +import re +import subprocess +import time +from typing import Any + +from rich.console import Console + +from .models import CommandLog + +console = Console() + + +class ConflictDetector: + """Detects conflicts with existing configurations.""" + + def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + """Execute a single command.""" + try: + if needs_sudo and not cmd.strip().startswith("sudo"): + cmd = f"sudo {cmd}" + + result = subprocess.run( + ["sudo", "bash", "-c", cmd] if needs_sudo else cmd, + shell=not needs_sudo, + capture_output=True, + text=True, + timeout=timeout, + ) + return result.returncode == 0, result.stdout.strip(), result.stderr.strip() + except subprocess.TimeoutExpired: + return False, "", f"Command timed out after {timeout} seconds" + except Exception as e: + return False, "", str(e) + + def check_for_conflicts( + self, + cmd: str, + purpose: str, 
+ ) -> dict[str, Any]: + """ + Check if the command might conflict with existing resources. + + This is a GENERAL conflict detector that works for: + - Docker containers + - Services (systemd) + - Files/directories + - Packages + - Databases + - Users/groups + - Ports + - Virtual environments + - And more... + + Returns: + Dict with conflict info, alternatives, and cleanup commands. + """ + # Check all resource types + checkers = [ + self._check_docker_conflict, + self._check_service_conflict, + self._check_file_conflict, + self._check_package_conflict, + self._check_port_conflict, + self._check_user_conflict, + self._check_venv_conflict, + self._check_database_conflict, + self._check_cron_conflict, + ] + + for checker in checkers: + result = checker(cmd, purpose) + if result["has_conflict"]: + return result + + # Default: no conflict + return { + "has_conflict": False, + "conflict_type": None, + "resource_type": None, + "resource_name": None, + "suggestion": None, + "cleanup_commands": [], + "alternative_actions": [], + } + + def _create_conflict_result( + self, + resource_type: str, + resource_name: str, + conflict_type: str, + suggestion: str, + is_active: bool = True, + alternative_actions: list[dict] | None = None, + ) -> dict[str, Any]: + """Create a standardized conflict result with alternatives.""" + + # Generate standard alternative actions based on resource type and state + if alternative_actions is None: + if is_active: + alternative_actions = [ + { + "action": "use_existing", + "description": f"Use existing {resource_type} '{resource_name}'", + "commands": [], + }, + { + "action": "restart", + "description": f"Restart {resource_type} '{resource_name}'", + "commands": self._get_restart_commands(resource_type, resource_name), + }, + { + "action": "recreate", + "description": f"Remove and recreate {resource_type} '{resource_name}'", + "commands": self._get_remove_commands(resource_type, resource_name), + }, + ] + else: + alternative_actions = [ + { + 
"action": "start_existing", + "description": f"Start existing {resource_type} '{resource_name}'", + "commands": self._get_start_commands(resource_type, resource_name), + }, + { + "action": "recreate", + "description": f"Remove and recreate {resource_type} '{resource_name}'", + "commands": self._get_remove_commands(resource_type, resource_name), + }, + ] + + return { + "has_conflict": True, + "conflict_type": conflict_type, + "resource_type": resource_type, + "resource_name": resource_name, + "suggestion": suggestion, + "is_active": is_active, + "alternative_actions": alternative_actions, + "cleanup_commands": [], + "use_existing": is_active, + } + + def _get_restart_commands(self, resource_type: str, name: str) -> list[str]: + """Get restart commands for a resource type.""" + commands = { + "container": [f"docker restart {name}"], + "service": [f"sudo systemctl restart {name}"], + "database": [f"sudo systemctl restart {name}"], + "webserver": [f"sudo systemctl restart {name}"], + } + return commands.get(resource_type, []) + + def _get_start_commands(self, resource_type: str, name: str) -> list[str]: + """Get start commands for a resource type.""" + commands = { + "container": [f"docker start {name}"], + "service": [f"sudo systemctl start {name}"], + "database": [f"sudo systemctl start {name}"], + "webserver": [f"sudo systemctl start {name}"], + } + return commands.get(resource_type, []) + + def _get_remove_commands(self, resource_type: str, name: str) -> list[str]: + """Get remove/cleanup commands for a resource type.""" + commands = { + "container": [f"docker rm -f {name}"], + "service": [f"sudo systemctl stop {name}"], + "file": [f"sudo rm -f {name}"], + "directory": [f"sudo rm -rf {name}"], + "package": [], # Don't auto-remove packages + "user": [], # Don't auto-remove users + "venv": [f"rm -rf {name}"], + "database": [], # Don't auto-remove databases + } + return commands.get(resource_type, []) + + def _check_docker_conflict(self, cmd: str, purpose: str) -> 
dict[str, Any]: + """Check for Docker container/compose conflicts.""" + result = {"has_conflict": False} + + # Docker run with --name + if "docker run" in cmd.lower(): + name_match = re.search(r'--name\s+([^\s]+)', cmd) + if name_match: + container_name = name_match.group(1) + + # Check if container exists + success, container_id, _ = self._execute_command( + f"docker ps -aq --filter name=^{container_name}$", needs_sudo=False + ) + + if success and container_id.strip(): + # Check if running + running_success, running_id, _ = self._execute_command( + f"docker ps -q --filter name=^{container_name}$", needs_sudo=False + ) + is_running = running_success and running_id.strip() + + # Get image info + _, image_info, _ = self._execute_command( + f"docker inspect --format '{{{{.Config.Image}}}}' {container_name}", needs_sudo=False + ) + image = image_info.strip() if image_info else "unknown" + + status = "running" if is_running else "stopped" + return self._create_conflict_result( + resource_type="container", + resource_name=container_name, + conflict_type=f"container_{status}", + suggestion=f"Container '{container_name}' already exists ({status}, image: {image})", + is_active=is_running, + ) + + # Docker compose + if "docker-compose" in cmd.lower() or "docker compose" in cmd.lower(): + if "up" in cmd: + success, services, _ = self._execute_command("docker compose ps -q 2>/dev/null", needs_sudo=False) + if success and services.strip(): + return self._create_conflict_result( + resource_type="compose", + resource_name="docker-compose", + conflict_type="compose_running", + suggestion="Docker Compose services are already running", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": "Keep existing services", "commands": []}, + {"action": "restart", "description": "Restart services", "commands": ["docker compose restart"]}, + {"action": "recreate", "description": "Recreate services", "commands": ["docker compose down", "docker compose up -d"]}, + 
] + ) + + return result + + def _check_service_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for systemd service conflicts.""" + result = {"has_conflict": False} + + # systemctl start/enable + if "systemctl" in cmd: + service_match = re.search(r'systemctl\s+(start|enable|restart)\s+([^\s]+)', cmd) + if service_match: + action = service_match.group(1) + service = service_match.group(2).replace('.service', '') + + success, status, _ = self._execute_command( + f"systemctl is-active {service} 2>/dev/null", needs_sudo=False + ) + + if action in ["start", "enable"] and status.strip() == "active": + return self._create_conflict_result( + resource_type="service", + resource_name=service, + conflict_type="service_running", + suggestion=f"Service '{service}' is already running", + is_active=True, + ) + + # service command + if cmd.startswith("service ") or " service " in cmd: + service_match = re.search(r'service\s+(\S+)\s+(start|restart)', cmd) + if service_match: + service = service_match.group(1) + success, status, _ = self._execute_command( + f"systemctl is-active {service} 2>/dev/null", needs_sudo=False + ) + if status.strip() == "active": + return self._create_conflict_result( + resource_type="service", + resource_name=service, + conflict_type="service_running", + suggestion=f"Service '{service}' is already running", + is_active=True, + ) + + return result + + def _check_file_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for file/directory conflicts.""" + result = {"has_conflict": False} + paths_in_cmd = re.findall(r'(/[^\s>|]+)', cmd) + + for path in paths_in_cmd: + # Skip common read paths + if path in ["/dev/null", "/etc/os-release", "/proc/", "/sys/"]: + continue + + # Check for file creation/modification commands + is_write_cmd = any(p in cmd for p in [">" , "tee ", "cp ", "mv ", "touch ", "mkdir ", "echo "]) + + if is_write_cmd and os.path.exists(path): + is_dir = os.path.isdir(path) + resource_type = "directory" if 
is_dir else "file" + + return self._create_conflict_result( + resource_type=resource_type, + resource_name=path, + conflict_type=f"{resource_type}_exists", + suggestion=f"{resource_type.title()} '{path}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Keep existing {resource_type}", "commands": []}, + {"action": "backup", "description": f"Backup and overwrite", "commands": [f"sudo cp -r {path} {path}.cortex.bak"]}, + {"action": "recreate", "description": f"Remove and recreate", "commands": [f"sudo rm -rf {path}" if is_dir else f"sudo rm -f {path}"]}, + ] + ) + + return result + + def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for package installation conflicts.""" + result = {"has_conflict": False} + + # apt install + if "apt install" in cmd or "apt-get install" in cmd: + pkg_match = re.search(r'(?:apt|apt-get)\s+install\s+(?:-y\s+)?(\S+)', cmd) + if pkg_match: + package = pkg_match.group(1) + success, _, _ = self._execute_command(f"dpkg -l {package} 2>/dev/null | grep -q '^ii'", needs_sudo=False) + if success: + # Get version + _, version_out, _ = self._execute_command(f"dpkg -l {package} | grep '^ii' | awk '{{print $3}}'", needs_sudo=False) + version = version_out.strip() if version_out else "unknown" + + return self._create_conflict_result( + resource_type="package", + resource_name=package, + conflict_type="package_installed", + suggestion=f"Package '{package}' is already installed (version: {version})", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Keep current version ({version})", "commands": []}, + {"action": "upgrade", "description": "Upgrade to latest version", "commands": [f"sudo apt install --only-upgrade -y {package}"]}, + {"action": "reinstall", "description": "Reinstall package", "commands": [f"sudo apt install --reinstall -y {package}"]}, + ] + ) + + # pip install + if "pip install" in cmd or "pip3 install" in 
cmd: + pkg_match = re.search(r'pip3?\s+install\s+(?:-[^\s]+\s+)*(\S+)', cmd) + if pkg_match: + package = pkg_match.group(1) + success, version_out, _ = self._execute_command(f"pip3 show {package} 2>/dev/null | grep Version", needs_sudo=False) + if success and version_out: + version = version_out.replace("Version:", "").strip() + return self._create_conflict_result( + resource_type="pip_package", + resource_name=package, + conflict_type="pip_package_installed", + suggestion=f"Python package '{package}' is already installed (version: {version})", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Keep current version ({version})", "commands": []}, + {"action": "upgrade", "description": "Upgrade to latest", "commands": [f"pip3 install --upgrade {package}"]}, + {"action": "reinstall", "description": "Reinstall package", "commands": [f"pip3 install --force-reinstall {package}"]}, + ] + ) + + # npm install -g + if "npm install -g" in cmd or "npm i -g" in cmd: + pkg_match = re.search(r'npm\s+(?:install|i)\s+-g\s+(\S+)', cmd) + if pkg_match: + package = pkg_match.group(1) + success, version_out, _ = self._execute_command(f"npm list -g {package} 2>/dev/null | grep {package}", needs_sudo=False) + if success and version_out: + return self._create_conflict_result( + resource_type="npm_package", + resource_name=package, + conflict_type="npm_package_installed", + suggestion=f"npm package '{package}' is already installed globally", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": "Keep current version", "commands": []}, + {"action": "upgrade", "description": "Update to latest", "commands": [f"npm update -g {package}"]}, + ] + ) + + # snap install - check if snap is available and package is installed + if "snap install" in cmd: + # First check if snap is available + snap_available = self._check_tool_available("snap") + if not snap_available: + return self._create_conflict_result( + resource_type="tool", + 
resource_name="snap", + conflict_type="tool_not_available", + suggestion="Snap package manager is not installed. Installing snap first.", + is_active=False, + alternative_actions=[ + {"action": "install_first", "description": "Install snapd first", "commands": ["sudo apt update", "sudo apt install -y snapd"]}, + {"action": "use_apt", "description": "Use apt instead of snap", "commands": []}, + ] + ) + + pkg_match = re.search(r'snap\s+install\s+(\S+)', cmd) + if pkg_match: + package = pkg_match.group(1) + success, version_out, _ = self._execute_command(f"snap list {package} 2>/dev/null | grep {package}", needs_sudo=False) + if success and version_out: + return self._create_conflict_result( + resource_type="snap_package", + resource_name=package, + conflict_type="snap_package_installed", + suggestion=f"Snap package '{package}' is already installed", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": "Keep current version", "commands": []}, + {"action": "refresh", "description": "Refresh to latest", "commands": [f"sudo snap refresh {package}"]}, + ] + ) + + # flatpak install - check if flatpak is available and package is installed + if "flatpak install" in cmd: + # First check if flatpak is available + flatpak_available = self._check_tool_available("flatpak") + if not flatpak_available: + return self._create_conflict_result( + resource_type="tool", + resource_name="flatpak", + conflict_type="tool_not_available", + suggestion="Flatpak is not installed. 
Installing flatpak first.", + is_active=False, + alternative_actions=[ + {"action": "install_first", "description": "Install flatpak first", "commands": ["sudo apt update", "sudo apt install -y flatpak"]}, + {"action": "use_apt", "description": "Use apt instead of flatpak", "commands": []}, + ] + ) + + pkg_match = re.search(r'flatpak\s+install\s+(?:-y\s+)?(\S+)', cmd) + if pkg_match: + package = pkg_match.group(1) + success, version_out, _ = self._execute_command(f"flatpak list | grep -i {package}", needs_sudo=False) + if success and version_out: + return self._create_conflict_result( + resource_type="flatpak_package", + resource_name=package, + conflict_type="flatpak_package_installed", + suggestion=f"Flatpak application '{package}' is already installed", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": "Keep current version", "commands": []}, + {"action": "upgrade", "description": "Update to latest", "commands": [f"flatpak update -y {package}"]}, + ] + ) + + return result + + def _check_tool_available(self, tool: str) -> bool: + """Check if a command-line tool is available.""" + success, output, _ = self._execute_command(f"which {tool} 2>/dev/null", needs_sudo=False) + return success and bool(output.strip()) + + def _check_port_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for port binding conflicts.""" + result = {"has_conflict": False} + + # Look for port mappings + port_patterns = [ + r'-p\s+(\d+):\d+', # docker -p 8080:80 + r'--port[=\s]+(\d+)', # --port 8080 + r':(\d+)\s', # :8080 + r'listen\s+(\d+)', # nginx listen 80 + ] + + for pattern in port_patterns: + match = re.search(pattern, cmd) + if match: + port = match.group(1) + + # Check if port is in use + success, output, _ = self._execute_command(f"ss -tlnp | grep ':{port} '", needs_sudo=True) + if success and output: + # Get process using the port + process = "unknown" + proc_match = re.search(r'users:\(\("([^"]+)"', output) + if proc_match: + 
process = proc_match.group(1) + + return self._create_conflict_result( + resource_type="port", + resource_name=port, + conflict_type="port_in_use", + suggestion=f"Port {port} is already in use by '{process}'", + is_active=True, + alternative_actions=[ + {"action": "use_different", "description": f"Use a different port", "commands": []}, + {"action": "stop_existing", "description": f"Stop process using port {port}", "commands": [f"sudo fuser -k {port}/tcp"]}, + ] + ) + + return result + + def _check_user_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for user/group creation conflicts.""" + result = {"has_conflict": False} + + # useradd / adduser + if "useradd" in cmd or "adduser" in cmd: + user_match = re.search(r'(?:useradd|adduser)\s+(?:[^\s]+\s+)*(\S+)$', cmd) + if user_match: + username = user_match.group(1) + success, _, _ = self._execute_command(f"id {username} 2>/dev/null", needs_sudo=False) + if success: + return self._create_conflict_result( + resource_type="user", + resource_name=username, + conflict_type="user_exists", + suggestion=f"User '{username}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Use existing user '{username}'", "commands": []}, + {"action": "modify", "description": f"Modify existing user", "commands": []}, + ] + ) + + # groupadd / addgroup + if "groupadd" in cmd or "addgroup" in cmd: + group_match = re.search(r'(?:groupadd|addgroup)\s+(\S+)$', cmd) + if group_match: + groupname = group_match.group(1) + success, _, _ = self._execute_command(f"getent group {groupname} 2>/dev/null", needs_sudo=False) + if success: + return self._create_conflict_result( + resource_type="group", + resource_name=groupname, + conflict_type="group_exists", + suggestion=f"Group '{groupname}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Use existing group '{groupname}'", "commands": []}, + ] + ) + + return result + + def 
_check_venv_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for virtual environment conflicts.""" + result = {"has_conflict": False} + + # python -m venv / virtualenv + if "python" in cmd and "venv" in cmd: + venv_match = re.search(r'(?:venv|virtualenv)\s+(\S+)', cmd) + if venv_match: + venv_path = venv_match.group(1) + if os.path.exists(venv_path) and os.path.exists(os.path.join(venv_path, "bin", "python")): + return self._create_conflict_result( + resource_type="venv", + resource_name=venv_path, + conflict_type="venv_exists", + suggestion=f"Virtual environment '{venv_path}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Use existing venv", "commands": []}, + {"action": "recreate", "description": "Delete and recreate", "commands": [f"rm -rf {venv_path}"]}, + ] + ) + + return result + + def _check_database_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for database creation conflicts.""" + result = {"has_conflict": False} + + # MySQL/MariaDB create database + if "mysql" in cmd.lower() and "create database" in cmd.lower(): + db_match = re.search(r'create\s+database\s+(?:if\s+not\s+exists\s+)?(\S+)', cmd, re.IGNORECASE) + if db_match: + dbname = db_match.group(1).strip('`"\'') + success, output, _ = self._execute_command( + f"mysql -e \"SHOW DATABASES LIKE '{dbname}'\" 2>/dev/null", needs_sudo=False + ) + if success and dbname in output: + return self._create_conflict_result( + resource_type="mysql_database", + resource_name=dbname, + conflict_type="database_exists", + suggestion=f"MySQL database '{dbname}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Use existing database", "commands": []}, + {"action": "recreate", "description": "Drop and recreate", "commands": [f"mysql -e 'DROP DATABASE {dbname}'"]}, + ] + ) + + # PostgreSQL create database + if "createdb" in cmd or ("psql" in cmd and "create database" in 
cmd.lower()): + db_match = re.search(r'(?:createdb|create\s+database)\s+(\S+)', cmd, re.IGNORECASE) + if db_match: + dbname = db_match.group(1).strip('"\'') + success, _, _ = self._execute_command( + f"psql -lqt 2>/dev/null | cut -d \\| -f 1 | grep -qw {dbname}", needs_sudo=False + ) + if success: + return self._create_conflict_result( + resource_type="postgres_database", + resource_name=dbname, + conflict_type="database_exists", + suggestion=f"PostgreSQL database '{dbname}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": f"Use existing database", "commands": []}, + {"action": "recreate", "description": "Drop and recreate", "commands": [f"dropdb {dbname}"]}, + ] + ) + + return result + + def _check_cron_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: + """Check for cron job conflicts.""" + result = {"has_conflict": False} + + # crontab entries + if "crontab" in cmd or "/etc/cron" in cmd: + # Check if similar cron job exists + if "echo" in cmd and ">>" in cmd: + # Extract the command being added + job_match = re.search(r"echo\s+['\"]([^'\"]+)['\"]", cmd) + if job_match: + job_content = job_match.group(1) + # Check existing crontab + success, crontab, _ = self._execute_command("crontab -l 2>/dev/null", needs_sudo=False) + if success and crontab: + # Check if similar job exists + job_cmd = job_content.split()[-1] if job_content else "" + if job_cmd and job_cmd in crontab: + return self._create_conflict_result( + resource_type="cron_job", + resource_name=job_cmd, + conflict_type="cron_exists", + suggestion=f"Similar cron job for '{job_cmd}' already exists", + is_active=True, + alternative_actions=[ + {"action": "use_existing", "description": "Keep existing cron job", "commands": []}, + {"action": "replace", "description": "Replace existing job", "commands": []}, + ] + ) + + return result + + +class VerificationRunner: + """Runs verification tests after command execution.""" + + def _execute_command(self, 
cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + """Execute a single command.""" + try: + if needs_sudo and not cmd.strip().startswith("sudo"): + cmd = f"sudo {cmd}" + + result = subprocess.run( + ["sudo", "bash", "-c", cmd] if needs_sudo else cmd, + shell=not needs_sudo, + capture_output=True, + text=True, + timeout=timeout, + ) + return result.returncode == 0, result.stdout.strip(), result.stderr.strip() + except subprocess.TimeoutExpired: + return False, "", f"Command timed out after {timeout} seconds" + except Exception as e: + return False, "", str(e) + + def run_verification_tests( + self, + commands_executed: list[CommandLog], + user_query: str, + ) -> tuple[bool, list[dict[str, Any]]]: + """ + Run verification tests after all commands have been executed. + + Returns: + Tuple of (all_passed, test_results) + """ + console.print() + console.print("[bold cyan]🧪 Running verification tests...[/bold cyan]") + + test_results = [] + services_to_check = set() + configs_to_check = set() + files_to_check = set() + + for cmd_log in commands_executed: + cmd = cmd_log.command.lower() + + if "systemctl" in cmd or "service " in cmd: + svc_match = re.search(r'(?:systemctl|service)\s+\w+\s+([^\s]+)', cmd) + if svc_match: + services_to_check.add(svc_match.group(1).replace('.service', '')) + + if "nginx" in cmd: + configs_to_check.add("nginx") + if "apache" in cmd or "a2ensite" in cmd: + configs_to_check.add("apache") + + paths = re.findall(r'(/[^\s>|&]+)', cmd_log.command) + for path in paths: + if any(x in path for x in ['/etc/', '/var/', '/opt/']): + files_to_check.add(path) + + all_passed = True + + # Config tests + if "nginx" in configs_to_check: + console.print("[dim] Testing nginx configuration...[/dim]") + success, stdout, stderr = self._execute_command("nginx -t", needs_sudo=True) + test_results.append({ + "test": "nginx -t", + "passed": success, + "output": stdout if success else stderr, + }) + if success: + console.print("[green] ✓ 
Nginx configuration is valid[/green]") + else: + console.print(f"[red] ✗ Nginx config test failed: {stderr[:100]}[/red]") + all_passed = False + + if "apache" in configs_to_check: + console.print("[dim] Testing Apache configuration...[/dim]") + success, stdout, stderr = self._execute_command("apache2ctl configtest", needs_sudo=True) + test_results.append({ + "test": "apache2ctl configtest", + "passed": success, + "output": stdout if success else stderr, + }) + if success: + console.print("[green] ✓ Apache configuration is valid[/green]") + else: + console.print(f"[red] ✗ Apache config test failed: {stderr[:100]}[/red]") + all_passed = False + + # Service status tests + for service in services_to_check: + console.print(f"[dim] Checking service {service}...[/dim]") + success, stdout, stderr = self._execute_command( + f"systemctl is-active {service}", needs_sudo=False + ) + is_active = stdout.strip() == "active" + test_results.append({ + "test": f"systemctl is-active {service}", + "passed": is_active, + "output": stdout, + }) + if is_active: + console.print(f"[green] ✓ Service {service} is running[/green]") + else: + console.print(f"[yellow] ⚠ Service {service} status: {stdout.strip()}[/yellow]") + + # File existence tests + for file_path in list(files_to_check)[:5]: + if os.path.exists(file_path): + success, _, _ = self._execute_command(f"test -r {file_path}", needs_sudo=True) + test_results.append({ + "test": f"file exists: {file_path}", + "passed": True, + "output": "File exists and is readable", + }) + else: + test_results.append({ + "test": f"file exists: {file_path}", + "passed": False, + "output": "File does not exist", + }) + console.print(f"[yellow] ⚠ File not found: {file_path}[/yellow]") + + # Connectivity tests + query_lower = user_query.lower() + if any(x in query_lower for x in ["proxy", "forward", "port", "listen"]): + port_match = re.search(r'port\s*(\d+)|:(\d+)', user_query) + if port_match: + port = port_match.group(1) or port_match.group(2) + 
console.print(f"[dim] Testing connectivity on port {port}...[/dim]") + success, stdout, stderr = self._execute_command( + f"curl -s -o /dev/null -w '%{{http_code}}' http://localhost:{port}/ 2>/dev/null || echo 'failed'", + needs_sudo=False + ) + if stdout.strip() not in ["failed", "000", ""]: + console.print(f"[green] ✓ Port {port} responding (HTTP {stdout.strip()})[/green]") + test_results.append({ + "test": f"curl localhost:{port}", + "passed": True, + "output": f"HTTP {stdout.strip()}", + }) + else: + console.print(f"[yellow] ⚠ Port {port} not responding (may be expected)[/yellow]") + + # Summary + passed = sum(1 for t in test_results if t["passed"]) + total = len(test_results) + + console.print() + if all_passed: + console.print(f"[bold green]✓ All tests passed ({passed}/{total})[/bold green]") + else: + console.print(f"[bold yellow]⚠ Some tests failed ({passed}/{total} passed)[/bold yellow]") + + return all_passed, test_results + + +class FileUsefulnessAnalyzer: + """Analyzes file content usefulness for modifications.""" + + def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + """Execute a single command.""" + try: + if needs_sudo and not cmd.strip().startswith("sudo"): + cmd = f"sudo {cmd}" + + result = subprocess.run( + ["sudo", "bash", "-c", cmd] if needs_sudo else cmd, + shell=not needs_sudo, + capture_output=True, + text=True, + timeout=timeout, + ) + return result.returncode == 0, result.stdout.strip(), result.stderr.strip() + except subprocess.TimeoutExpired: + return False, "", f"Command timed out after {timeout} seconds" + except Exception as e: + return False, "", str(e) + + def check_file_exists_and_usefulness( + self, + cmd: str, + purpose: str, + user_query: str, + ) -> dict[str, Any]: + """Check if files the command creates already exist and analyze their usefulness.""" + result = { + "files_checked": [], + "existing_files": [], + "useful_content": {}, + "recommendations": [], + 
"modified_command": cmd, + } + + file_creation_patterns = [ + (r'(?:echo|printf)\s+.*?>\s*([^\s;|&]+)', 'write'), + (r'(?:echo|printf)\s+.*?>>\s*([^\s;|&]+)', 'append'), + (r'tee\s+(?:-a\s+)?([^\s;|&]+)', 'write'), + (r'cp\s+[^\s]+\s+([^\s;|&]+)', 'copy'), + (r'touch\s+([^\s;|&]+)', 'create'), + (r'cat\s+.*?>\s*([^\s;|&]+)', 'write'), + (r'sed\s+-i[^\s]*\s+.*?\s+([^\s;|&]+)$', 'modify'), + (r'mv\s+[^\s]+\s+([^\s;|&]+)', 'move'), + ] + + target_files = [] + operation_type = None + + for pattern, op_type in file_creation_patterns: + matches = re.findall(pattern, cmd) + for match in matches: + if match.startswith('/') or match.startswith('~'): + target_files.append(match) + operation_type = op_type + + result["files_checked"] = target_files + + for file_path in target_files: + if file_path.startswith('~'): + file_path = os.path.expanduser(file_path) + + if os.path.exists(file_path): + result["existing_files"].append(file_path) + console.print(f"[yellow]📁 File exists: {file_path}[/yellow]") + + success, content, _ = self._execute_command(f"cat '{file_path}' 2>/dev/null", needs_sudo=True) + + if success and content: + useful_parts = self.analyze_file_usefulness(content, purpose, user_query) + + if useful_parts["is_useful"]: + result["useful_content"][file_path] = useful_parts + console.print(f"[cyan] ✓ Contains useful content: {useful_parts['summary']}[/cyan]") + + if useful_parts["action"] == "merge": + result["recommendations"].append({ + "file": file_path, + "action": "merge", + "reason": useful_parts["reason"], + "keep_sections": useful_parts.get("keep_sections", []), + }) + elif useful_parts["action"] == "modify": + result["recommendations"].append({ + "file": file_path, + "action": "modify", + "reason": useful_parts["reason"], + }) + else: + result["recommendations"].append({ + "file": file_path, + "action": "backup_and_replace", + "reason": "Existing content not relevant", + }) + elif operation_type in ['write', 'copy', 'create']: + parent_dir = 
os.path.dirname(file_path) + if parent_dir and not os.path.exists(parent_dir): + console.print(f"[yellow]📁 Parent directory doesn't exist: {parent_dir}[/yellow]") + result["recommendations"].append({ + "file": file_path, + "action": "create_parent", + "reason": f"Need to create {parent_dir} first", + }) + + return result + + def analyze_file_usefulness( + self, + content: str, + purpose: str, + user_query: str, + ) -> dict[str, Any]: + """Analyze if file content is useful for the current purpose.""" + result = { + "is_useful": False, + "summary": "", + "action": "replace", + "reason": "", + "keep_sections": [], + } + + content_lower = content.lower() + purpose_lower = purpose.lower() + query_lower = user_query.lower() + + # Nginx configuration + if any(x in content_lower for x in ["server {", "location", "nginx", "proxy_pass", "listen"]): + result["is_useful"] = True + + has_server_block = "server {" in content_lower or "server{" in content_lower + has_location = "location" in content_lower + has_proxy = "proxy_pass" in content_lower + has_ssl = "ssl" in content_lower or "443" in content + + summary_parts = [] + if has_server_block: + summary_parts.append("server block") + if has_location: + summary_parts.append("location rules") + if has_proxy: + summary_parts.append("proxy settings") + if has_ssl: + summary_parts.append("SSL config") + + result["summary"] = "Has " + ", ".join(summary_parts) + + if "proxy" in query_lower or "forward" in query_lower: + if has_proxy: + existing_proxy = re.search(r'proxy_pass\s+([^;]+)', content) + if existing_proxy: + result["action"] = "modify" + result["reason"] = f"Existing proxy to {existing_proxy.group(1).strip()}" + else: + result["action"] = "merge" + result["reason"] = "Add proxy to existing server block" + result["keep_sections"] = ["server", "ssl", "location"] + elif "ssl" in query_lower or "https" in query_lower: + if has_ssl: + result["action"] = "modify" + result["reason"] = "SSL already configured, modify as needed" + 
else: + result["action"] = "merge" + result["reason"] = "Add SSL to existing config" + else: + result["action"] = "merge" + result["reason"] = "Preserve existing configuration" + + # Apache configuration + elif any(x in content_lower for x in [" 2: + result["is_useful"] = True + result["summary"] = f"Related content ({len(overlap)} keyword matches)" + result["action"] = "backup_and_replace" + result["reason"] = "Content partially relevant, backing up" + + return result + + def apply_file_recommendations( + self, + recommendations: list[dict[str, Any]], + ) -> list[str]: + """Apply recommendations for existing files.""" + commands_executed = [] + + for rec in recommendations: + file_path = rec["file"] + action = rec["action"] + + if action == "backup_and_replace": + backup_path = f"{file_path}.cortex.bak.{int(time.time())}" + backup_cmd = f"sudo cp '{file_path}' '{backup_path}'" + success, _, _ = self._execute_command(backup_cmd, needs_sudo=True) + if success: + console.print(f"[dim] ✓ Backed up to {backup_path}[/dim]") + commands_executed.append(backup_cmd) + + elif action == "create_parent": + parent = os.path.dirname(file_path) + mkdir_cmd = f"sudo mkdir -p '{parent}'" + success, _, _ = self._execute_command(mkdir_cmd, needs_sudo=True) + if success: + console.print(f"[dim] ✓ Created directory {parent}[/dim]") + commands_executed.append(mkdir_cmd) + + return commands_executed + diff --git a/cortex/semantic_cache.py b/cortex/semantic_cache.py index 4dd8d75dc..89a42f518 100644 --- a/cortex/semantic_cache.py +++ b/cortex/semantic_cache.py @@ -80,10 +80,10 @@ def _ensure_db_directory(self) -> None: db_dir = Path(self.db_path).parent try: db_dir.mkdir(parents=True, exist_ok=True) - # Also check if we can actually write to this directory + # Also check if directory is writable if not os.access(db_dir, os.W_OK): - raise PermissionError(f"No write permission to {db_dir}") - except PermissionError: + raise PermissionError(f"Directory {db_dir} is not writable") + except 
(PermissionError, OSError): user_dir = Path.home() / ".cortex" user_dir.mkdir(parents=True, exist_ok=True) self.db_path = str(user_dir / "cache.db") @@ -94,7 +94,8 @@ def _init_database(self) -> None: with self._pool.get_connection() as conn: cur = conn.cursor() - cur.execute(""" + cur.execute( + """ CREATE TABLE IF NOT EXISTS llm_cache_entries ( id INTEGER PRIMARY KEY AUTOINCREMENT, provider TEXT NOT NULL, @@ -108,22 +109,29 @@ def _init_database(self) -> None: last_accessed TEXT NOT NULL, hit_count INTEGER NOT NULL DEFAULT 0 ) - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_cache_unique ON llm_cache_entries(provider, model, system_hash, prompt_hash) - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE INDEX IF NOT EXISTS idx_llm_cache_lru ON llm_cache_entries(last_accessed) - """) - cur.execute(""" + """ + ) + cur.execute( + """ CREATE TABLE IF NOT EXISTS llm_cache_stats ( id INTEGER PRIMARY KEY CHECK (id = 1), hits INTEGER NOT NULL DEFAULT 0, misses INTEGER NOT NULL DEFAULT 0 ) - """) + """ + ) cur.execute("INSERT OR IGNORE INTO llm_cache_stats(id, hits, misses) VALUES (1, 0, 0)") conn.commit() diff --git a/cortex/system_info_generator.py b/cortex/system_info_generator.py new file mode 100644 index 000000000..5b6f1000b --- /dev/null +++ b/cortex/system_info_generator.py @@ -0,0 +1,800 @@ +""" +System Information Command Generator for Cortex. + +Generates read-only commands using LLM to retrieve system and application information. +All commands are validated against the CommandValidator to ensure they only read the system. 
+ +Usage: + generator = SystemInfoGenerator(api_key="...", provider="claude") + + # Simple info queries + result = generator.get_info("What version of Python is installed?") + + # Application-specific queries + result = generator.get_app_info("nginx", "What's the current nginx configuration?") + + # Structured info retrieval + info = generator.get_structured_info("hardware", ["cpu", "memory", "disk"]) +""" + +import json +import os +import re +import subprocess +from dataclasses import dataclass, field +from enum import Enum +from typing import Any + +from rich.console import Console +from rich.panel import Panel +from rich.table import Table + +from cortex.ask import CommandValidator + +console = Console() + + +class InfoCategory(str, Enum): + """Categories of system information.""" + HARDWARE = "hardware" + SOFTWARE = "software" + NETWORK = "network" + SECURITY = "security" + SERVICES = "services" + PACKAGES = "packages" + PROCESSES = "processes" + STORAGE = "storage" + PERFORMANCE = "performance" + CONFIGURATION = "configuration" + LOGS = "logs" + USERS = "users" + APPLICATION = "application" + CUSTOM = "custom" + + +@dataclass +class InfoCommand: + """A single read-only command for gathering information.""" + command: str + purpose: str + category: InfoCategory = InfoCategory.CUSTOM + timeout: int = 30 + + +@dataclass +class InfoResult: + """Result of executing an info command.""" + command: str + success: bool + output: str + error: str = "" + execution_time: float = 0.0 + + +@dataclass +class SystemInfoResult: + """Complete result of a system info query.""" + query: str + answer: str + commands_executed: list[InfoResult] = field(default_factory=list) + raw_data: dict[str, Any] = field(default_factory=dict) + category: InfoCategory = InfoCategory.CUSTOM + + +# Common info command templates for quick lookups +# Note: Commands are simplified to avoid || patterns which are blocked by CommandValidator +COMMON_INFO_COMMANDS: dict[str, list[InfoCommand]] = { + # 
Hardware Information + "cpu": [ + InfoCommand("lscpu", "Get CPU architecture and details", InfoCategory.HARDWARE), + InfoCommand("head -30 /proc/cpuinfo", "Get CPU model and cores", InfoCategory.HARDWARE), + InfoCommand("nproc", "Get number of processing units", InfoCategory.HARDWARE), + ], + "memory": [ + InfoCommand("free -h", "Get memory usage in human-readable format", InfoCategory.HARDWARE), + InfoCommand("head -20 /proc/meminfo", "Get detailed memory information", InfoCategory.HARDWARE), + ], + "disk": [ + InfoCommand("df -h", "Get disk space usage", InfoCategory.STORAGE), + InfoCommand("lsblk", "List block devices", InfoCategory.STORAGE), + ], + "gpu": [ + InfoCommand("nvidia-smi --query-gpu=name,memory.total,driver_version --format=csv,noheader", "Get NVIDIA GPU info", InfoCategory.HARDWARE), + InfoCommand("lspci", "List PCI devices including VGA", InfoCategory.HARDWARE), + ], + + # OS Information + "os": [ + InfoCommand("cat /etc/os-release", "Get OS release information", InfoCategory.SOFTWARE), + InfoCommand("uname -a", "Get kernel and system info", InfoCategory.SOFTWARE), + InfoCommand("lsb_release -a", "Get LSB release info", InfoCategory.SOFTWARE), + ], + "kernel": [ + InfoCommand("uname -r", "Get kernel version", InfoCategory.SOFTWARE), + InfoCommand("cat /proc/version", "Get detailed kernel version", InfoCategory.SOFTWARE), + ], + + # Network Information + "network": [ + InfoCommand("ip addr show", "List network interfaces", InfoCategory.NETWORK), + InfoCommand("ip route show", "Show routing table", InfoCategory.NETWORK), + InfoCommand("ss -tuln", "List listening ports", InfoCategory.NETWORK), + ], + "dns": [ + InfoCommand("cat /etc/resolv.conf", "Get DNS configuration", InfoCategory.NETWORK), + InfoCommand("host google.com", "Test DNS resolution", InfoCategory.NETWORK), + ], + + # Services + "services": [ + InfoCommand("systemctl list-units --type=service --state=running --no-pager", "List running services", InfoCategory.SERVICES), + 
InfoCommand("systemctl list-units --type=service --state=failed --no-pager", "List failed services", InfoCategory.SERVICES), + ], + + # Security + "security": [ + InfoCommand("ufw status", "Check firewall status", InfoCategory.SECURITY), + InfoCommand("aa-status", "Check AppArmor status", InfoCategory.SECURITY), + InfoCommand("wc -l /etc/passwd", "Count system users", InfoCategory.SECURITY), + ], + + # Processes + "processes": [ + InfoCommand("ps aux --sort=-%mem", "Top memory-consuming processes", InfoCategory.PROCESSES), + InfoCommand("ps aux --sort=-%cpu", "Top CPU-consuming processes", InfoCategory.PROCESSES), + ], + + # Environment + "environment": [ + InfoCommand("env", "List environment variables", InfoCategory.CONFIGURATION), + InfoCommand("echo $PATH", "Show PATH", InfoCategory.CONFIGURATION), + InfoCommand("echo $SHELL", "Show current shell", InfoCategory.CONFIGURATION), + ], +} + +# Application-specific info templates +# Note: Commands are simplified to avoid || patterns which are blocked by CommandValidator +APP_INFO_TEMPLATES: dict[str, dict[str, list[InfoCommand]]] = { + "nginx": { + "status": [ + InfoCommand("systemctl status nginx --no-pager", "Check nginx service status", InfoCategory.SERVICES), + InfoCommand("nginx -v", "Get nginx version", InfoCategory.SOFTWARE), + ], + "config": [ + InfoCommand("cat /etc/nginx/nginx.conf", "Get nginx configuration", InfoCategory.CONFIGURATION), + InfoCommand("ls -la /etc/nginx/sites-enabled/", "List enabled sites", InfoCategory.CONFIGURATION), + ], + "logs": [ + InfoCommand("tail -50 /var/log/nginx/access.log", "Recent access logs", InfoCategory.LOGS), + InfoCommand("tail -50 /var/log/nginx/error.log", "Recent error logs", InfoCategory.LOGS), + ], + }, + "docker": { + "status": [ + InfoCommand("docker --version", "Get Docker version", InfoCategory.SOFTWARE), + InfoCommand("docker info", "Get Docker info", InfoCategory.SOFTWARE), + ], + "containers": [ + InfoCommand("docker ps -a", "List containers", 
InfoCategory.APPLICATION), + InfoCommand("docker images", "List images", InfoCategory.APPLICATION), + ], + "resources": [ + InfoCommand("docker stats --no-stream", "Container resource usage", InfoCategory.PERFORMANCE), + ], + }, + "postgresql": { + "status": [ + InfoCommand("systemctl status postgresql --no-pager", "Check PostgreSQL service", InfoCategory.SERVICES), + InfoCommand("psql --version", "Get PostgreSQL version", InfoCategory.SOFTWARE), + ], + "config": [ + InfoCommand("head -50 /etc/postgresql/14/main/postgresql.conf", "PostgreSQL config", InfoCategory.CONFIGURATION), + ], + }, + "mysql": { + "status": [ + InfoCommand("systemctl status mysql --no-pager", "Check MySQL status", InfoCategory.SERVICES), + InfoCommand("mysql --version", "Get MySQL version", InfoCategory.SOFTWARE), + ], + }, + "redis": { + "status": [ + InfoCommand("systemctl status redis-server --no-pager", "Check Redis status", InfoCategory.SERVICES), + InfoCommand("redis-cli --version", "Get Redis version", InfoCategory.SOFTWARE), + ], + "info": [ + InfoCommand("redis-cli info", "Redis server info", InfoCategory.APPLICATION), + ], + }, + "python": { + "version": [ + InfoCommand("python3 --version", "Get Python version", InfoCategory.SOFTWARE), + InfoCommand("which python3", "Find Python executable", InfoCategory.SOFTWARE), + ], + "packages": [ + InfoCommand("pip3 list --format=freeze", "List installed packages", InfoCategory.PACKAGES), + ], + "venv": [ + InfoCommand("echo $VIRTUAL_ENV", "Check active virtual environment", InfoCategory.CONFIGURATION), + ], + }, + "nodejs": { + "version": [ + InfoCommand("node --version", "Get Node.js version", InfoCategory.SOFTWARE), + InfoCommand("npm --version", "Get npm version", InfoCategory.SOFTWARE), + ], + "packages": [ + InfoCommand("npm list -g --depth=0", "List global npm packages", InfoCategory.PACKAGES), + ], + }, + "git": { + "version": [ + InfoCommand("git --version", "Get Git version", InfoCategory.SOFTWARE), + ], + "config": [ + 
InfoCommand("git config --global --list", "Git global config", InfoCategory.CONFIGURATION), + ], + }, + "ssh": { + "status": [ + InfoCommand("systemctl status ssh --no-pager", "Check SSH service", InfoCategory.SERVICES), + ], + "config": [ + InfoCommand("head -50 /etc/ssh/sshd_config", "SSH server config", InfoCategory.CONFIGURATION), + ], + }, + "systemd": { + "status": [ + InfoCommand("systemctl --version", "Get systemd version", InfoCategory.SOFTWARE), + InfoCommand("systemctl list-units --state=failed --no-pager", "Failed units", InfoCategory.SERVICES), + ], + "timers": [ + InfoCommand("systemctl list-timers --no-pager", "List active timers", InfoCategory.SERVICES), + ], + }, +} + + +class SystemInfoGenerator: + """ + Generates read-only commands to retrieve system and application information. + + Uses LLM to generate appropriate commands based on natural language queries, + while enforcing read-only access through CommandValidator. + """ + + MAX_ITERATIONS = 5 + MAX_OUTPUT_CHARS = 4000 + + def __init__( + self, + api_key: str | None = None, + provider: str = "claude", + model: str | None = None, + debug: bool = False, + ): + """ + Initialize the system info generator. 
+ + Args: + api_key: API key for LLM provider (defaults to env var) + provider: LLM provider ("claude", "openai", "ollama") + model: Optional model override + debug: Enable debug output + """ + self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + self.provider = provider.lower() + self.model = model or self._default_model() + self.debug = debug + + self._initialize_client() + + def _default_model(self) -> str: + if self.provider == "openai": + return "gpt-4o" + elif self.provider == "claude": + return "claude-sonnet-4-20250514" + elif self.provider == "ollama": + return "llama3.2" + return "gpt-4o" + + def _initialize_client(self): + """Initialize the LLM client.""" + if self.provider == "openai": + try: + from openai import OpenAI + self.client = OpenAI(api_key=self.api_key) + except ImportError: + raise ImportError("OpenAI package not installed. Run: pip install openai") + elif self.provider == "claude": + try: + from anthropic import Anthropic + self.client = Anthropic(api_key=self.api_key) + except ImportError: + raise ImportError("Anthropic package not installed. Run: pip install anthropic") + elif self.provider == "ollama": + self.ollama_url = os.environ.get("OLLAMA_HOST", "http://localhost:11434") + self.client = None + else: + raise ValueError(f"Unsupported provider: {self.provider}") + + def _get_system_prompt(self, context: str = "") -> str: + """Get the system prompt for info command generation.""" + app_list = ", ".join(sorted(APP_INFO_TEMPLATES.keys())) + category_list = ", ".join([c.value for c in InfoCategory]) + + prompt = f"""You are a Linux system information assistant that generates READ-ONLY shell commands. + +Your task is to generate shell commands that gather system information to answer the user's query. +You can ONLY generate commands that READ information - no modifications allowed. + +IMPORTANT RULES: +- Generate ONLY read-only commands (cat, ls, grep, find, ps, etc.) 
+- NEVER generate commands that modify the system (rm, mv, cp, apt install, etc.) +- NEVER use sudo (commands must work as regular user where possible) +- NEVER use output redirection (>, >>) +- NEVER use dangerous command chaining (;, &&, ||) except for fallback patterns +- Commands should handle missing files/tools gracefully using || echo fallbacks + +ALLOWED COMMAND PATTERNS: +- Reading files: cat, head, tail, less (without writing) +- Listing: ls, find, locate, which, whereis, type +- System info: uname, hostname, uptime, whoami, id, lscpu, lsmem, lsblk +- Process info: ps, top, pgrep, pidof, pstree, free, vmstat +- Package queries: dpkg-query, dpkg -l, apt-cache, pip list/show/freeze +- Network info: ip addr, ip route, ss, netstat (read operations) +- Service status: systemctl status (NOT start/stop/restart) +- Text processing: grep, awk, sed (for filtering, NOT modifying files) + +BLOCKED PATTERNS (NEVER USE): +- sudo, su +- apt install/remove, pip install/uninstall +- rm, mv, cp, mkdir, touch, chmod, chown +- Output redirection: > or >> +- systemctl start/stop/restart/enable/disable + +RESPONSE FORMAT: +You must respond with a JSON object in one of these formats: + +For generating a command to gather info: +{{ + "response_type": "command", + "command": "", + "category": "<{category_list}>", + "reasoning": "" +}} + +For providing the final answer: +{{ + "response_type": "answer", + "answer": "", + "reasoning": "" +}} + +KNOWN APPLICATIONS with pre-defined info commands: {app_list} + +{context}""" + return prompt + + def _truncate_output(self, output: str) -> str: + """Truncate output to avoid context overflow.""" + if len(output) <= self.MAX_OUTPUT_CHARS: + return output + half = self.MAX_OUTPUT_CHARS // 2 + return f"{output[:half]}\n\n... 
def _execute_command(self, command: str, timeout: int = 30) -> InfoResult:
    """Validate and run a read-only shell command, capturing its output.

    The command is first checked by ``CommandValidator``; anything that
    fails validation is returned as a blocked ``InfoResult`` without being
    executed. Execution uses ``shell=True`` deliberately: validated
    commands are shell strings (pipes, quoting) produced by the LLM.

    Args:
        command: Shell command string to run.
        timeout: Maximum seconds to wait before aborting.

    Returns:
        InfoResult with captured stdout/stderr and elapsed wall time.
    """
    import time
    # monotonic() is immune to wall-clock adjustments, so elapsed-time
    # measurements stay correct even if the system clock jumps.
    start_time = time.monotonic()

    # Validate command first — never execute anything the validator rejects.
    is_valid, error = CommandValidator.validate_command(command)
    if not is_valid:
        return InfoResult(
            command=command,
            success=False,
            output="",
            error=f"Command blocked: {error}",
            execution_time=time.monotonic() - start_time,
        )

    try:
        result = subprocess.run(
            command,
            shell=True,
            capture_output=True,
            text=True,
            timeout=timeout,
        )
        return InfoResult(
            command=command,
            success=result.returncode == 0,
            output=result.stdout.strip(),
            error=result.stderr.strip() if result.returncode != 0 else "",
            execution_time=time.monotonic() - start_time,
        )
    except subprocess.TimeoutExpired:
        return InfoResult(
            command=command,
            success=False,
            output="",
            error=f"Command timed out after {timeout}s",
            execution_time=timeout,
        )
    except Exception as e:
        # Best-effort: report any unexpected failure as a failed result
        # rather than crashing the agentic loop.
        return InfoResult(
            command=command,
            success=False,
            output="",
            error=str(e),
            execution_time=time.monotonic() - start_time,
        )
{"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + "stream": False, + }, + timeout=60.0, + ) + response.raise_for_status() + content = response.json()["message"]["content"] + else: + raise ValueError(f"Unsupported provider: {self.provider}") + + # Parse JSON from response + json_match = re.search(r"\{[\s\S]*\}", content) + if json_match: + return json.loads(json_match.group()) + raise ValueError("No JSON found in response") + + except json.JSONDecodeError as e: + if self.debug: + console.print(f"[red]JSON parse error: {e}[/red]") + return {"response_type": "answer", "answer": f"Error parsing LLM response: {e}", "reasoning": ""} + except Exception as e: + if self.debug: + console.print(f"[red]LLM error: {e}[/red]") + return {"response_type": "answer", "answer": f"Error calling LLM: {e}", "reasoning": ""} + + def get_info(self, query: str, context: str = "") -> SystemInfoResult: + """ + Get system information based on a natural language query. + + Uses an agentic loop to: + 1. Generate commands to gather information + 2. Execute commands (read-only only) + 3. Analyze results + 4. 
Either generate more commands or provide final answer + + Args: + query: Natural language question about the system + context: Optional additional context for the LLM + + Returns: + SystemInfoResult with answer and command execution details + """ + system_prompt = self._get_system_prompt(context) + commands_executed: list[InfoResult] = [] + history: list[dict[str, str]] = [] + + user_prompt = f"Query: {query}" + + for iteration in range(self.MAX_ITERATIONS): + if self.debug: + console.print(f"[dim]Iteration {iteration + 1}/{self.MAX_ITERATIONS}[/dim]") + + # Build prompt with history + full_prompt = user_prompt + if history: + full_prompt += "\n\nPrevious commands and results:\n" + for i, entry in enumerate(history, 1): + full_prompt += f"\n--- Command {i} ---\n" + full_prompt += f"Command: {entry['command']}\n" + if entry['success']: + full_prompt += f"Output:\n{self._truncate_output(entry['output'])}\n" + else: + full_prompt += f"Error: {entry['error']}\n" + full_prompt += "\nBased on these results, either run another command or provide the final answer.\n" + + # Call LLM + response = self._call_llm(system_prompt, full_prompt) + + if response.get("response_type") == "answer": + # Final answer + return SystemInfoResult( + query=query, + answer=response.get("answer", "No answer provided"), + commands_executed=commands_executed, + raw_data={h["command"]: h["output"] for h in history if h.get("success")}, + ) + + elif response.get("response_type") == "command": + command = response.get("command", "") + if not command: + continue + + if self.debug: + console.print(f"[cyan]Executing:[/cyan] {command}") + + result = self._execute_command(command) + commands_executed.append(result) + + history.append({ + "command": command, + "success": result.success, + "output": result.output, + "error": result.error, + }) + + if self.debug: + if result.success: + console.print(f"[green]✓ Success[/green]") + else: + console.print(f"[red]✗ Failed: {result.error}[/red]") + + # Max 
def get_app_info(
    self,
    app_name: str,
    query: str | None = None,
    aspects: list[str] | None = None,
) -> SystemInfoResult:
    """
    Get information about a specific application.

    For applications listed in ``APP_INFO_TEMPLATES`` the pre-defined
    read-only commands are run first; if *query* is given, the gathered
    data is handed to the LLM (via :meth:`get_info`) to answer it,
    otherwise a markdown summary of the raw output is returned.

    Args:
        app_name: Application name (nginx, docker, postgresql, etc.)
        query: Optional natural language query about the app
        aspects: Optional list of aspects to check (status, config, logs, etc.)

    Returns:
        SystemInfoResult with application information
    """
    app_lower = app_name.lower()
    commands_executed: list[InfoResult] = []
    raw_data: dict[str, Any] = {}

    # Check if we have predefined commands for this app
    if app_lower in APP_INFO_TEMPLATES:
        templates = APP_INFO_TEMPLATES[app_lower]
        aspects_to_check = aspects or list(templates.keys())

        for aspect in aspects_to_check:
            if aspect in templates:
                for cmd_info in templates[aspect]:
                    result = self._execute_command(cmd_info.command, cmd_info.timeout)
                    commands_executed.append(result)
                    if result.success and result.output:
                        # Key format "aspect:purpose" — split on ":" below
                        # when rendering the summary.
                        raw_data[f"{aspect}:{cmd_info.purpose}"] = result.output

    # If there's a specific query, use LLM to analyze the gathered data.
    if query:
        # Cap the embedded JSON at 2000 chars to protect the context window.
        context = f"""Application: {app_name}
Already gathered data:
{json.dumps(raw_data, indent=2)[:2000]}

Now answer the specific question about this application."""

        result = self.get_info(query, context)
        # Prepend the template commands so the result reflects everything run.
        result.commands_executed = commands_executed + result.commands_executed
        result.raw_data.update(raw_data)
        return result

    # Generate summary answer from raw data (first 500 chars per entry).
    answer_parts = [f"**{app_name.title()} Information**\n"]
    for key, value in raw_data.items():
        aspect, desc = key.split(":", 1)
        answer_parts.append(f"\n**{aspect.title()}** ({desc}):\n```\n{value[:500]}{'...' if len(value) > 500 else ''}\n```")

    return SystemInfoResult(
        query=query or f"Get information about {app_name}",
        answer="\n".join(answer_parts) if raw_data else f"No information found for {app_name}",
        commands_executed=commands_executed,
        raw_data=raw_data,
        category=InfoCategory.APPLICATION,
    )
def get_structured_info(
    self,
    category: str | InfoCategory,
    aspects: list[str] | None = None,
) -> SystemInfoResult:
    """
    Get structured system information for a category.

    Runs the pre-defined read-only commands from ``COMMON_INFO_COMMANDS``
    for the aspects mapped to *category* and renders the output as a
    markdown summary. No LLM call is involved.

    Args:
        category: Info category (hardware, network, services, etc.)
        aspects: Optional specific aspects (cpu, memory, disk for hardware, etc.)

    Returns:
        SystemInfoResult with structured information
    """
    # Normalize to the lowercase string form used as a mapping key.
    if isinstance(category, str):
        category = category.lower()
    else:
        category = category.value

    commands_executed: list[InfoResult] = []
    raw_data: dict[str, Any] = {}

    # Map categories to the aspect keys of COMMON_INFO_COMMANDS.
    category_mapping = {
        "hardware": ["cpu", "memory", "disk", "gpu"],
        "software": ["os", "kernel"],
        "network": ["network", "dns"],
        "services": ["services"],
        "security": ["security"],
        "processes": ["processes"],
        "storage": ["disk"],
        "performance": ["cpu", "memory", "processes"],
        "configuration": ["environment"],
    }

    # Explicit aspects win; unknown categories yield an empty run.
    aspects_to_check = aspects or category_mapping.get(category, [])

    for aspect in aspects_to_check:
        if aspect in COMMON_INFO_COMMANDS:
            for cmd_info in COMMON_INFO_COMMANDS[aspect]:
                result = self._execute_command(cmd_info.command, cmd_info.timeout)
                commands_executed.append(result)
                if result.success and result.output:
                    raw_data[f"{aspect}:{cmd_info.purpose}"] = result.output

    # Generate structured answer (first 800 chars per entry).
    answer_parts = [f"**{category.title()} Information**\n"]
    for key, value in raw_data.items():
        aspect, desc = key.split(":", 1)
        answer_parts.append(f"\n**{aspect.upper()}** ({desc}):\n```\n{value[:800]}{'...' if len(value) > 800 else ''}\n```")

    return SystemInfoResult(
        query=f"Get {category} information",
        answer="\n".join(answer_parts) if raw_data else f"No {category} information found",
        commands_executed=commands_executed,
        raw_data=raw_data,
        # Fall back to CUSTOM for category strings outside the enum.
        category=InfoCategory(category) if category in [c.value for c in InfoCategory] else InfoCategory.CUSTOM,
    )
def quick_info(self, info_type: str) -> str:
    """
    Quick lookup for common system information.

    Args:
        info_type: Type of info (cpu, memory, disk, os, network, etc.)

    Returns:
        String with the requested information
    """
    key = info_type.lower()

    # Pre-defined system aspects: run every mapped command and join the
    # successful outputs.
    if key in COMMON_INFO_COMMANDS:
        runs = (self._execute_command(ci.command) for ci in COMMON_INFO_COMMANDS[key])
        outputs = [r.output for r in runs if r.success and r.output]
        if outputs:
            return "\n\n".join(outputs)
        return f"No {info_type} information available"

    # Known application: reuse the app-info path for a short status check.
    if key in APP_INFO_TEMPLATES:
        return self.get_app_info(key, aspects=["status", "version"]).answer

    return f"Unknown info type: {info_type}. Available: {', '.join(COMMON_INFO_COMMANDS.keys())}"
Set ANTHROPIC_API_KEY or OPENAI_API_KEY") + + return SystemInfoGenerator(api_key=api_key, provider=provider, debug=debug) + + +# CLI helper for quick testing +if __name__ == "__main__": + import sys + + if len(sys.argv) < 2: + print("Usage: python system_info_generator.py ") + print(" python system_info_generator.py --quick ") + print(" python system_info_generator.py --app [query]") + print(" python system_info_generator.py --list") + sys.exit(1) + + try: + generator = get_system_info_generator(debug=True) + + if sys.argv[1] == "--list": + available = generator.list_available_info() + console.print("\n[bold]Available Information Types:[/bold]") + console.print(f"System: {', '.join(available['system_info'])}") + console.print(f"Apps: {', '.join(available['applications'])}") + console.print(f"Categories: {', '.join(available['categories'])}") + + elif sys.argv[1] == "--quick" and len(sys.argv) > 2: + info = generator.quick_info(sys.argv[2]) + console.print(Panel(info, title=f"{sys.argv[2].title()} Info")) + + elif sys.argv[1] == "--app" and len(sys.argv) > 2: + app_name = sys.argv[2] + query = " ".join(sys.argv[3:]) if len(sys.argv) > 3 else None + result = generator.get_app_info(app_name, query) + console.print(Panel(result.answer, title=f"{app_name.title()} Info")) + + else: + query = " ".join(sys.argv[1:]) + result = generator.get_info(query) + console.print(Panel(result.answer, title="System Info")) + + if result.commands_executed: + table = Table(title="Commands Executed") + table.add_column("Command", style="cyan") + table.add_column("Status", style="green") + table.add_column("Time", style="dim") + for cmd in result.commands_executed: + status = "✓" if cmd.success else "✗" + table.add_row(cmd.command[:60], status, f"{cmd.execution_time:.2f}s") + console.print(table) + + except ValueError as e: + console.print(f"[red]Error: {e}[/red]") + sys.exit(1) + diff --git a/cortex/test.py b/cortex/test.py new file mode 100644 index 000000000..e69de29bb diff --git 
def __init__(self):
    """Initialize daemon state, storage paths, and signal handlers.

    Creates ``~/.cortex`` and its ``terminals/`` subdirectory if missing,
    and installs SIGTERM/SIGINT handlers for shutdown plus SIGHUP for a
    state reload.
    """
    self.running = False
    self.cortex_dir = Path.home() / ".cortex"
    # Flat log of observed commands, format "TERMINAL_ID|COMMAND" per line.
    self.watch_log = self.cortex_dir / "terminal_watch.log"
    self.terminals_dir = self.cortex_dir / "terminals"
    self.pid_file = self.cortex_dir / "watch_service.pid"
    self.state_file = self.cortex_dir / "watch_state.json"

    # Terminal tracking: pts path -> metadata dict (id, created, is_cortex, ...)
    self.terminals: dict[str, dict[str, Any]] = {}
    self.terminal_counter = 0

    # Track commands seen from watch_hook to avoid duplicates with bash_history
    self._watch_hook_commands: set[str] = set()
    self._recent_commands: list[str] = []  # Last 100 commands for dedup

    # Ensure directories exist
    self.cortex_dir.mkdir(parents=True, exist_ok=True)
    self.terminals_dir.mkdir(parents=True, exist_ok=True)

    # Setup signal handlers
    signal.signal(signal.SIGTERM, self._handle_signal)
    signal.signal(signal.SIGINT, self._handle_signal)
    signal.signal(signal.SIGHUP, self._handle_reload)
signum, frame): + """Handle reload signal (SIGHUP).""" + self.log("Received SIGHUP, reloading configuration...") + self._load_state() + + def log(self, message: str): + """Log a message to the service log.""" + log_file = self.cortex_dir / "watch_service.log" + timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + with open(log_file, "a") as f: + f.write(f"[{timestamp}] {message}\n") + + def _load_state(self): + """Load saved state from file.""" + if self.state_file.exists(): + try: + with open(self.state_file) as f: + state = json.load(f) + self.terminal_counter = state.get("terminal_counter", 0) + self.terminals = state.get("terminals", {}) + except Exception as e: + self.log(f"Error loading state: {e}") + + def _save_state(self): + """Save current state to file.""" + try: + state = { + "terminal_counter": self.terminal_counter, + "terminals": self.terminals, + "last_update": datetime.datetime.now().isoformat(), + } + with open(self.state_file, "w") as f: + json.dump(state, f, indent=2) + except Exception as e: + self.log(f"Error saving state: {e}") + + def _get_terminal_id(self, pts: str) -> str: + """Generate or retrieve a unique terminal ID.""" + if pts in self.terminals: + return self.terminals[pts]["id"] + + self.terminal_counter += 1 + terminal_id = f"term_{self.terminal_counter:04d}" + + self.terminals[pts] = { + "id": terminal_id, + "pts": pts, + "created": datetime.datetime.now().isoformat(), + "is_cortex": False, + "command_count": 0, + } + + self._save_state() + return terminal_id + + def _is_cortex_terminal(self, pid: int) -> bool: + """Check if a process is a Cortex terminal.""" + try: + # Check environment variables + environ_file = Path(f"/proc/{pid}/environ") + if environ_file.exists(): + environ = environ_file.read_bytes() + if b"CORTEX_TERMINAL=1" in environ: + return True + + # Check command line + cmdline_file = Path(f"/proc/{pid}/cmdline") + if cmdline_file.exists(): + cmdline = cmdline_file.read_bytes().decode("utf-8", 
errors="ignore") + if "cortex" in cmdline.lower(): + return True + except (PermissionError, FileNotFoundError, ProcessLookupError): + pass + + return False + + def _get_active_terminals(self) -> list[dict]: + """Get list of active terminal processes.""" + terminals = [] + + try: + # Find all pts (pseudo-terminal) devices + pts_dir = Path("/dev/pts") + if pts_dir.exists(): + for pts_file in pts_dir.iterdir(): + if pts_file.name.isdigit(): + pts_path = str(pts_file) + + # Find process using this pts + result = subprocess.run( + ["fuser", pts_path], + capture_output=True, + text=True, + timeout=2 + ) + + if result.stdout.strip(): + pids = result.stdout.strip().split() + for pid_str in pids: + try: + pid = int(pid_str) + is_cortex = self._is_cortex_terminal(pid) + terminal_id = self._get_terminal_id(pts_path) + + # Update cortex flag + if pts_path in self.terminals: + self.terminals[pts_path]["is_cortex"] = is_cortex + + terminals.append({ + "pts": pts_path, + "pid": pid, + "id": terminal_id, + "is_cortex": is_cortex, + }) + except ValueError: + continue + + except Exception as e: + self.log(f"Error getting terminals: {e}") + + return terminals + + def _monitor_bash_history(self): + """Monitor bash history for new commands using inotify if available.""" + history_files = [ + Path.home() / ".bash_history", + Path.home() / ".zsh_history", + ] + + positions: dict[str, int] = {} + last_commands: dict[str, str] = {} # Track last command per file to avoid duplicates + + # Initialize positions to current end of file + for hist_file in history_files: + if hist_file.exists(): + positions[str(hist_file)] = hist_file.stat().st_size + # Read last line to track for dedup + try: + content = hist_file.read_text() + lines = content.strip().split("\n") + if lines: + last_commands[str(hist_file)] = lines[-1].strip() + except Exception: + pass + + # Try to use inotify for more efficient monitoring + try: + import select + import struct + import ctypes + + # Check if inotify is available 
def _monitor_bash_history(self):
    """Monitor bash history for new commands using inotify if available.

    Tails ``~/.bash_history`` and ``~/.zsh_history`` by byte offset and
    forwards each new line to ``_log_command``. Prefers an inotify-based
    wait (via raw libc calls) and falls back to 0.3s polling when inotify
    cannot be initialized. Runs until ``self.running`` goes False.
    """
    history_files = [
        Path.home() / ".bash_history",
        Path.home() / ".zsh_history",
    ]

    # Byte offset already consumed, per history file path.
    positions: dict[str, int] = {}
    last_commands: dict[str, str] = {}  # Track last command per file to avoid duplicates

    # Initialize positions to current end of file so pre-existing history
    # is never replayed.
    for hist_file in history_files:
        if hist_file.exists():
            positions[str(hist_file)] = hist_file.stat().st_size
            # Read last line to track for dedup
            try:
                content = hist_file.read_text()
                lines = content.strip().split("\n")
                if lines:
                    last_commands[str(hist_file)] = lines[-1].strip()
            except Exception:
                pass

    # Try to use inotify for more efficient monitoring
    try:
        import select
        import struct
        import ctypes

        # Check if inotify is available — call libc directly since the
        # stdlib has no inotify binding.
        libc = ctypes.CDLL("libc.so.6")
        inotify_init = libc.inotify_init
        inotify_add_watch = libc.inotify_add_watch

        IN_MODIFY = 0x00000002
        IN_CLOSE_WRITE = 0x00000008

        fd = inotify_init()
        if fd < 0:
            raise OSError("Failed to initialize inotify")

        watches = {}
        for hist_file in history_files:
            if hist_file.exists():
                wd = inotify_add_watch(fd, str(hist_file).encode(), IN_MODIFY | IN_CLOSE_WRITE)
                if wd >= 0:
                    watches[wd] = hist_file

        self.log(f"Using inotify to monitor {len(watches)} history files")

        while self.running:
            # Wait for inotify event with timeout (so self.running is
            # re-checked at least once per second).
            r, _, _ = select.select([fd], [], [], 1.0)
            if not r:
                continue

            # Drain the event buffer; the event payload itself is not
            # decoded — any event triggers a rescan of all files below.
            data = os.read(fd, 4096)
            # Process inotify events
            for hist_file in history_files:
                key = str(hist_file)
                if not hist_file.exists():
                    continue

                try:
                    current_size = hist_file.stat().st_size

                    if key not in positions:
                        positions[key] = current_size
                        continue

                    # Shrinking file means it was truncated/rewritten:
                    # resync the offset without replaying content.
                    if current_size < positions[key]:
                        positions[key] = current_size
                        continue

                    if current_size > positions[key]:
                        with open(hist_file) as f:
                            f.seek(positions[key])
                            new_content = f.read()

                        for line in new_content.split("\n"):
                            line = line.strip()
                            # Skip empty, short, or duplicate commands
                            if line and len(line) > 1:
                                if last_commands.get(key) != line:
                                    self._log_command(line, "history")
                                    last_commands[key] = line

                        positions[key] = current_size
                except Exception as e:
                    self.log(f"Error reading {hist_file}: {e}")

        os.close(fd)
        return

    except Exception as e:
        self.log(f"Inotify not available, using polling: {e}")

    # Fallback to polling
    while self.running:
        for hist_file in history_files:
            if not hist_file.exists():
                continue

            key = str(hist_file)
            try:
                current_size = hist_file.stat().st_size

                if key not in positions:
                    positions[key] = current_size
                    continue

                if current_size < positions[key]:
                    # File was truncated
                    positions[key] = current_size
                    continue

                if current_size > positions[key]:
                    with open(hist_file) as f:
                        f.seek(positions[key])
                        new_content = f.read()

                    for line in new_content.split("\n"):
                        line = line.strip()
                        if line and len(line) > 1:
                            if last_commands.get(key) != line:
                                self._log_command(line, "history")
                                last_commands[key] = line

                    positions[key] = current_size

            except Exception as e:
                self.log(f"Error reading {hist_file}: {e}")

        time.sleep(0.3)
+ if not terminal_id or terminal_id == "unknown": + continue + + command = parts[1] if len(parts) > 1 else "" + if not command: + continue + + # Skip duplicates + if self._is_duplicate(command): + continue + + # Mark this command as seen from watch_hook + self._watch_hook_commands.add(command) + + # Log to terminal_commands.json only + self._log_to_json(command, "watch_hook", terminal_id) + + position = current_size + + except Exception as e: + self.log(f"Error monitoring watch hook: {e}") + + time.sleep(0.2) + + def _log_to_json(self, command: str, source: str, terminal_id: str): + """Log a command only to terminal_commands.json.""" + try: + detailed_log = self.cortex_dir / "terminal_commands.json" + entry = { + "timestamp": datetime.datetime.now().isoformat(), + "command": command, + "source": source, + "terminal_id": terminal_id, + } + + with open(detailed_log, "a") as f: + f.write(json.dumps(entry) + "\n") + except Exception as e: + self.log(f"Error logging to JSON: {e}") + + def _is_duplicate(self, command: str) -> bool: + """Check if command was recently logged to avoid duplicates.""" + if command in self._recent_commands: + return True + + # Keep last 100 commands + self._recent_commands.append(command) + if len(self._recent_commands) > 100: + self._recent_commands.pop(0) + + return False + + def _log_command(self, command: str, source: str = "unknown", terminal_id: str | None = None): + """Log a command from bash_history (watch_hook uses _log_to_json directly).""" + # Skip cortex commands + if command.lower().startswith("cortex "): + return + if "watch_hook" in command: + return + if command.startswith("source ") and ".cortex" in command: + return + + # Skip if this command was already logged by watch_hook + if command in self._watch_hook_commands: + self._watch_hook_commands.discard(command) # Clear it for next time + return + + # Skip duplicates + if self._is_duplicate(command): + return + + # For bash_history source, we can't know which terminal - use 
"shared" + if terminal_id is None: + terminal_id = "shared" + + try: + # Write to watch_log with format TTY|COMMAND + with open(self.watch_log, "a") as f: + f.write(f"{terminal_id}|{command}\n") + + # Log to JSON + self._log_to_json(command, source, terminal_id) + + except Exception as e: + self.log(f"Error logging command: {e}") + + def _cleanup_stale_terminals(self): + """Remove stale terminal entries.""" + while self.running: + try: + active_pts = set() + pts_dir = Path("/dev/pts") + if pts_dir.exists(): + for pts_file in pts_dir.iterdir(): + if pts_file.name.isdigit(): + active_pts.add(str(pts_file)) + + # Remove stale entries + stale = [pts for pts in self.terminals if pts not in active_pts] + for pts in stale: + del self.terminals[pts] + + if stale: + self._save_state() + + except Exception as e: + self.log(f"Error cleaning up terminals: {e}") + + time.sleep(30) # Check every 30 seconds + + def start(self): + """Start the watch daemon.""" + # Check if already running + if self.pid_file.exists(): + try: + pid = int(self.pid_file.read_text().strip()) + os.kill(pid, 0) # Check if process exists + self.log(f"Daemon already running with PID {pid}") + return False + except (ProcessLookupError, ValueError): + # Stale PID file + self.pid_file.unlink() + + # Write PID file + self.pid_file.write_text(str(os.getpid())) + + self.running = True + self._load_state() + + self.log("Cortex Watch Service starting...") + + # Start monitor threads + threads = [ + threading.Thread(target=self._monitor_bash_history, daemon=True), + threading.Thread(target=self._monitor_watch_hook, daemon=True), + threading.Thread(target=self._cleanup_stale_terminals, daemon=True), + ] + + for t in threads: + t.start() + + self.log(f"Cortex Watch Service started (PID: {os.getpid()})") + + # Main loop - just keep alive and handle signals + try: + while self.running: + time.sleep(1) + finally: + self._shutdown() + + return True + + def _shutdown(self): + """Clean shutdown.""" + self.log("Shutting 
down...") + self._save_state() + + if self.pid_file.exists(): + self.pid_file.unlink() + + self.log("Cortex Watch Service stopped") + + def stop(self): + """Stop the running daemon.""" + if not self.pid_file.exists(): + return False, "Service not running" + + try: + pid = int(self.pid_file.read_text().strip()) + os.kill(pid, signal.SIGTERM) + + # Wait for process to exit + for _ in range(10): + try: + os.kill(pid, 0) + time.sleep(0.5) + except ProcessLookupError: + break + + return True, f"Service stopped (PID: {pid})" + + except ProcessLookupError: + self.pid_file.unlink() + return True, "Service was not running" + except Exception as e: + return False, f"Error stopping service: {e}" + + def status(self) -> dict: + """Get service status.""" + status = { + "running": False, + "pid": None, + "terminals": 0, + "commands_logged": 0, + } + + if self.pid_file.exists(): + try: + pid = int(self.pid_file.read_text().strip()) + os.kill(pid, 0) + status["running"] = True + status["pid"] = pid + except (ProcessLookupError, ValueError): + pass + + if self.watch_log.exists(): + try: + content = self.watch_log.read_text() + status["commands_logged"] = len([l for l in content.split("\n") if l.strip()]) + except Exception: + pass + + self._load_state() + status["terminals"] = len(self.terminals) + + return status + + +def get_systemd_service_content() -> str: + """Generate systemd service file content.""" + python_path = sys.executable + service_script = Path(__file__).resolve() + + return f"""[Unit] +Description=Cortex Terminal Watch Service +Documentation=https://github.com/cortexlinux/cortex +After=default.target + +[Service] +Type=simple +ExecStart={python_path} {service_script} --daemon +ExecStop={python_path} {service_script} --stop +ExecReload=/bin/kill -HUP $MAINPID +Restart=always +RestartSec=5 +StandardOutput=journal +StandardError=journal + +# Security +NoNewPrivileges=true +PrivateTmp=true + +[Install] +WantedBy=default.target +""" + + +def install_service() -> 
tuple[bool, str]: + """Install the systemd user service.""" + service_dir = Path.home() / ".config" / "systemd" / "user" + service_file = service_dir / "cortex-watch.service" + + try: + # Create directory + service_dir.mkdir(parents=True, exist_ok=True) + + # Write service file + service_file.write_text(get_systemd_service_content()) + + # Reload systemd + subprocess.run(["systemctl", "--user", "daemon-reload"], check=True) + + # Enable and start service + subprocess.run(["systemctl", "--user", "enable", "cortex-watch.service"], check=True) + subprocess.run(["systemctl", "--user", "start", "cortex-watch.service"], check=True) + + # Enable lingering so service runs even when not logged in + subprocess.run(["loginctl", "enable-linger", os.getenv("USER", "")], capture_output=True) + + return True, f"""✓ Cortex Watch Service installed and started! + +Service file: {service_file} + +The service will: + • Start automatically on login + • Restart automatically if it crashes + • Monitor all terminal activity + +Commands: + systemctl --user status cortex-watch # Check status + systemctl --user restart cortex-watch # Restart + systemctl --user stop cortex-watch # Stop + journalctl --user -u cortex-watch # View logs +""" + except subprocess.CalledProcessError as e: + return False, f"Failed to install service: {e}" + except Exception as e: + return False, f"Error: {e}" + + +def uninstall_service() -> tuple[bool, str]: + """Uninstall the systemd user service.""" + service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" + + try: + # Stop and disable service + subprocess.run(["systemctl", "--user", "stop", "cortex-watch.service"], capture_output=True) + subprocess.run(["systemctl", "--user", "disable", "cortex-watch.service"], capture_output=True) + + # Remove service file + if service_file.exists(): + service_file.unlink() + + # Reload systemd + subprocess.run(["systemctl", "--user", "daemon-reload"], check=True) + + return True, "✓ Cortex Watch 
Service uninstalled" + except Exception as e: + return False, f"Error: {e}" + + +def main(): + """Main entry point.""" + import argparse + + parser = argparse.ArgumentParser(description="Cortex Watch Service") + parser.add_argument("--daemon", action="store_true", help="Run as daemon") + parser.add_argument("--stop", action="store_true", help="Stop the daemon") + parser.add_argument("--status", action="store_true", help="Show status") + parser.add_argument("--install", action="store_true", help="Install systemd service") + parser.add_argument("--uninstall", action="store_true", help="Uninstall systemd service") + + args = parser.parse_args() + + daemon = CortexWatchDaemon() + + if args.install: + success, msg = install_service() + print(msg) + sys.exit(0 if success else 1) + + if args.uninstall: + success, msg = uninstall_service() + print(msg) + sys.exit(0 if success else 1) + + if args.status: + status = daemon.status() + print(f"Running: {status['running']}") + if status['pid']: + print(f"PID: {status['pid']}") + print(f"Terminals tracked: {status['terminals']}") + print(f"Commands logged: {status['commands_logged']}") + sys.exit(0) + + if args.stop: + success, msg = daemon.stop() + print(msg) + sys.exit(0 if success else 1) + + if args.daemon: + daemon.start() + else: + parser.print_help() + + +if __name__ == "__main__": + main() + diff --git a/docs/ASK_DO_ARCHITECTURE.md b/docs/ASK_DO_ARCHITECTURE.md new file mode 100644 index 000000000..3b4261231 --- /dev/null +++ b/docs/ASK_DO_ARCHITECTURE.md @@ -0,0 +1,741 @@ +# Cortex `ask --do` Architecture + +> AI-powered command execution with intelligent error handling, auto-repair, and real-time terminal monitoring. 
+ +## Table of Contents + +- [Overview](#overview) +- [Architecture Diagram](#architecture-diagram) +- [Core Components](#core-components) +- [Execution Flow](#execution-flow) +- [Terminal Monitoring](#terminal-monitoring) +- [Error Handling & Auto-Fix](#error-handling--auto-fix) +- [Session Management](#session-management) +- [Key Files](#key-files) +- [Data Flow](#data-flow) + +--- + +## Overview + +`cortex ask --do` is an interactive AI assistant that can execute commands on your Linux system. Unlike simple command execution, it features: + +- **Natural Language Understanding** - Describe what you want in plain English +- **Conflict Detection** - Detects existing resources (Docker containers, services, files) before execution +- **Task Tree Execution** - Structured command execution with dependencies +- **Auto-Repair** - Automatically diagnoses and fixes failed commands +- **Terminal Monitoring** - Watches your other terminals for real-time feedback +- **Session Persistence** - Tracks history across multiple interactions + +--- + +## Architecture Diagram + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ USER INPUT │ +│ "install nginx and configure it" │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ CLI Layer │ +│ (cli.py) │ +│ ┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ │ +│ │ Signal Handlers │ │ Session Manager │ │ Interactive │ │ +│ │ (Ctrl+Z/C) │ │ (session_id) │ │ Prompt │ │ +│ └─────────────────┘ └─────────────────┘ └─────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ AskHandler │ +│ (ask.py) │ +│ ┌─────────────────────────────────────────────────────────────────────┐ │ +│ │ LLM Integration │ │ +│ │ ┌─────────────┐ ┌─────────────┐ 
┌─────────────┐ │ │ +│ │ │ Claude │ │ Kimi K2 │ │ Ollama │ │ │ +│ │ │ (Primary) │ │ (Fallback) │ │ (Local) │ │ │ +│ │ └─────────────┘ └─────────────┘ └─────────────┘ │ │ +│ └─────────────────────────────────────────────────────────────────────┘ │ +│ │ +│ Response Types: │ +│ ├── "command" → Read-only info gathering │ +│ ├── "do_commands" → Commands to execute (requires approval) │ +│ └── "answer" → Final response to user │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ DoHandler │ +│ (do_runner/handler.py) │ +│ │ +│ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ ┌──────────────┐ │ +│ │ Conflict │ │ Task Tree │ │ Auto │ │ Terminal │ │ +│ │ Detection │ │ Execution │ │ Repair │ │ Monitor │ │ +│ └──────────────┘ └──────────────┘ └──────────────┘ └──────────────┘ │ +│ │ +│ Execution Modes: │ +│ ├── Automatic → Commands run with user approval │ +│ └── Manual → User runs commands, Cortex monitors │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ┌───────────────┴───────────────┐ + ▼ ▼ +┌─────────────────────────────┐ ┌─────────────────────────────────────────┐ +│ Automatic Execution │ │ Manual Intervention │ +│ │ │ │ +│ ┌────────────────────────┐ │ │ ┌────────────────────────────────────┐ │ +│ │ ConflictDetector │ │ │ │ TerminalMonitor │ │ +│ │ (verification.py) │ │ │ │ (terminal.py) │ │ +│ │ │ │ │ │ │ │ +│ │ Checks for: │ │ │ │ Monitors: │ │ +│ │ • Docker containers │ │ │ │ • ~/.bash_history │ │ +│ │ • Running services │ │ │ │ • ~/.zsh_history │ │ +│ │ • Existing files │ │ │ │ • terminal_watch.log │ │ +│ │ • Port conflicts │ │ │ │ • Cursor IDE terminals │ │ +│ │ • Package conflicts │ │ │ │ │ │ +│ └────────────────────────┘ │ │ │ Features: │ │ +│ │ │ │ • Real-time command detection │ │ +│ ┌────────────────────────┐ │ │ │ • Error detection & auto-fix │ │ +│ │ CommandExecutor │ │ │ │ • Desktop notifications 
│ │ +│ │ (executor.py) │ │ │ │ • Terminal ID tracking │ │ +│ │ │ │ │ └────────────────────────────────────┘ │ +│ │ • Subprocess mgmt │ │ │ │ +│ │ • Timeout handling │ │ │ ┌────────────────────────────────────┐ │ +│ │ • Output capture │ │ │ │ Watch Service (Daemon) │ │ +│ │ • Sudo handling │ │ │ │ (watch_service.py) │ │ +│ └────────────────────────┘ │ │ │ │ │ +│ │ │ │ • Runs as systemd user service │ │ +│ ┌────────────────────────┐ │ │ │ • Auto-starts on login │ │ +│ │ ErrorDiagnoser │ │ │ │ • Uses inotify for efficiency │ │ +│ │ (diagnosis.py) │ │ │ │ • Logs to terminal_commands.json │ │ +│ │ │ │ │ └────────────────────────────────────┘ │ +│ │ • Pattern matching │ │ │ │ +│ │ • LLM-powered diag │ │ └─────────────────────────────────────────┘ +│ │ • Fix suggestions │ │ +│ └────────────────────────┘ │ +│ │ +│ ┌────────────────────────┐ │ +│ │ AutoFixer │ │ +│ │ (diagnosis.py) │ │ +│ │ │ │ +│ │ • Automatic repairs │ │ +│ │ • Retry strategies │ │ +│ │ • Verification tests │ │ +│ └────────────────────────┘ │ +└─────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ Persistence Layer │ +│ │ +│ ┌─────────────────────────────┐ ┌─────────────────────────────────────┐ │ +│ │ DoRunDatabase │ │ Log Files │ │ +│ │ (~/.cortex/do_runs.db) │ │ │ │ +│ │ │ │ • terminal_watch.log │ │ +│ │ Tables: │ │ • terminal_commands.json │ │ +│ │ • do_runs │ │ • watch_service.log │ │ +│ │ • do_sessions │ │ │ │ +│ └─────────────────────────────┘ └─────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Core Components + +### 1. CLI Layer (`cli.py`) + +The entry point for `cortex ask --do`. Handles: + +- **Signal Handlers**: Ctrl+Z stops current command (not the session), Ctrl+C exits +- **Session Management**: Creates/tracks session IDs for history grouping +- **Interactive Loop**: "What would you like to do?" 
prompt with suggestions +- **Error Handling**: Graceful error display without exposing internal details + +```python +# Key functions +_run_interactive_do_session(handler) # Main interactive loop +handle_session_interrupt() # Ctrl+Z handler +``` + +### 2. AskHandler (`ask.py`) + +Manages LLM communication and response parsing: + +- **Multi-LLM Support**: Claude (primary), Kimi K2, Ollama (local) +- **Response Types**: + - `command` - Read-only info gathering (ls, cat, systemctl status) + - `do_commands` - Commands requiring execution (apt install, systemctl restart) + - `answer` - Final response to user +- **Guardrails**: Rejects non-Linux/technical queries +- **Chained Command Handling**: Splits `&&` chains into individual commands + +```python +# Key methods +_get_do_mode_system_prompt() # LLM system prompt +_handle_do_commands() # Process do_commands response +_call_llm() # Make LLM API call with interrupt support +``` + +### 3. DoHandler (`do_runner/handler.py`) + +The execution engine. Core responsibilities: + +- **Conflict Detection**: Checks for existing resources before execution +- **Task Tree Building**: Creates structured execution plan +- **Command Execution**: Runs commands with approval workflow +- **Auto-Repair**: Handles failures with diagnostic commands +- **Manual Intervention**: Coordinates with TerminalMonitor + +```python +# Key methods +execute_with_task_tree() # Main execution method +_handle_resource_conflict() # User prompts for conflicts +_execute_task_node() # Execute single task +_interactive_session() # Post-execution suggestions +``` + +### 4. 
ConflictDetector (`verification.py`) + +Pre-flight checks before command execution: + +| Resource Type | Check Method | +|--------------|--------------| +| Docker containers | `docker ps -a --filter name=X` | +| Systemd services | `systemctl is-active X` | +| Files/directories | `os.path.exists()` | +| Ports | `ss -tlnp \| grep :PORT` | +| Packages (apt) | `dpkg -l \| grep X` | +| Packages (pip) | `pip show X` | +| Users/groups | `getent passwd/group` | +| Databases | `mysql/psql -e "SHOW DATABASES"` | + +### 5. TerminalMonitor (`terminal.py`) + +Real-time monitoring for manual intervention mode: + +- **Sources Monitored**: + - `~/.bash_history` and `~/.zsh_history` + - `~/.cortex/terminal_watch.log` (from shell hooks) + - Cursor IDE terminal files + - tmux panes + +- **Features**: + - Command detection with terminal ID tracking + - Error detection in command output + - LLM-powered error analysis + - Desktop notifications for errors/fixes + - Auto-fix execution (non-sudo only) + +### 6. Watch Service (`watch_service.py`) + +Background daemon for persistent terminal monitoring: + +```bash +# Install and manage +cortex watch --install --service # Install systemd service +cortex watch --status # Check status +cortex watch --uninstall --service +``` + +- Runs as systemd user service +- Uses inotify for efficient file watching +- Auto-starts on login, auto-restarts on crash +- Logs to `~/.cortex/terminal_commands.json` + +--- + +## Execution Flow + +### Flow 1: Automatic Execution + +``` +User: "install nginx" + │ + ▼ + ┌─────────────────┐ + │ LLM Analysis │ ──→ Gathers system info (OS, existing packages) + └─────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ Conflict Check │ ──→ Is nginx already installed? + └─────────────────┘ + │ + ┌────┴────┐ + │ │ + ▼ ▼ + Conflict No Conflict + │ │ + ▼ │ +┌─────────────────┐ │ +│ User Choice: │ │ +│ 1. Use existing │ │ +│ 2. Restart │ │ +│ 3. 
Recreate │ │ +└─────────────────┘ │ + │ │ + └──────┬──────┘ + │ + ▼ + ┌─────────────────┐ + │ Show Commands │ ──→ Display planned commands for approval + └─────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ User Approval? │ + └─────────────────┘ + │ + ┌────┴────┐ + │ │ + ▼ ▼ + Yes No ──→ Cancel + │ + ▼ + ┌─────────────────┐ + │ Execute Tasks │ ──→ Run commands one by one + └─────────────────┘ + │ + ┌────┴────┐ + │ │ + ▼ ▼ + Success Failure + │ │ + │ ▼ + │ ┌─────────────────┐ + │ │ Error Diagnosis │ ──→ Pattern matching + LLM analysis + │ └─────────────────┘ + │ │ + │ ▼ + │ ┌─────────────────┐ + │ │ Auto-Repair │ ──→ Execute fix commands + │ └─────────────────┘ + │ │ + │ ▼ + │ ┌─────────────────┐ + │ │ Verify Fix │ + │ └─────────────────┘ + │ │ + └────┬────┘ + │ + ▼ + ┌─────────────────┐ + │ Verification │ ──→ Run tests to confirm success + └─────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ Interactive │ ──→ "What would you like to do next?" + │ Session │ + └─────────────────┘ +``` + +### Flow 2: Manual Intervention + +``` +User requests sudo commands OR chooses manual execution + │ + ▼ + ┌─────────────────────────────────────────────────────────┐ + │ Manual Intervention Mode │ + │ │ + │ ┌────────────────────────────────────────────────────┐ │ + │ │ Cortex Terminal │ │ + │ │ Shows: │ │ + │ │ • Commands to run │ │ + │ │ • Live terminal feed │ │ + │ │ • Real-time feedback │ │ + │ └────────────────────────────────────────────────────┘ │ + │ ▲ │ + │ │ monitors │ + │ │ │ + │ ┌────────────────────────────────────────────────────┐ │ + │ │ Other Terminal(s) │ │ + │ │ User runs: │ │ + │ │ $ sudo systemctl restart nginx │ │ + │ │ $ sudo apt install package │ │ + │ └────────────────────────────────────────────────────┘ │ + └─────────────────────────────────────────────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ Command Match? 
│ + └─────────────────┘ + │ + ┌────┴────────────┐ + │ │ │ + ▼ ▼ ▼ + Correct Wrong Error in + Command Command Output + │ │ │ + │ ▼ ▼ + │ Notification Notification + │ "Expected: "Fixing error..." + │ " + Auto-fix + │ │ │ + └────┬────┴───────┘ + │ + ▼ + User presses Enter when done + │ + ▼ + ┌─────────────────┐ + │ Validate │ ──→ Check if expected commands were run + └─────────────────┘ + │ + ▼ + ┌─────────────────┐ + │ Continue or │ + │ Show Next Steps │ + └─────────────────┘ +``` + +--- + +## Terminal Monitoring + +### Watch Hook Flow + +``` +┌──────────────────────────────────────────────────────────────────────────┐ +│ Terminal with Hook Active │ +│ │ +│ $ sudo systemctl restart nginx │ +│ │ │ +│ ▼ │ +│ PROMPT_COMMAND triggers __cortex_log_cmd() │ +│ │ │ +│ ▼ │ +│ Writes to ~/.cortex/terminal_watch.log │ +│ Format: pts_1|sudo systemctl restart nginx │ +└──────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌──────────────────────────────────────────────────────────────────────────┐ +│ Watch Service (Daemon) │ +│ │ +│ Monitors with inotify: │ +│ • ~/.cortex/terminal_watch.log │ +│ • ~/.bash_history │ +│ • ~/.zsh_history │ +│ │ │ +│ ▼ │ +│ Parses: TTY|COMMAND │ +│ │ │ +│ ▼ │ +│ Writes to ~/.cortex/terminal_commands.json │ +│ {"timestamp": "...", "command": "...", "source": "watch_hook", │ +│ "terminal_id": "pts_1"} │ +└──────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌──────────────────────────────────────────────────────────────────────────┐ +│ TerminalMonitor (In Cortex) │ +│ │ +│ During manual intervention: │ +│ 1. Reads terminal_watch.log │ +│ 2. Detects new commands │ +│ 3. Shows in "Live Terminal Feed" │ +│ 4. Checks if command matches expected │ +│ 5. Detects errors in output │ +│ 6. 
Triggers auto-fix if needed │ +└──────────────────────────────────────────────────────────────────────────┘ +``` + +### Log File Formats + +**`~/.cortex/terminal_watch.log`** (Simple): +``` +pts_1|docker ps +pts_1|sudo systemctl restart nginx +pts_2|ls -la +shared|cd /home/user +``` + +**`~/.cortex/terminal_commands.json`** (Detailed): +```json +{"timestamp": "2026-01-16T14:15:00.123", "command": "docker ps", "source": "watch_hook", "terminal_id": "pts_1"} +{"timestamp": "2026-01-16T14:15:05.456", "command": "sudo systemctl restart nginx", "source": "watch_hook", "terminal_id": "pts_1"} +{"timestamp": "2026-01-16T14:15:10.789", "command": "cd /home/user", "source": "history", "terminal_id": "shared"} +``` + +--- + +## Error Handling & Auto-Fix + +### Error Diagnosis Pipeline + +``` +Command fails with error + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ Pattern Matching │ +│ │ +│ COMMAND_SHELL_ERRORS = { │ +│ "Permission denied": "permission_error", │ +│ "command not found": "missing_package", │ +│ "Connection refused": "service_not_running", │ +│ "No space left": "disk_full", │ +│ ... │ +│ } │ +└─────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ LLM Analysis (Claude) │ +│ │ +│ Prompt: "Analyze this error and suggest a fix" │ +│ Response: │ +│ CAUSE: Service not running │ +│ FIX: sudo systemctl start nginx │ +└─────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────┐ +│ AutoFixer Execution │ +│ │ +│ 1. Check if fix requires sudo │ +│ - Yes → Show manual instructions + notification │ +│ - No → Execute automatically │ +│ 2. Verify fix worked │ +│ 3. 
Retry original command if fixed │ +└─────────────────────────────────────────────────────────────┘ +``` + +### Auto-Fix Strategies + +| Error Type | Strategy | Actions | +|------------|----------|---------| +| `permission_error` | `fix_permissions` | `chmod`, `chown`, or manual sudo | +| `missing_package` | `install_package` | `apt install`, `pip install` | +| `service_not_running` | `start_service` | `systemctl start`, check logs | +| `port_in_use` | `kill_port_user` | Find and stop conflicting process | +| `disk_full` | `free_disk_space` | `apt clean`, suggest cleanup | +| `config_error` | `fix_config` | Backup + LLM-suggested fix | + +--- + +## Session Management + +### Session Structure + +``` +Session (session_id: sess_20260116_141500) +│ +├── Run 1 (run_id: do_20260116_141500_abc123) +│ ├── Query: "install nginx" +│ ├── Commands: +│ │ ├── apt update +│ │ ├── apt install -y nginx +│ │ └── systemctl start nginx +│ └── Status: SUCCESS +│ +├── Run 2 (run_id: do_20260116_141600_def456) +│ ├── Query: "configure nginx for my domain" +│ ├── Commands: +│ │ ├── cat /etc/nginx/sites-available/default +│ │ └── [manual: edit config] +│ └── Status: SUCCESS +│ +└── Run 3 (run_id: do_20260116_141700_ghi789) + ├── Query: "test nginx" + ├── Commands: + │ └── curl localhost + └── Status: SUCCESS +``` + +### Database Schema + +```sql +-- Sessions table +CREATE TABLE do_sessions ( + session_id TEXT PRIMARY KEY, + started_at TEXT, + ended_at TEXT, + total_runs INTEGER DEFAULT 0 +); + +-- Runs table +CREATE TABLE do_runs ( + run_id TEXT PRIMARY KEY, + session_id TEXT, + summary TEXT, + mode TEXT, + commands TEXT, -- JSON array + started_at TEXT, + completed_at TEXT, + user_query TEXT, + FOREIGN KEY (session_id) REFERENCES do_sessions(session_id) +); +``` + +--- + +## Key Files + +| File | Purpose | +|------|---------| +| `cortex/cli.py` | CLI entry point, signal handlers, interactive loop | +| `cortex/ask.py` | LLM communication, response parsing, command validation | +| 
`cortex/do_runner/handler.py` | Main execution engine, conflict handling, task tree | +| `cortex/do_runner/executor.py` | Subprocess management, timeout handling | +| `cortex/do_runner/verification.py` | Conflict detection, verification tests | +| `cortex/do_runner/diagnosis.py` | Error patterns, diagnosis, auto-fix strategies | +| `cortex/do_runner/terminal.py` | Terminal monitoring, shell hooks | +| `cortex/do_runner/models.py` | Data models (TaskNode, DoRun, CommandStatus) | +| `cortex/do_runner/database.py` | SQLite persistence for runs/sessions | +| `cortex/watch_service.py` | Background daemon for terminal monitoring | +| `cortex/llm_router.py` | Multi-LLM routing (Claude, Kimi, Ollama) | + +--- + +## Data Flow + +``` +┌─────────────────────────────────────────────────────────────────────────┐ +│ Data Flow │ +│ │ +│ User Query ──→ AskHandler ──→ LLM ──→ Response │ +│ │ │ │ │ │ +│ │ │ │ ▼ │ +│ │ │ │ ┌─────────┐ │ +│ │ │ │ │ command │ ──→ Execute read-only │ +│ │ │ │ └─────────┘ │ │ +│ │ │ │ │ │ │ +│ │ │ │ ▼ │ │ +│ │ │ │ Output added │ │ +│ │ │ │ to history ─────────┘ │ +│ │ │ │ │ │ +│ │ │ │ ▼ │ +│ │ │ │ Loop back to LLM │ +│ │ │ │ │ │ +│ │ │ ▼ │ │ +│ │ │ ┌──────────────┐│ │ +│ │ │ │ do_commands ││ │ +│ │ │ └──────────────┘│ │ +│ │ │ │ │ │ +│ │ │ ▼ │ │ +│ │ │ DoHandler │ │ +│ │ │ │ │ │ +│ │ │ ▼ │ │ +│ │ │ Task Tree ──────┘ │ +│ │ │ │ │ +│ │ │ ▼ │ +│ │ │ Execute ──→ Success ──→ Verify ──→ Done │ +│ │ │ │ │ +│ │ │ ▼ │ +│ │ │ Failure ──→ Diagnose ──→ Fix ──→ Retry │ +│ │ │ │ +│ │ ▼ │ +│ │ ┌────────────┐ │ +│ │ │ answer │ ──→ Display to user │ +│ │ └────────────┘ │ +│ │ │ │ +│ ▼ ▼ │ +│ ┌─────────────────────┐ │ +│ │ Session Database │ │ +│ │ ~/.cortex/do_runs.db │ +│ └─────────────────────┘ │ +└─────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## Usage Examples + +### Basic Usage + +```bash +# Start interactive session +cortex ask --do + +# One-shot command +cortex ask --do "install docker and run hello-world" +``` + +### 
With Terminal Monitoring + +```bash +# Terminal 1: Start Cortex +cortex ask --do +> install nginx with ssl + +# Terminal 2: Run sudo commands shown by Cortex +$ sudo apt install nginx +$ sudo systemctl start nginx +``` + +### Check History + +```bash +# View do history +cortex do history + +# Shows: +# Session: sess_20260116_141500 (3 runs) +# Run 1: install nginx - SUCCESS +# Run 2: configure nginx - SUCCESS +# Run 3: test nginx - SUCCESS +``` + +--- + +## Configuration + +### Environment Variables + +| Variable | Purpose | Default | +|----------|---------|---------| +| `ANTHROPIC_API_KEY` | Claude API key | Required | +| `CORTEX_TERMINAL` | Marks Cortex's own terminal | Set automatically | +| `CORTEX_DO_TIMEOUT` | Command timeout (seconds) | 120 | + +### Watch Service + +```bash +# Install (recommended) +cortex watch --install --service + +# Check status +cortex watch --status + +# View logs +journalctl --user -u cortex-watch +cat ~/.cortex/watch_service.log +``` + +--- + +## Troubleshooting + +### Terminal monitoring not working + +1. Check if service is running: `cortex watch --status` +2. Check hook is in .bashrc: `grep "Cortex Terminal Watch" ~/.bashrc` +3. For existing terminals, run: `source ~/.cortex/watch_hook.sh` + +### Commands not being detected + +1. Check watch log: `cat ~/.cortex/terminal_watch.log` +2. Ensure format is `TTY|COMMAND` (e.g., `pts_1|ls -la`) +3. Restart service: `systemctl --user restart cortex-watch` + +### Auto-fix not working + +1. Check if command requires sudo (auto-fix can't run sudo) +2. Check error diagnosis: Look for `⚠ Fix requires manual execution` +3. 
Run suggested commands manually in another terminal + +--- + +## See Also + +- [LLM Integration](./LLM_INTEGRATION.md) +- [Error Handling](./modules/README_ERROR_PARSER.md) +- [Verification System](./modules/README_VERIFICATION.md) +- [Troubleshooting Guide](./TROUBLESHOOTING.md) + diff --git a/scripts/setup_ask_do.py b/scripts/setup_ask_do.py new file mode 100755 index 000000000..e593a2e08 --- /dev/null +++ b/scripts/setup_ask_do.py @@ -0,0 +1,637 @@ +#!/usr/bin/env python3 +""" +Setup script for Cortex `ask --do` command. + +This script sets up everything needed for the AI-powered command execution: +1. Installs required Python dependencies +2. Sets up Ollama Docker container with a small model +3. Installs and starts the Cortex Watch service +4. Configures shell hooks for terminal monitoring + +Usage: + python scripts/setup_ask_do.py [--no-docker] [--model MODEL] [--skip-watch] + +Options: + --no-docker Skip Docker/Ollama setup (use cloud LLM only) + --model MODEL Ollama model to install (default: mistral) + --skip-watch Skip watch service installation + --uninstall Remove all ask --do components +""" + +import argparse +import os +import shutil +import subprocess +import sys +import time +from pathlib import Path + + +# ANSI colors +class Colors: + HEADER = '\033[95m' + BLUE = '\033[94m' + CYAN = '\033[96m' + GREEN = '\033[92m' + YELLOW = '\033[93m' + RED = '\033[91m' + BOLD = '\033[1m' + DIM = '\033[2m' + END = '\033[0m' + + +def print_header(text: str): + """Print a section header.""" + print(f"\n{Colors.BOLD}{Colors.CYAN}{'═' * 60}{Colors.END}") + print(f"{Colors.BOLD}{Colors.CYAN} {text}{Colors.END}") + print(f"{Colors.BOLD}{Colors.CYAN}{'═' * 60}{Colors.END}\n") + + +def print_step(text: str): + """Print a step.""" + print(f"{Colors.BLUE}▶{Colors.END} {text}") + + +def print_success(text: str): + """Print success message.""" + print(f"{Colors.GREEN}✓{Colors.END} {text}") + + +def print_warning(text: str): + """Print warning message.""" + 
print(f"{Colors.YELLOW}⚠{Colors.END} {text}") + + +def print_error(text: str): + """Print error message.""" + print(f"{Colors.RED}✗{Colors.END} {text}") + + +def run_cmd(cmd: list[str], check: bool = True, capture: bool = False, timeout: int = 300) -> subprocess.CompletedProcess: + """Run a command and return the result.""" + try: + result = subprocess.run( + cmd, + check=check, + capture_output=capture, + text=True, + timeout=timeout + ) + return result + except subprocess.CalledProcessError as e: + if capture: + print_error(f"Command failed: {' '.join(cmd)}") + if e.stderr: + print(f" {Colors.DIM}{e.stderr[:200]}{Colors.END}") + raise + except subprocess.TimeoutExpired: + print_error(f"Command timed out: {' '.join(cmd)}") + raise + + +def check_docker() -> bool: + """Check if Docker is installed and running.""" + try: + result = run_cmd(["docker", "info"], capture=True, check=False) + return result.returncode == 0 + except FileNotFoundError: + return False + + +def check_ollama_container() -> tuple[bool, bool]: + """Check if Ollama container exists and is running. 
+ + Returns: (exists, running) + """ + try: + result = run_cmd( + ["docker", "ps", "-a", "--filter", "name=ollama", "--format", "{{.Status}}"], + capture=True, + check=False + ) + if result.returncode != 0 or not result.stdout.strip(): + return False, False + + status = result.stdout.strip().lower() + running = "up" in status + return True, running + except Exception: + return False, False + + +def setup_ollama(model: str = "mistral") -> bool: + """Set up Ollama Docker container and pull a model.""" + print_header("Setting up Ollama (Local LLM)") + + # Check Docker + print_step("Checking Docker...") + if not check_docker(): + print_error("Docker is not installed or not running") + print(f" {Colors.DIM}Install Docker: https://docs.docker.com/get-docker/{Colors.END}") + print(f" {Colors.DIM}Then run: sudo systemctl start docker{Colors.END}") + return False + print_success("Docker is available") + + # Check existing container + exists, running = check_ollama_container() + + if exists and running: + print_success("Ollama container is already running") + elif exists and not running: + print_step("Starting existing Ollama container...") + run_cmd(["docker", "start", "ollama"]) + print_success("Ollama container started") + else: + # Pull and run Ollama + print_step("Pulling Ollama Docker image...") + run_cmd(["docker", "pull", "ollama/ollama"]) + print_success("Ollama image pulled") + + print_step("Starting Ollama container...") + run_cmd([ + "docker", "run", "-d", + "--name", "ollama", + "-p", "11434:11434", + "-v", "ollama:/root/.ollama", + "--restart", "unless-stopped", + "ollama/ollama" + ]) + print_success("Ollama container started") + + # Wait for container to be ready + print_step("Waiting for Ollama to initialize...") + time.sleep(5) + + # Check if model exists + print_step(f"Checking for {model} model...") + try: + result = run_cmd( + ["docker", "exec", "ollama", "ollama", "list"], + capture=True, + check=False + ) + if model in result.stdout: + 
print_success(f"Model {model} is already installed") + return True + except Exception: + pass + + # Pull model + print_step(f"Pulling {model} model (this may take a few minutes)...") + print(f" {Colors.DIM}Model size: ~4GB for mistral, ~2GB for phi{Colors.END}") + + try: + # Use subprocess directly for streaming output + process = subprocess.Popen( + ["docker", "exec", "ollama", "ollama", "pull", model], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True + ) + + for line in process.stdout: + line = line.strip() + if line: + # Show progress + if "pulling" in line.lower() or "%" in line: + print(f"\r {Colors.DIM}{line[:70]}{Colors.END}", end="", flush=True) + + process.wait() + print() # New line after progress + + if process.returncode == 0: + print_success(f"Model {model} installed successfully") + return True + else: + print_error(f"Failed to pull model {model}") + return False + + except Exception as e: + print_error(f"Error pulling model: {e}") + return False + + +def setup_watch_service() -> bool: + """Install and start the Cortex Watch service.""" + print_header("Setting up Cortex Watch Service") + + # Check if service is already installed + service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" + + if service_file.exists(): + print_step("Watch service is already installed, checking status...") + result = run_cmd( + ["systemctl", "--user", "is-active", "cortex-watch.service"], + capture=True, + check=False + ) + if result.stdout.strip() == "active": + print_success("Cortex Watch service is running") + return True + else: + print_step("Starting watch service...") + run_cmd(["systemctl", "--user", "start", "cortex-watch.service"], check=False) + else: + # Install the service + print_step("Installing Cortex Watch service...") + + try: + # Import and run the installation + from cortex.watch_service import install_service + success, msg = install_service() + + if success: + print_success("Watch service installed and 
started") + print(f" {Colors.DIM}{msg[:200]}...{Colors.END}" if len(msg) > 200 else f" {Colors.DIM}{msg}{Colors.END}") + else: + print_error(f"Failed to install watch service: {msg}") + return False + + except ImportError: + print_warning("Could not import watch_service module") + print_step("Installing via CLI...") + + result = run_cmd( + ["cortex", "watch", "--install", "--service"], + capture=True, + check=False + ) + if result.returncode == 0: + print_success("Watch service installed via CLI") + else: + print_error("Failed to install watch service") + return False + + # Verify service is running + result = run_cmd( + ["systemctl", "--user", "is-active", "cortex-watch.service"], + capture=True, + check=False + ) + if result.stdout.strip() == "active": + print_success("Watch service is active and monitoring terminals") + return True + else: + print_warning("Watch service installed but not running") + return True # Still return True as installation succeeded + + +def setup_shell_hooks() -> bool: + """Set up shell hooks for terminal monitoring.""" + print_header("Setting up Shell Hooks") + + cortex_dir = Path.home() / ".cortex" + cortex_dir.mkdir(parents=True, exist_ok=True) + + # Create watch hook script + hook_file = cortex_dir / "watch_hook.sh" + hook_content = '''#!/bin/bash +# Cortex Terminal Watch Hook +# This hook logs commands for Cortex to monitor during manual intervention + +__cortex_last_histnum="" +__cortex_log_cmd() { + local histnum="$(history 1 | awk '{print $1}')" + [[ "$histnum" == "$__cortex_last_histnum" ]] && return + __cortex_last_histnum="$histnum" + + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" + [[ -z "${cmd// /}" ]] && return + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *"source"*".cortex"* ]] && return + [[ "$cmd" == *"watch_hook"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && return + + # Include terminal ID (TTY) in the log + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo 
"${tty_name:-unknown}|$cmd" >> ~/.cortex/terminal_watch.log +} +export PROMPT_COMMAND='history -a; __cortex_log_cmd' +echo "✓ Cortex is now watching this terminal" +''' + + print_step("Creating watch hook script...") + hook_file.write_text(hook_content) + hook_file.chmod(0o755) + print_success(f"Created {hook_file}") + + # Add to .bashrc if not already present + bashrc = Path.home() / ".bashrc" + marker = "# Cortex Terminal Watch Hook" + + if bashrc.exists(): + content = bashrc.read_text() + if marker not in content: + print_step("Adding hook to .bashrc...") + + bashrc_addition = f''' +{marker} +__cortex_last_histnum="" +__cortex_log_cmd() {{ + local histnum="$(history 1 | awk '{{print $1}}')" + [[ "$histnum" == "$__cortex_last_histnum" ]] && return + __cortex_last_histnum="$histnum" + + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" + [[ -z "${{cmd// /}}" ]] && return + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *"source"*".cortex"* ]] && return + [[ "$cmd" == *"watch_hook"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && return + + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo "${{tty_name:-unknown}}|$cmd" >> ~/.cortex/terminal_watch.log +}} +export PROMPT_COMMAND='history -a; __cortex_log_cmd' + +alias cw="source ~/.cortex/watch_hook.sh" +''' + with open(bashrc, "a") as f: + f.write(bashrc_addition) + print_success("Hook added to .bashrc") + else: + print_success("Hook already in .bashrc") + + # Add to .zshrc if it exists + zshrc = Path.home() / ".zshrc" + if zshrc.exists(): + content = zshrc.read_text() + if marker not in content: + print_step("Adding hook to .zshrc...") + + zshrc_addition = f''' +{marker} +typeset -g __cortex_last_cmd="" +cortex_watch_hook() {{ + local cmd="$(fc -ln -1 | sed 's/^[[:space:]]*//')" + [[ -z "$cmd" ]] && return + [[ "$cmd" == "$__cortex_last_cmd" ]] && return + __cortex_last_cmd="$cmd" + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *".cortex"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && 
return + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo "${{tty_name:-unknown}}|$cmd" >> ~/.cortex/terminal_watch.log +}} +precmd_functions+=(cortex_watch_hook) +''' + with open(zshrc, "a") as f: + f.write(zshrc_addition) + print_success("Hook added to .zshrc") + else: + print_success("Hook already in .zshrc") + + return True + + +def check_api_keys() -> dict[str, bool]: + """Check for available API keys.""" + print_header("Checking API Keys") + + keys = { + "ANTHROPIC_API_KEY": False, + "OPENAI_API_KEY": False, + } + + # Check environment variables + for key in keys: + if os.environ.get(key): + keys[key] = True + print_success(f"{key} found in environment") + + # Check .env file + env_file = Path.cwd() / ".env" + if env_file.exists(): + content = env_file.read_text() + for key in keys: + if key in content and not keys[key]: + keys[key] = True + print_success(f"{key} found in .env file") + + # Report missing keys + if not any(keys.values()): + print_warning("No API keys found") + print(f" {Colors.DIM}For cloud LLM, set ANTHROPIC_API_KEY or OPENAI_API_KEY{Colors.END}") + print(f" {Colors.DIM}Or use local Ollama (--no-docker to skip){Colors.END}") + + return keys + + +def verify_installation() -> bool: + """Verify the installation is working.""" + print_header("Verifying Installation") + + all_good = True + + # Check cortex command + print_step("Checking cortex command...") + result = run_cmd(["cortex", "--version"], capture=True, check=False) + if result.returncode == 0: + print_success(f"Cortex installed: {result.stdout.strip()}") + else: + print_error("Cortex command not found") + all_good = False + + # Check watch service + print_step("Checking watch service...") + result = run_cmd( + ["systemctl", "--user", "is-active", "cortex-watch.service"], + capture=True, + check=False + ) + if result.stdout.strip() == "active": + print_success("Watch service is running") + else: + print_warning("Watch service is not running") + + # Check Ollama + 
print_step("Checking Ollama...") + exists, running = check_ollama_container() + if running: + print_success("Ollama container is running") + + # Check if model is available + result = run_cmd( + ["docker", "exec", "ollama", "ollama", "list"], + capture=True, + check=False + ) + if result.returncode == 0 and result.stdout.strip(): + models = [line.split()[0] for line in result.stdout.strip().split('\n')[1:] if line.strip()] + if models: + print_success(f"Models available: {', '.join(models[:3])}") + elif exists: + print_warning("Ollama container exists but not running") + else: + print_warning("Ollama not installed (will use cloud LLM)") + + # Check API keys + api_keys = check_api_keys() + has_llm = any(api_keys.values()) or running + + if not has_llm: + print_error("No LLM available (need API key or Ollama)") + all_good = False + + return all_good + + +def uninstall() -> bool: + """Remove all ask --do components.""" + print_header("Uninstalling Cortex ask --do Components") + + # Stop and remove watch service + print_step("Removing watch service...") + run_cmd(["systemctl", "--user", "stop", "cortex-watch.service"], check=False) + run_cmd(["systemctl", "--user", "disable", "cortex-watch.service"], check=False) + + service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" + if service_file.exists(): + service_file.unlink() + print_success("Watch service removed") + + # Remove shell hooks from .bashrc and .zshrc + marker = "# Cortex Terminal Watch Hook" + for rc_file in [Path.home() / ".bashrc", Path.home() / ".zshrc"]: + if rc_file.exists(): + content = rc_file.read_text() + if marker in content: + print_step(f"Removing hook from {rc_file.name}...") + lines = content.split('\n') + new_lines = [] + skip = False + for line in lines: + if marker in line: + skip = True + elif skip and line.strip() == '': + skip = False + continue + elif not skip: + new_lines.append(line) + rc_file.write_text('\n'.join(new_lines)) + print_success(f"Hook removed 
from {rc_file.name}") + + # Remove cortex directory files (but keep config) + cortex_dir = Path.home() / ".cortex" + files_to_remove = [ + "watch_hook.sh", + "terminal_watch.log", + "terminal_commands.json", + "watch_service.log", + "watch_service.pid", + "watch_state.json", + ] + for filename in files_to_remove: + filepath = cortex_dir / filename + if filepath.exists(): + filepath.unlink() + print_success("Cortex watch files removed") + + # Optionally remove Ollama container + exists, _ = check_ollama_container() + if exists: + print_step("Ollama container found") + response = input(" Remove Ollama container and data? [y/N]: ").strip().lower() + if response == 'y': + run_cmd(["docker", "stop", "ollama"], check=False) + run_cmd(["docker", "rm", "ollama"], check=False) + run_cmd(["docker", "volume", "rm", "ollama"], check=False) + print_success("Ollama container and data removed") + else: + print(f" {Colors.DIM}Keeping Ollama container{Colors.END}") + + print_success("Uninstallation complete") + return True + + +def main(): + parser = argparse.ArgumentParser( + description="Setup script for Cortex ask --do command", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=""" +Examples: + python scripts/setup_ask_do.py # Full setup with Ollama + python scripts/setup_ask_do.py --no-docker # Skip Docker/Ollama setup + python scripts/setup_ask_do.py --model phi # Use smaller phi model + python scripts/setup_ask_do.py --uninstall # Remove all components +""" + ) + parser.add_argument("--no-docker", action="store_true", help="Skip Docker/Ollama setup") + parser.add_argument("--model", default="mistral", help="Ollama model to install (default: mistral)") + parser.add_argument("--skip-watch", action="store_true", help="Skip watch service installation") + parser.add_argument("--uninstall", action="store_true", help="Remove all ask --do components") + + args = parser.parse_args() + + print(f"\n{Colors.BOLD}{Colors.CYAN}") + print(" ██████╗ ██████╗ ██████╗ 
████████╗███████╗██╗ ██╗") + print(" ██╔════╝██╔═══██╗██╔══██╗╚══██╔══╝██╔════╝╚██╗██╔╝") + print(" ██║ ██║ ██║██████╔╝ ██║ █████╗ ╚███╔╝ ") + print(" ██║ ██║ ██║██╔══██╗ ██║ ██╔══╝ ██╔██╗ ") + print(" ╚██████╗╚██████╔╝██║ ██║ ██║ ███████╗██╔╝ ██╗") + print(" ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝") + print(f"{Colors.END}") + print(f" {Colors.DIM}ask --do Setup Wizard{Colors.END}\n") + + if args.uninstall: + return 0 if uninstall() else 1 + + success = True + + # Step 1: Check API keys + api_keys = check_api_keys() + + # Step 2: Setup Ollama (unless skipped) + if not args.no_docker: + if not setup_ollama(args.model): + if not any(api_keys.values()): + print_error("No LLM available - need either Ollama or API key") + success = False + else: + print_warning("Skipping Docker/Ollama setup (--no-docker)") + if not any(api_keys.values()): + print_warning("No API keys found - you'll need to set one up") + + # Step 3: Setup watch service + if not args.skip_watch: + if not setup_watch_service(): + print_warning("Watch service setup had issues") + else: + print_warning("Skipping watch service (--skip-watch)") + + # Step 4: Setup shell hooks + setup_shell_hooks() + + # Step 5: Verify installation + if verify_installation(): + print_header("Setup Complete! 
🎉") + print(f""" +{Colors.GREEN}Everything is ready!{Colors.END} + +{Colors.BOLD}To use Cortex ask --do:{Colors.END} + cortex ask --do + +{Colors.BOLD}To start an interactive session:{Colors.END} + cortex ask --do "install nginx and configure it" + +{Colors.BOLD}For terminal monitoring in existing terminals:{Colors.END} + source ~/.cortex/watch_hook.sh + {Colors.DIM}(or just type 'cw' after opening a new terminal){Colors.END} + +{Colors.BOLD}To check status:{Colors.END} + cortex watch --status +""") + return 0 + else: + print_header("Setup Completed with Warnings") + print(f""" +{Colors.YELLOW}Some components may need attention.{Colors.END} + +Run {Colors.CYAN}cortex watch --status{Colors.END} to check the current state. +""") + return 1 + + +if __name__ == "__main__": + sys.exit(main()) + diff --git a/scripts/setup_ask_do.sh b/scripts/setup_ask_do.sh new file mode 100755 index 000000000..0d1fa40ba --- /dev/null +++ b/scripts/setup_ask_do.sh @@ -0,0 +1,435 @@ +#!/bin/bash +# +# Cortex ask --do Setup Script +# +# This script sets up everything needed for the AI-powered command execution: +# - Ollama Docker container with a local LLM +# - Cortex Watch service for terminal monitoring +# - Shell hooks for command logging +# +# Usage: +# ./scripts/setup_ask_do.sh [options] +# +# Options: +# --no-docker Skip Docker/Ollama setup +# --model MODEL Ollama model (default: mistral, alternatives: phi, llama2) +# --skip-watch Skip watch service installation +# --uninstall Remove all components +# + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +CYAN='\033[0;36m' +BOLD='\033[1m' +DIM='\033[2m' +NC='\033[0m' # No Color + +# Defaults +MODEL="mistral" +NO_DOCKER=false +SKIP_WATCH=false +UNINSTALL=false + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --no-docker) + NO_DOCKER=true + shift + ;; + --model) + MODEL="$2" + shift 2 + ;; + --skip-watch) + SKIP_WATCH=true + shift + ;; + --uninstall) + UNINSTALL=true + shift + 
;; + -h|--help) + echo "Usage: $0 [options]" + echo "" + echo "Options:" + echo " --no-docker Skip Docker/Ollama setup" + echo " --model MODEL Ollama model (default: mistral)" + echo " --skip-watch Skip watch service installation" + echo " --uninstall Remove all components" + exit 0 + ;; + *) + echo -e "${RED}Unknown option: $1${NC}" + exit 1 + ;; + esac +done + +print_header() { + echo -e "\n${BOLD}${CYAN}════════════════════════════════════════════════════════════${NC}" + echo -e "${BOLD}${CYAN} $1${NC}" + echo -e "${BOLD}${CYAN}════════════════════════════════════════════════════════════${NC}\n" +} + +print_step() { + echo -e "${BLUE}▶${NC} $1" +} + +print_success() { + echo -e "${GREEN}✓${NC} $1" +} + +print_warning() { + echo -e "${YELLOW}⚠${NC} $1" +} + +print_error() { + echo -e "${RED}✗${NC} $1" +} + +# Banner +echo -e "\n${BOLD}${CYAN}" +echo " ██████╗ ██████╗ ██████╗ ████████╗███████╗██╗ ██╗" +echo " ██╔════╝██╔═══██╗██╔══██╗╚══██╔══╝██╔════╝╚██╗██╔╝" +echo " ██║ ██║ ██║██████╔╝ ██║ █████╗ ╚███╔╝ " +echo " ██║ ██║ ██║██╔══██╗ ██║ ██╔══╝ ██╔██╗ " +echo " ╚██████╗╚██████╔╝██║ ██║ ██║ ███████╗██╔╝ ██╗" +echo " ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝" +echo -e "${NC}" +echo -e " ${DIM}ask --do Setup Wizard${NC}\n" + +# Uninstall +if [ "$UNINSTALL" = true ]; then + print_header "Uninstalling Cortex ask --do Components" + + # Stop watch service + print_step "Stopping watch service..." + systemctl --user stop cortex-watch.service 2>/dev/null || true + systemctl --user disable cortex-watch.service 2>/dev/null || true + rm -f ~/.config/systemd/user/cortex-watch.service + systemctl --user daemon-reload + print_success "Watch service removed" + + # Remove shell hooks + print_step "Removing shell hooks..." 
+ if [ -f ~/.bashrc ]; then + sed -i '/# Cortex Terminal Watch Hook/,/^$/d' ~/.bashrc + sed -i '/alias cw=/d' ~/.bashrc + fi + if [ -f ~/.zshrc ]; then + sed -i '/# Cortex Terminal Watch Hook/,/^$/d' ~/.zshrc + fi + print_success "Shell hooks removed" + + # Remove cortex files + print_step "Removing cortex watch files..." + rm -f ~/.cortex/watch_hook.sh + rm -f ~/.cortex/terminal_watch.log + rm -f ~/.cortex/terminal_commands.json + rm -f ~/.cortex/watch_service.log + rm -f ~/.cortex/watch_service.pid + rm -f ~/.cortex/watch_state.json + print_success "Watch files removed" + + # Ask about Ollama + if docker ps -a --format '{{.Names}}' | grep -q '^ollama$'; then + print_step "Ollama container found" + read -p " Remove Ollama container and data? [y/N]: " -n 1 -r + echo + if [[ $REPLY =~ ^[Yy]$ ]]; then + docker stop ollama 2>/dev/null || true + docker rm ollama 2>/dev/null || true + docker volume rm ollama 2>/dev/null || true + print_success "Ollama removed" + fi + fi + + print_success "Uninstallation complete" + exit 0 +fi + +# Check Python environment +print_header "Checking Environment" + +print_step "Checking Python..." +if command -v python3 &> /dev/null; then + PYTHON_VERSION=$(python3 --version 2>&1) + print_success "Python installed: $PYTHON_VERSION" +else + print_error "Python 3 not found" + exit 1 +fi + +# Check if in virtual environment +if [ -z "$VIRTUAL_ENV" ]; then + print_warning "Not in a virtual environment" + if [ -f "venv/bin/activate" ]; then + print_step "Activating venv..." + source venv/bin/activate + print_success "Activated venv" + else + print_warning "Consider running: python3 -m venv venv && source venv/bin/activate" + fi +else + print_success "Virtual environment active: $VIRTUAL_ENV" +fi + +# Check cortex installation +print_step "Checking Cortex installation..." +if command -v cortex &> /dev/null; then + print_success "Cortex is installed" +else + print_warning "Cortex not found in PATH, installing..." + pip install -e . 
-q + print_success "Cortex installed" +fi + +# Setup Ollama +if [ "$NO_DOCKER" = false ]; then + print_header "Setting up Ollama (Local LLM)" + + print_step "Checking Docker..." + if ! command -v docker &> /dev/null; then + print_error "Docker is not installed" + echo -e " ${DIM}Install Docker: https://docs.docker.com/get-docker/${NC}" + NO_DOCKER=true + elif ! docker info &> /dev/null; then + print_error "Docker daemon is not running" + echo -e " ${DIM}Run: sudo systemctl start docker${NC}" + NO_DOCKER=true + else + print_success "Docker is available" + + # Check Ollama container + if docker ps --format '{{.Names}}' | grep -q '^ollama$'; then + print_success "Ollama container is running" + elif docker ps -a --format '{{.Names}}' | grep -q '^ollama$'; then + print_step "Starting Ollama container..." + docker start ollama + print_success "Ollama started" + else + print_step "Pulling Ollama image..." + docker pull ollama/ollama + print_success "Ollama image pulled" + + print_step "Starting Ollama container..." + docker run -d \ + --name ollama \ + -p 11434:11434 \ + -v ollama:/root/.ollama \ + --restart unless-stopped \ + ollama/ollama + print_success "Ollama container started" + + sleep 3 + fi + + # Check model + print_step "Checking for $MODEL model..." + if docker exec ollama ollama list 2>/dev/null | grep -q "$MODEL"; then + print_success "Model $MODEL is installed" + else + print_step "Pulling $MODEL model (this may take a few minutes)..." + echo -e " ${DIM}Model size: ~4GB for mistral, ~2GB for phi${NC}" + docker exec ollama ollama pull "$MODEL" + print_success "Model $MODEL installed" + fi + fi +else + print_warning "Skipping Docker/Ollama setup (--no-docker)" +fi + +# Setup Watch Service +if [ "$SKIP_WATCH" = false ]; then + print_header "Setting up Cortex Watch Service" + + print_step "Installing watch service..." 
+ cortex watch --install --service 2>/dev/null || { + # Manual installation if CLI fails + mkdir -p ~/.config/systemd/user + + # Get Python path + PYTHON_PATH=$(which python3) + CORTEX_PATH=$(which cortex 2>/dev/null || echo "$HOME/.local/bin/cortex") + + cat > ~/.config/systemd/user/cortex-watch.service << EOF +[Unit] +Description=Cortex Terminal Watch Service +After=default.target + +[Service] +Type=simple +ExecStart=$PYTHON_PATH -m cortex.watch_service +Restart=always +RestartSec=5 +Environment=PATH=$HOME/.local/bin:/usr/local/bin:/usr/bin:/bin +WorkingDirectory=$HOME + +[Install] +WantedBy=default.target +EOF + + systemctl --user daemon-reload + systemctl --user enable cortex-watch.service + systemctl --user start cortex-watch.service + } + + sleep 2 + + if systemctl --user is-active cortex-watch.service &> /dev/null; then + print_success "Watch service is running" + else + print_warning "Watch service installed but may need attention" + echo -e " ${DIM}Check with: systemctl --user status cortex-watch.service${NC}" + fi +else + print_warning "Skipping watch service (--skip-watch)" +fi + +# Setup Shell Hooks +print_header "Setting up Shell Hooks" + +CORTEX_DIR="$HOME/.cortex" +mkdir -p "$CORTEX_DIR" + +# Create watch hook +print_step "Creating watch hook script..." 
+cat > "$CORTEX_DIR/watch_hook.sh" << 'EOF' +#!/bin/bash +# Cortex Terminal Watch Hook + +__cortex_last_histnum="" +__cortex_log_cmd() { + local histnum="$(history 1 | awk '{print $1}')" + [[ "$histnum" == "$__cortex_last_histnum" ]] && return + __cortex_last_histnum="$histnum" + + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" + [[ -z "${cmd// /}" ]] && return + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *"source"*".cortex"* ]] && return + [[ "$cmd" == *"watch_hook"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && return + + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo "${tty_name:-unknown}|$cmd" >> ~/.cortex/terminal_watch.log +} +export PROMPT_COMMAND='history -a; __cortex_log_cmd' +echo "✓ Cortex is now watching this terminal" +EOF +chmod +x "$CORTEX_DIR/watch_hook.sh" +print_success "Created watch hook script" + +# Add to .bashrc +MARKER="# Cortex Terminal Watch Hook" +if [ -f ~/.bashrc ]; then + if ! grep -q "$MARKER" ~/.bashrc; then + print_step "Adding hook to .bashrc..." 
+ cat >> ~/.bashrc << 'EOF' + +# Cortex Terminal Watch Hook +__cortex_last_histnum="" +__cortex_log_cmd() { + local histnum="$(history 1 | awk '{print $1}')" + [[ "$histnum" == "$__cortex_last_histnum" ]] && return + __cortex_last_histnum="$histnum" + + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" + [[ -z "${cmd// /}" ]] && return + [[ "$cmd" == cortex* ]] && return + [[ "$cmd" == *"source"*".cortex"* ]] && return + [[ "$cmd" == *"watch_hook"* ]] && return + [[ -n "$CORTEX_TERMINAL" ]] && return + + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" + echo "${tty_name:-unknown}|$cmd" >> ~/.cortex/terminal_watch.log +} +export PROMPT_COMMAND='history -a; __cortex_log_cmd' + +alias cw="source ~/.cortex/watch_hook.sh" +EOF + print_success "Hook added to .bashrc" + else + print_success "Hook already in .bashrc" + fi +fi + +# Check API keys +print_header "Checking API Keys" + +HAS_API_KEY=false +if [ -n "$ANTHROPIC_API_KEY" ]; then + print_success "ANTHROPIC_API_KEY found in environment" + HAS_API_KEY=true +fi +if [ -n "$OPENAI_API_KEY" ]; then + print_success "OPENAI_API_KEY found in environment" + HAS_API_KEY=true +fi +if [ -f ".env" ]; then + if grep -q "ANTHROPIC_API_KEY" .env || grep -q "OPENAI_API_KEY" .env; then + print_success "API key(s) found in .env file" + HAS_API_KEY=true + fi +fi + +if [ "$HAS_API_KEY" = false ] && [ "$NO_DOCKER" = true ]; then + print_warning "No API keys found and Ollama not set up" + echo -e " ${DIM}Set ANTHROPIC_API_KEY or OPENAI_API_KEY for cloud LLM${NC}" +fi + +# Verify +print_header "Verification" + +print_step "Checking cortex command..." +if cortex --version &> /dev/null; then + print_success "Cortex: $(cortex --version 2>&1)" +else + print_error "Cortex command not working" +fi + +print_step "Checking watch service..." 
+if systemctl --user is-active cortex-watch.service &> /dev/null; then + print_success "Watch service: running" +else + print_warning "Watch service: not running" +fi + +if [ "$NO_DOCKER" = false ]; then + print_step "Checking Ollama..." + if docker ps --format '{{.Names}}' | grep -q '^ollama$'; then + print_success "Ollama: running" + MODELS=$(docker exec ollama ollama list 2>/dev/null | tail -n +2 | awk '{print $1}' | tr '\n' ', ' | sed 's/,$//') + if [ -n "$MODELS" ]; then + print_success "Models: $MODELS" + fi + else + print_warning "Ollama: not running" + fi +fi + +# Final message +print_header "Setup Complete! 🎉" + +echo -e "${GREEN}Everything is ready!${NC}" +echo "" +echo -e "${BOLD}To use Cortex ask --do:${NC}" +echo " cortex ask --do" +echo "" +echo -e "${BOLD}To start an interactive session:${NC}" +echo " cortex ask --do \"install nginx and configure it\"" +echo "" +echo -e "${BOLD}For terminal monitoring in existing terminals:${NC}" +echo " source ~/.cortex/watch_hook.sh" +echo -e " ${DIM}(or just type 'cw' after opening a new terminal)${NC}" +echo "" +echo -e "${BOLD}To check status:${NC}" +echo " cortex watch --status" +echo "" + From a0c952f41695a02a4deab520f03ce3a30c200b1b Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Thu, 22 Jan 2026 07:53:36 +0000 Subject: [PATCH 2/2] [autofix.ci] apply automated fixes --- cortex/ask.py | 2242 ++++--------- cortex/cli.py | 5192 +++++++++++++++++++++++------- cortex/demo.py | 83 +- cortex/do_runner.py | 28 +- cortex/do_runner/__init__.py | 64 +- cortex/do_runner/database.py | 322 +- cortex/do_runner/diagnosis.py | 3212 ++++++++++++------ cortex/do_runner/diagnosis_v2.py | 970 +++--- cortex/do_runner/executor.py | 237 +- cortex/do_runner/handler.py | 2988 +++++++++-------- cortex/do_runner/managers.py | 88 +- cortex/do_runner/models.py | 77 +- cortex/do_runner/terminal.py | 1555 +++++---- cortex/do_runner/verification.py | 782 +++-- 
cortex/semantic_cache.py | 24 +- cortex/system_info_generator.py | 287 +- cortex/watch_service.py | 291 +- scripts/setup_ask_do.py | 248 +- 18 files changed, 11342 insertions(+), 7348 deletions(-) diff --git a/cortex/ask.py b/cortex/ask.py index 2f9954c72..b14914b63 100644 --- a/cortex/ask.py +++ b/cortex/ask.py @@ -1,538 +1,371 @@ """Natural language query interface for Cortex. Handles user questions about installed packages, configurations, -and system state using an agentic LLM loop with command execution. - -The --do mode enables write and execute capabilities with user confirmation -and privilege management. +and system state using LLM with semantic caching. Also provides +educational content and tracks learning progress. """ import json +import logging import os +import platform import re -import shlex +import shutil import sqlite3 import subprocess -from enum import Enum +from datetime import datetime, timezone +from pathlib import Path from typing import Any -from pydantic import BaseModel, Field, field_validator - - -class LLMResponseType(str, Enum): - """Type of response from the LLM.""" - COMMAND = "command" - ANSWER = "answer" - DO_COMMANDS = "do_commands" # For --do mode: commands that modify the system - - -class DoCommand(BaseModel): - """A single command for --do mode with explanation.""" - command: str = Field(description="The shell command to execute") - purpose: str = Field(description="Brief explanation of what this command does") - requires_sudo: bool = Field(default=False, description="Whether this command requires sudo") - - -class SystemCommand(BaseModel): - """Pydantic model for a system command to be executed. - - The LLM must return either a command to execute for data gathering, - or a final answer to the user's question. - In --do mode, it can also return a list of commands to execute. 
- """ - response_type: LLMResponseType = Field( - description="Whether this is a command to execute, a final answer, or do commands" - ) - command: str | None = Field( - default=None, - description="The shell command to execute (only for response_type='command')" - ) - answer: str | None = Field( - default=None, - description="The final answer to the user (only for response_type='answer')" - ) - do_commands: list[DoCommand] | None = Field( - default=None, - description="List of commands to execute (only for response_type='do_commands')" - ) - reasoning: str = Field( - default="", - description="Brief explanation of why this command/answer was chosen" - ) - - @field_validator("command") - @classmethod - def validate_command_not_empty(cls, v: str | None, info) -> str | None: - if info.data.get("response_type") == LLMResponseType.COMMAND: - if not v or not v.strip(): - raise ValueError("Command cannot be empty when response_type is 'command'") - return v - - @field_validator("answer") - @classmethod - def validate_answer_not_empty(cls, v: str | None, info) -> str | None: - if info.data.get("response_type") == LLMResponseType.ANSWER: - if not v or not v.strip(): - raise ValueError("Answer cannot be empty when response_type is 'answer'") - return v - - @field_validator("do_commands") - @classmethod - def validate_do_commands_not_empty(cls, v: list[DoCommand] | None, info) -> list[DoCommand] | None: - if info.data.get("response_type") == LLMResponseType.DO_COMMANDS: - if not v or len(v) == 0: - raise ValueError("do_commands cannot be empty when response_type is 'do_commands'") - return v - - -class CommandValidator: - """Validates and filters commands to ensure they are read-only. - - Only allows commands that fetch data, blocks any that modify the system. 
- """ - - # Commands that are purely read-only and safe - ALLOWED_COMMANDS: set[str] = { - # System info - "uname", "hostname", "uptime", "whoami", "id", "groups", "w", "who", "last", - "date", "cal", "timedatectl", - # File/directory listing (read-only) - "ls", "pwd", "tree", "file", "stat", "readlink", "realpath", "dirname", "basename", - "find", "locate", "which", "whereis", "type", "command", - # Text viewing (read-only) - "cat", "head", "tail", "less", "more", "wc", "nl", "strings", - # Text processing (non-modifying) - "grep", "egrep", "fgrep", "awk", "sed", "cut", "sort", "uniq", "tr", "column", - "diff", "comm", "join", "paste", "expand", "unexpand", "fold", "fmt", - # Package queries (read-only) - "dpkg-query", "dpkg", "apt-cache", "apt-mark", "apt-config", "aptitude", "apt", - "pip3", "pip", "python3", "python", "gem", "npm", "cargo", "go", - # System info commands - "lsb_release", "hostnamectl", "lscpu", "lsmem", "lsblk", "lspci", "lsusb", - "lshw", "dmidecode", "hwinfo", "inxi", - # Process/resource info - "ps", "top", "htop", "pgrep", "pidof", "pstree", "free", "vmstat", "iostat", - "mpstat", "sar", "nproc", "getconf", - # Disk/filesystem info - "df", "du", "mount", "findmnt", "blkid", "lsof", "fuser", "fdisk", - # Network info (read-only) - "ip", "ifconfig", "netstat", "ss", "route", "arp", "ping", "traceroute", - "tracepath", "nslookup", "dig", "host", "getent", "hostname", - # GPU info - "nvidia-smi", "nvcc", "rocm-smi", "clinfo", - # Environment - "env", "printenv", "echo", "printf", - # Systemd info (read-only) - "systemctl", "journalctl", "loginctl", "timedatectl", "localectl", - # Kernel/modules - "uname", "lsmod", "modinfo", "sysctl", - # Misc info - "getconf", "locale", "xdpyinfo", "xrandr", - # Container/virtualization info - "docker", "podman", "kubectl", "crictl", "nerdctl", - "lxc-ls", "virsh", "vboxmanage", - # Development tools (version checks) - "git", "node", "nodejs", "deno", "bun", "ruby", "perl", "php", "java", "javac", - "rustc", 
"gcc", "g++", "clang", "clang++", "make", "cmake", "ninja", "meson", - "dotnet", "mono", "swift", "kotlin", "scala", "groovy", "gradle", "mvn", "ant", - # Database clients (info/version) - "mysql", "psql", "sqlite3", "mongosh", "redis-cli", - # Web/network tools - "curl", "wget", "httpie", "openssl", "ssh", "scp", "rsync", - # Cloud CLIs - "aws", "gcloud", "az", "doctl", "linode-cli", "vultr-cli", - "terraform", "ansible", "vagrant", "packer", - # Other common tools - "jq", "yq", "xmllint", "ffmpeg", "ffprobe", "imagemagick", "convert", - "gh", "hub", "lab", # GitHub/GitLab CLIs - "snap", "flatpak", # For version/list only - "systemd-analyze", "bootctl", - } - - # Version check flags - these make ANY command safe (read-only) - VERSION_FLAGS: set[str] = { - "--version", "-v", "-V", "--help", "-h", "-help", - "version", "help", "--info", "-version", - } - - # Subcommands that are blocked for otherwise allowed commands - BLOCKED_SUBCOMMANDS: dict[str, set[str]] = { - "dpkg": {"--configure", "-i", "--install", "--remove", "-r", "--purge", "-P", - "--unpack", "--clear-avail", "--forget-old-unavail", "--update-avail", - "--merge-avail", "--set-selections", "--clear-selections"}, - "apt-mark": {"auto", "manual", "hold", "unhold", "showauto", "showmanual"}, # only show* are safe - "pip3": {"install", "uninstall", "download", "wheel", "cache"}, - "pip": {"install", "uninstall", "download", "wheel", "cache"}, - "python3": {"-c"}, # Block arbitrary code execution - "python": {"-c"}, - "npm": {"install", "uninstall", "update", "ci", "run", "exec", "init", "publish"}, - "gem": {"install", "uninstall", "update", "cleanup", "pristine"}, - "cargo": {"install", "uninstall", "build", "run", "clean", "publish"}, - "go": {"install", "get", "build", "run", "clean", "mod"}, - "systemctl": {"start", "stop", "restart", "reload", "enable", "disable", - "mask", "unmask", "edit", "set-property", "reset-failed", - "daemon-reload", "daemon-reexec", "kill", "isolate", - "set-default", 
"set-environment", "unset-environment"}, - "mount": {"--bind", "-o", "--move"}, # Block actual mounting - "fdisk": {"-l"}, # Only allow listing (-l), block everything else (inverted logic handled below) - "sysctl": {"-w", "--write", "-p", "--load"}, # Block writes - # Container tools - block modifying commands - "docker": {"run", "exec", "build", "push", "pull", "rm", "rmi", "kill", "stop", "start", - "restart", "pause", "unpause", "create", "commit", "tag", "load", "save", - "import", "export", "login", "logout", "network", "volume", "system", "prune"}, - "podman": {"run", "exec", "build", "push", "pull", "rm", "rmi", "kill", "stop", "start", - "restart", "pause", "unpause", "create", "commit", "tag", "load", "save", - "import", "export", "login", "logout", "network", "volume", "system", "prune"}, - "kubectl": {"apply", "create", "delete", "edit", "patch", "replace", "scale", "exec", - "run", "expose", "set", "rollout", "drain", "cordon", "uncordon", "taint"}, - # Git - block modifying commands - "git": {"push", "commit", "add", "rm", "mv", "reset", "revert", "merge", "rebase", - "checkout", "switch", "restore", "stash", "clean", "init", "clone", "pull", - "fetch", "cherry-pick", "am", "apply"}, - # Cloud CLIs - block modifying commands - "aws": {"s3", "ec2", "iam", "lambda", "rds", "ecs", "eks"}, # Block service commands (allow sts, configure list) - "gcloud": {"compute", "container", "functions", "run", "sql", "storage"}, - # Snap/Flatpak - block modifying commands - "snap": {"install", "remove", "refresh", "revert", "enable", "disable", "set", "unset"}, - "flatpak": {"install", "uninstall", "update", "repair"}, - } - - # Commands that are completely blocked (never allowed, even with --version) - BLOCKED_COMMANDS: set[str] = { - # Dangerous/destructive - "rm", "rmdir", "unlink", "shred", - "mv", "cp", "install", "mkdir", "touch", - # Editors (sed is allowed for text processing, redirections are blocked separately) - "nano", "vim", "vi", "emacs", "ed", - # 
Package modification (apt-get is dangerous, apt is allowed with restrictions) - "apt-get", "dpkg-reconfigure", "update-alternatives", - # System modification - "shutdown", "reboot", "poweroff", "halt", "init", "telinit", - "useradd", "userdel", "usermod", "groupadd", "groupdel", "groupmod", - "passwd", "chpasswd", "chage", - "chmod", "chown", "chgrp", "chattr", "setfacl", - "ln", "mkfifo", "mknod", - # Dangerous utilities - "dd", "mkfs", "fsck", "parted", "gdisk", "cfdisk", "sfdisk", - "kill", "killall", "pkill", - "nohup", "disown", "bg", "fg", - "crontab", "at", "batch", - "su", "sudo", "doas", "pkexec", - # Network modification - "iptables", "ip6tables", "nft", "ufw", "firewall-cmd", - "ifup", "ifdown", "dhclient", - # Shell/code execution - "bash", "sh", "zsh", "fish", "dash", "csh", "tcsh", "ksh", - "eval", "exec", "source", - "xargs", # Can be used to execute arbitrary commands - "tee", # Writes to files - } - - # Patterns that indicate dangerous operations (NOT including safe chaining) - DANGEROUS_PATTERNS: list[str] = [ - r">\s*[^|]", # Output redirection (except pipes) - r">>\s*", # Append redirection - r"<\s*", # Input redirection - r"\$\(", # Command substitution - r"`[^`]+`", # Backtick command substitution - r"\|.*(?:sh|bash|zsh|exec|eval|xargs)", # Piping to shell - ] - - # Chaining patterns that we'll split instead of block - CHAINING_PATTERNS: list[str] = [ - r";\s*", # Command chaining - r"\s*&&\s*", # AND chaining - r"\s*\|\|\s*", # OR chaining - ] - - @classmethod - def split_chained_commands(cls, command: str) -> list[str]: - """Split a chained command into individual commands.""" - # Split by ;, &&, or || - parts = re.split(r'\s*(?:;|&&|\|\|)\s*', command) - return [p.strip() for p in parts if p.strip()] - - @classmethod - def validate_command(cls, command: str) -> tuple[bool, str]: - """Validate a command for safety. 
- - Args: - command: The shell command to validate - - Returns: - Tuple of (is_valid, error_message) - """ - if not command or not command.strip(): - return False, "Empty command" - - command = command.strip() - - # Check for dangerous patterns (NOT chaining - we handle that separately) - for pattern in cls.DANGEROUS_PATTERNS: - if re.search(pattern, command): - return False, f"Command contains blocked pattern (redirections or subshells)" - - # Check if command has chaining - if so, validate each part - has_chaining = any(re.search(p, command) for p in cls.CHAINING_PATTERNS) - if has_chaining: - subcommands = cls.split_chained_commands(command) - for subcmd in subcommands: - is_valid, error = cls._validate_single_command(subcmd) - if not is_valid: - return False, f"In chained command '{subcmd}': {error}" - return True, "" - - return cls._validate_single_command(command) - - @classmethod - def _validate_single_command(cls, command: str) -> tuple[bool, str]: - """Validate a single (non-chained) command.""" - if not command or not command.strip(): - return False, "Empty command" - - command = command.strip() - - # Parse the command +from cortex.config_utils import get_ollama_model + +# Module logger for debug diagnostics +logger = logging.getLogger(__name__) + +# Maximum number of tokens to request from LLM +MAX_TOKENS = 2000 + + +class SystemInfoGatherer: + """Gathers local system information for context-aware responses.""" + + @staticmethod + def get_python_version() -> str: + """Get installed Python version.""" + return platform.python_version() + + @staticmethod + def get_python_path() -> str: + """Get Python executable path.""" + import sys + + return sys.executable + + @staticmethod + def get_os_info() -> dict[str, str]: + """Get OS information.""" + return { + "system": platform.system(), + "release": platform.release(), + "version": platform.version(), + "machine": platform.machine(), + } + + @staticmethod + def get_installed_package(package: str) -> str | 
None: + """Check if a package is installed via apt and return version.""" try: - parts = shlex.split(command) - except ValueError as e: - return False, f"Invalid command syntax: {e}" - - if not parts: - return False, "Empty command" - - # Get base command (handle sudo prefix) - base_cmd = parts[0] - cmd_args = parts[1:] - - if base_cmd == "sudo": - return False, "sudo is not allowed - only read-only commands permitted" - - # Check if this is a version/help check - these are always safe - # Allow ANY command if it only has version/help flags - if cmd_args and all(arg in cls.VERSION_FLAGS for arg in cmd_args): - return True, "" # Safe: just checking version/help - - # Also allow if first arg is a version flag (e.g., "docker --version" or "git version") - if cmd_args and cmd_args[0] in cls.VERSION_FLAGS: - return True, "" # Safe: version/help check - - # Check if command is completely blocked (unless it's a version check) - if base_cmd in cls.BLOCKED_COMMANDS: - return False, f"Command '{base_cmd}' is not allowed - it can modify the system" - - # Check if command is in allowed list - if base_cmd not in cls.ALLOWED_COMMANDS: - return False, f"Command '{base_cmd}' is not in the allowed list of read-only commands" - - # Check for blocked subcommands - if base_cmd in cls.BLOCKED_SUBCOMMANDS: - blocked = cls.BLOCKED_SUBCOMMANDS[base_cmd] - for arg in cmd_args: - # Handle fdisk specially - only -l is allowed - if base_cmd == "fdisk": - if arg not in ["-l", "--list"]: - return False, f"fdisk only allows -l/--list for listing partitions" - elif arg in blocked: - return False, f"Subcommand '{arg}' is not allowed for '{base_cmd}' - it can modify the system" - - # Special handling for pip/pip3 - only allow show, list, freeze, check, config - if base_cmd in ["pip", "pip3"]: - if cmd_args: - allowed_pip_cmds = {"show", "list", "freeze", "check", "config", "--version", "-V", "help", "--help"} - if cmd_args[0] not in allowed_pip_cmds: - return False, f"pip command '{cmd_args[0]}' is 
not allowed - only read-only commands like 'show', 'list', 'freeze' are permitted" - - # Special handling for apt-mark - only showhold, showauto, showmanual - if base_cmd == "apt-mark": - if cmd_args: - allowed_apt_mark = {"showhold", "showauto", "showmanual"} - if cmd_args[0] not in allowed_apt_mark: - return False, f"apt-mark command '{cmd_args[0]}' is not allowed - only showhold, showauto, showmanual are permitted" - - # Special handling for docker/podman - allow info and list commands - if base_cmd in ["docker", "podman"]: - if cmd_args: - allowed_docker_cmds = { - "ps", "images", "info", "version", "inspect", "logs", "top", "stats", - "port", "diff", "history", "search", "events", "container", "image", - "--version", "-v", "help", "--help", - } - # Also allow "container ls", "image ls", etc. - if cmd_args[0] not in allowed_docker_cmds: - return False, f"docker command '{cmd_args[0]}' is not allowed - only read-only commands like 'ps', 'images', 'info', 'inspect', 'logs' are permitted" - # Check container/image subcommands - if cmd_args[0] in ["container", "image"] and len(cmd_args) > 1: - allowed_sub = {"ls", "list", "inspect", "history", "prune"} # prune for info only - if cmd_args[1] not in allowed_sub and cmd_args[1] not in cls.VERSION_FLAGS: - return False, f"docker {cmd_args[0]} '{cmd_args[1]}' is not allowed - only ls, list, inspect are permitted" - - # Special handling for kubectl - allow get, describe, logs - if base_cmd == "kubectl": - if cmd_args: - allowed_kubectl_cmds = { - "get", "describe", "logs", "top", "cluster-info", "config", "version", - "api-resources", "api-versions", "explain", "auth", - "--version", "-v", "help", "--help", - } - if cmd_args[0] not in allowed_kubectl_cmds: - return False, f"kubectl command '{cmd_args[0]}' is not allowed - only read-only commands like 'get', 'describe', 'logs' are permitted" - - # Special handling for git - allow status, log, show, diff, branch, remote, config (get) - if base_cmd == "git": - if cmd_args: 
- allowed_git_cmds = { - "status", "log", "show", "diff", "branch", "remote", "tag", "describe", - "ls-files", "ls-tree", "ls-remote", "rev-parse", "rev-list", "cat-file", - "config", "shortlog", "blame", "annotate", "grep", "reflog", - "version", "--version", "-v", "help", "--help", - } - if cmd_args[0] not in allowed_git_cmds: - return False, f"git command '{cmd_args[0]}' is not allowed - only read-only commands like 'status', 'log', 'diff', 'branch' are permitted" - # Block git config --set/--add - if cmd_args[0] == "config" and any(a in cmd_args for a in ["--add", "--unset", "--remove-section", "--rename-section"]): - return False, "git config modifications are not allowed" - - # Special handling for snap/flatpak - allow list and info commands - if base_cmd == "snap": - if cmd_args: - allowed_snap = {"list", "info", "find", "version", "connections", "services", "logs", "--version", "help", "--help"} - if cmd_args[0] not in allowed_snap: - return False, f"snap command '{cmd_args[0]}' is not allowed - only list, info, find are permitted" - - if base_cmd == "flatpak": - if cmd_args: - allowed_flatpak = {"list", "info", "search", "remote-ls", "remotes", "history", "--version", "help", "--help"} - if cmd_args[0] not in allowed_flatpak: - return False, f"flatpak command '{cmd_args[0]}' is not allowed - only list, info, search are permitted" - - # Special handling for AWS CLI - allow read-only commands - if base_cmd == "aws": - if cmd_args: - allowed_aws = {"--version", "help", "--help", "sts", "configure"} - # sts get-caller-identity is safe, configure list is safe - if cmd_args[0] not in allowed_aws: - return False, f"aws command '{cmd_args[0]}' is not allowed - use 'sts get-caller-identity' or 'configure list' for read-only queries" - - # Special handling for apt - only allow list, show, search, policy, depends - if base_cmd == "apt": - if cmd_args: - allowed_apt = {"list", "show", "search", "policy", "depends", "rdepends", "madison", "--version", "help", "--help"} 
- if cmd_args[0] not in allowed_apt: - return False, f"apt command '{cmd_args[0]}' is not allowed - only list, show, search, policy are permitted for read-only queries" - else: - return False, "apt requires a subcommand like 'list', 'show', or 'search'" - - return True, "" - - @classmethod - def execute_command(cls, command: str, timeout: int = 10) -> tuple[bool, str, str]: - """Execute a validated command and return the result. - - For chained commands (&&, ||, ;), executes each command separately - and combines the output. - - Args: - command: The shell command to execute - timeout: Maximum execution time in seconds - - Returns: - Tuple of (success, stdout, stderr) - """ - # Validate first - is_valid, error = cls.validate_command(command) - if not is_valid: - return False, "", f"Command blocked: {error}" - - # Check if this is a chained command - has_chaining = any(re.search(p, command) for p in cls.CHAINING_PATTERNS) - - if has_chaining: - # Split and execute each command separately - subcommands = cls.split_chained_commands(command) - all_stdout = [] - all_stderr = [] - overall_success = True - - for subcmd in subcommands: - try: - result = subprocess.run( - subcmd, - shell=True, - capture_output=True, - text=True, - timeout=timeout, - ) - - if result.stdout.strip(): - all_stdout.append(f"# {subcmd}\n{result.stdout.strip()}") - if result.stderr.strip(): - all_stderr.append(f"# {subcmd}\n{result.stderr.strip()}") - - if result.returncode != 0: - overall_success = False - # For && chaining, stop on first failure - if "&&" in command: - break - - except subprocess.TimeoutExpired: - all_stderr.append(f"# {subcmd}\nCommand timed out after {timeout} seconds") - overall_success = False - break - except Exception as e: - all_stderr.append(f"# {subcmd}\nExecution failed: {e}") - overall_success = False - break - - return ( - overall_success, - "\n\n".join(all_stdout), - "\n\n".join(all_stderr), + result = subprocess.run( + ["dpkg-query", "-W", "-f=${Version}", package], 
+ capture_output=True, + text=True, + timeout=5, ) - - # Single command + if result.returncode == 0: + return result.stdout.strip() + except (subprocess.SubprocessError, FileNotFoundError): + # If dpkg-query is unavailable or fails, return None silently. + # We avoid user-visible logs to keep CLI output clean. + pass + return None + + @staticmethod + def get_pip_package(package: str) -> str | None: + """Check if a Python package is installed via pip.""" try: result = subprocess.run( - command, - shell=True, + ["pip3", "show", package], capture_output=True, text=True, - timeout=timeout, + timeout=5, ) - return ( - result.returncode == 0, - result.stdout.strip(), - result.stderr.strip(), + if result.returncode == 0: + for line in result.stdout.splitlines(): + if line.startswith("Version:"): + return line.split(":", 1)[1].strip() + except (subprocess.SubprocessError, FileNotFoundError): + # If pip is unavailable or the command fails, return None silently. + pass + return None + + @staticmethod + def check_command_exists(cmd: str) -> bool: + """Check if a command exists in PATH.""" + return shutil.which(cmd) is not None + + @staticmethod + def get_gpu_info() -> dict[str, Any]: + """Get GPU information if available.""" + gpu_info: dict[str, Any] = {"available": False, "nvidia": False, "cuda": None} + + # Check for nvidia-smi + if shutil.which("nvidia-smi"): + gpu_info["nvidia"] = True + gpu_info["available"] = True + try: + result = subprocess.run( + ["nvidia-smi", "--query-gpu=name,driver_version", "--format=csv,noheader"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + gpu_info["model"] = result.stdout.strip().split(",")[0] + except (subprocess.SubprocessError, FileNotFoundError): + # If nvidia-smi is unavailable or fails, keep defaults. 
+ pass + + # Check CUDA version + try: + result = subprocess.run( + ["nvcc", "--version"], + capture_output=True, + text=True, + timeout=5, + ) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if "release" in line.lower(): + parts = line.split("release") + if len(parts) > 1: + gpu_info["cuda"] = parts[1].split(",")[0].strip() + except (subprocess.SubprocessError, FileNotFoundError): + # If nvcc is unavailable or fails, leave CUDA info unset. + pass + + return gpu_info + + def gather_context(self) -> dict[str, Any]: + """Gather relevant system context for LLM.""" + return { + "python_version": self.get_python_version(), + "python_path": self.get_python_path(), + "os": self.get_os_info(), + "gpu": self.get_gpu_info(), + } + + +class LearningTracker: + """Tracks educational topics the user has explored.""" + + _progress_file: Path | None = None + + # Patterns that indicate educational questions + EDUCATIONAL_PATTERNS = [ + r"^explain\b", + r"^teach\s+me\b", + r"^what\s+is\b", + r"^what\s+are\b", + r"^how\s+does\b", + r"^how\s+do\b", + r"^how\s+to\b", + r"\bbest\s+practices?\b", + r"^tutorial\b", + r"^guide\s+to\b", + r"^learn\s+about\b", + r"^introduction\s+to\b", + r"^basics\s+of\b", + ] + + # Compiled patterns shared across all instances for efficiency + _compiled_patterns: list[re.Pattern[str]] = [ + re.compile(p, re.IGNORECASE) for p in EDUCATIONAL_PATTERNS + ] + + def __init__(self) -> None: + """Initialize the learning tracker. + + Uses pre-compiled educational patterns for efficient matching + across multiple queries. Patterns are shared as class variables + to avoid recompilation overhead. 
+ """ + + @property + def progress_file(self) -> Path: + """Lazily compute the progress file path to avoid import-time errors.""" + if self._progress_file is None: + try: + self._progress_file = Path.home() / ".cortex" / "learning_history.json" + except RuntimeError: + # Fallback for restricted environments where home is inaccessible + import tempfile + + self._progress_file = ( + Path(tempfile.gettempdir()) / ".cortex" / "learning_history.json" + ) + return self._progress_file + + def is_educational_query(self, question: str) -> bool: + """Determine if a question is educational in nature.""" + return any(pattern.search(question) for pattern in self._compiled_patterns) + + def extract_topic(self, question: str) -> str: + """Extract the main topic from an educational question.""" + # Remove common prefixes + topic = question.lower() + prefixes_to_remove = [ + r"^explain\s+", + r"^teach\s+me\s+about\s+", + r"^teach\s+me\s+", + r"^what\s+is\s+", + r"^what\s+are\s+", + r"^how\s+does\s+", + r"^how\s+do\s+", + r"^how\s+to\s+", + r"^tutorial\s+on\s+", + r"^guide\s+to\s+", + r"^learn\s+about\s+", + r"^introduction\s+to\s+", + r"^basics\s+of\s+", + r"^best\s+practices\s+for\s+", + ] + for prefix in prefixes_to_remove: + topic = re.sub(prefix, "", topic, flags=re.IGNORECASE) + + # Clean up and truncate + topic = topic.strip("? ").strip() + + # Truncate at word boundaries to keep topic identifier meaningful + # If topic exceeds 50 chars, truncate at the last space within those 50 chars + # to preserve whole words. If the first 50 chars contain no spaces, + # keep the full 50-char prefix. + if len(topic) > 50: + truncated = topic[:50] + # Try to split at word boundary; keep full 50 chars if no spaces found + words = truncated.rsplit(" ", 1) + # Handle case where topic starts with space after prefix removal + topic = words[0] if words[0] else truncated + + return topic + + def record_topic(self, question: str) -> None: + """Record that the user explored an educational topic. 
+ + Note: This method performs a read-modify-write cycle on the history file + without file locking. If multiple cortex ask processes run concurrently, + concurrent updates could theoretically be lost. This is acceptable for a + single-user CLI tool where concurrent invocations are rare and learning + history is non-critical, but worth noting for future enhancements. + """ + if not self.is_educational_query(question): + return + + topic = self.extract_topic(question) + if not topic: + return + + history = self._load_history() + if not isinstance(history, dict): + history = {"topics": {}, "total_queries": 0} + + # Ensure history has expected structure (defensive defaults for malformed data) + history.setdefault("topics", {}) + history.setdefault("total_queries", 0) + if not isinstance(history.get("topics"), dict): + history["topics"] = {} + + # Ensure total_queries is an integer + if not isinstance(history.get("total_queries"), int): + try: + history["total_queries"] = int(history["total_queries"]) + except (ValueError, TypeError): + history["total_queries"] = 0 + + # Use UTC timestamps for consistency and accurate sorting + utc_now = datetime.now(timezone.utc).isoformat() + + # Update or add topic + if topic in history["topics"]: + # Check if the topic data is actually a dict before accessing it + if not isinstance(history["topics"][topic], dict): + # If topic data is malformed, reinitialize it + history["topics"][topic] = { + "count": 1, + "first_accessed": utc_now, + "last_accessed": utc_now, + } + else: + try: + # Safely increment count, handle missing key + history["topics"][topic]["count"] = history["topics"][topic].get("count", 0) + 1 + history["topics"][topic]["last_accessed"] = utc_now + except (KeyError, TypeError, AttributeError): + # If topic data is malformed, reinitialize it + history["topics"][topic] = { + "count": 1, + "first_accessed": utc_now, + "last_accessed": utc_now, + } + else: + history["topics"][topic] = { + "count": 1, + "first_accessed": 
utc_now, + "last_accessed": utc_now, + } + + history["total_queries"] = history.get("total_queries", 0) + 1 + self._save_history(history) + + def get_history(self) -> dict[str, Any]: + """Get the learning history.""" + return self._load_history() + + def get_recent_topics(self, limit: int = 5) -> list[str]: + """Get recently explored topics.""" + history = self._load_history() + topics = history.get("topics", {}) + + # Filter out malformed entries and sort by last_accessed + valid_topics = [ + (name, data) + for name, data in topics.items() + if isinstance(data, dict) and "last_accessed" in data + ] + sorted_topics = sorted( + valid_topics, + key=lambda x: x[1].get("last_accessed", ""), + reverse=True, + ) + return [t[0] for t in sorted_topics[:limit]] + + def _load_history(self) -> dict[str, Any]: + """Load learning history from file.""" + if not self.progress_file.exists(): + return {"topics": {}, "total_queries": 0} + + try: + with open(self.progress_file, encoding="utf-8") as f: + return json.load(f) + except (json.JSONDecodeError, OSError): + return {"topics": {}, "total_queries": 0} + + def _save_history(self, history: dict[str, Any]) -> None: + """Save learning history to file. + + Silently handles save failures to keep CLI clean, but logs at debug level + for diagnostics. Failures may occur due to permission issues or disk space. 
+ """ + try: + self.progress_file.parent.mkdir(parents=True, exist_ok=True) + with open(self.progress_file, "w", encoding="utf-8") as f: + json.dump(history, f, indent=2) + except OSError as e: + # Log at debug level to help diagnose permission/disk issues + # without breaking CLI output or crashing the application + logger.debug( + f"Failed to save learning history to {self.progress_file}: {e}", + exc_info=False, ) - except subprocess.TimeoutExpired: - return False, "", f"Command timed out after {timeout} seconds" - except Exception as e: - return False, "", f"Command execution failed: {e}" class AskHandler: - """Handles natural language questions about the system using an agentic loop. - - The handler uses an iterative approach: - 1. LLM generates a read-only command to gather information - 2. Command is validated and executed - 3. Output is sent back to LLM - 4. LLM either generates another command or provides final answer - 5. Max 5 iterations before giving up - - In --do mode, the handler can execute write and modify commands with - user confirmation and privilege management. - """ - - MAX_ITERATIONS = 5 - MAX_DO_ITERATIONS = 15 # More iterations for --do mode since it's solving problems + """Handles natural language questions about the system.""" def __init__( self, api_key: str, provider: str = "claude", model: str | None = None, - debug: bool = False, do_mode: bool = False, ): """Initialize the ask handler. 
@@ -541,41 +374,23 @@ def __init__( api_key: API key for the LLM provider provider: Provider name ("openai", "claude", or "ollama") model: Optional model name override - debug: Enable debug output to shell - do_mode: Enable write/execute mode with user confirmation + do_mode: If True, enable execution mode with do_runner """ self.api_key = api_key self.provider = provider.lower() self.model = model or self._default_model() - self.debug = debug + self.info_gatherer = SystemInfoGatherer() + self.learning_tracker = LearningTracker() self.do_mode = do_mode - - # Import rich console for debug output - if self.debug: - from rich.console import Console - from rich.panel import Panel - self._console = Console() - else: - self._console = None - - # For expandable output storage - self._last_output: str | None = None - self._last_output_command: str | None = None - - # Interrupt flag - can be set externally to stop execution - self._interrupted = False - - # Initialize DoHandler for --do mode + + # Initialize do_handler if in do_mode self._do_handler = None - if self.do_mode: + if do_mode: try: - from cortex.do_runner import DoHandler - # Pass LLM callback so DoHandler can make LLM calls for interactive session - self._do_handler = DoHandler(llm_callback=self._call_llm_for_do) - except (ImportError, OSError, Exception) as e: - # Log error but don't fail - do mode just won't work - if self.debug and self._console: - self._console.print(f"[yellow]Warning: Could not initialize DoHandler: {e}[/yellow]") + from cortex.do_runner.handler import DoHandler + + self._do_handler = DoHandler() + except ImportError: pass # Initialize cache @@ -583,173 +398,28 @@ def __init__( from cortex.semantic_cache import SemanticCache self.cache: SemanticCache | None = SemanticCache() - except (ImportError, OSError, sqlite3.OperationalError, Exception): + except (ImportError, OSError): self.cache = None self._initialize_client() - def interrupt(self): - """Interrupt the current operation. 
Call this from signal handlers.""" - self._interrupted = True - # Also interrupt the DoHandler if it exists - if self._do_handler: - self._do_handler._interrupted = True - - def reset_interrupt(self): - """Reset the interrupt flag before starting a new operation.""" - self._interrupted = False - if self._do_handler: - self._do_handler._interrupted = False - def _default_model(self) -> str: if self.provider == "openai": - return "gpt-4o" # Use gpt-4o for 128K context + return "gpt-4" elif self.provider == "claude": return "claude-sonnet-4-20250514" elif self.provider == "ollama": - return "llama3.2" + return self._get_ollama_model() elif self.provider == "fake": return "fake" - return "gpt-4o" - - def _debug_print(self, title: str, content: str, style: str = "dim") -> None: - """Print debug output if debug mode is enabled.""" - if self.debug and self._console: - from rich.panel import Panel - self._console.print(Panel(content, title=f"[bold]{title}[/bold]", style=style)) - - def _print_query_summary(self, question: str, commands_run: list[str], answer: str) -> None: - """Print a condensed summary for question queries with improved visual design.""" - if not self._console: - return - - from rich.panel import Panel - from rich.table import Table - from rich.text import Text - from rich import box - - # Clean the answer - remove any JSON/shell script that might have leaked - clean_answer = answer - import re - - # Check if answer looks like JSON or contains shell script fragments - if clean_answer.startswith('{') or '{"' in clean_answer[:100]: - # Try to extract just the answer field if present - answer_match = re.search(r'"answer"\s*:\s*"([^"]*)"', clean_answer, re.DOTALL) - if answer_match: - clean_answer = answer_match.group(1) - # Unescape common JSON escapes - clean_answer = clean_answer.replace('\\n', '\n').replace('\\"', '"') - - # Remove shell script-like content that shouldn't be in the answer - if re.search(r'^(if \[|while |for |echo \$|sed |awk |grep -)', 
clean_answer, re.MULTILINE): - # This looks like shell script leaked - try to extract readable parts - readable_lines = [] - for line in clean_answer.split('\n'): - # Keep lines that look like actual content, not script - if not re.match(r'^(if \[|fi$|done$|else$|then$|do$|while |for |echo \$|sed |awk )', line.strip()): - if line.strip() and not line.strip().startswith('#!'): - readable_lines.append(line) - if readable_lines: - clean_answer = '\n'.join(readable_lines[:20]) # Limit to 20 lines - - self._console.print() - - # Query section - q_display = question[:80] + "..." if len(question) > 80 else question - self._console.print(Panel( - f"[bold]{q_display}[/bold]", - title="[bold white on blue] 🔍 Query [/bold white on blue]", - title_align="left", - border_style="blue", - padding=(0, 1), - expand=False, - )) - - # Info gathered section - if commands_run: - info_table = Table( - show_header=False, - box=box.SIMPLE, - padding=(0, 1), - expand=True, - ) - info_table.add_column("", style="dim") - - for cmd in commands_run[:4]: - cmd_display = cmd[:60] + "..." if len(cmd) > 60 else cmd - info_table.add_row(f"$ {cmd_display}") - if len(commands_run) > 4: - info_table.add_row(f"[dim]... and {len(commands_run) - 4} more commands[/dim]") - - self._console.print(Panel( - info_table, - title=f"[bold] 📊 Info Gathered ({len(commands_run)} commands) [/bold]", - title_align="left", - border_style="dim", - padding=(0, 0), - )) - - # Answer section - make it prominent - if clean_answer.strip(): - # Truncate very long answers - if len(clean_answer) > 800: - display_answer = clean_answer[:800] + "\n\n[dim]... 
(answer truncated)[/dim]" - else: - display_answer = clean_answer - - self._console.print(Panel( - display_answer, - title="[bold white on green] 💡 Answer [/bold white on green]", - title_align="left", - border_style="green", - padding=(1, 2), - )) - - def _show_expandable_output(self, console, output: str, command: str) -> None: - """Show output with expand/collapse capability.""" - from rich.panel import Panel - from rich.text import Text - - lines = output.split('\n') - total_lines = len(lines) - - # Always show first 3 lines as preview - preview_count = 3 - - if total_lines <= preview_count + 2: - # Small output - just show it all - console.print(Panel( - output, - title=f"[dim]Output[/dim]", - title_align="left", - border_style="dim", - padding=(0, 1), - )) - return - - # Show collapsed preview with expand option - preview = '\n'.join(lines[:preview_count]) - remaining = total_lines - preview_count - - # Build the panel content - content = Text() - content.append(preview) - content.append(f"\n\n[dim]─── {remaining} more lines hidden ───[/dim]", style="dim") - - console.print(Panel( - content, - title=f"[dim]Output ({total_lines} lines)[/dim]", - subtitle="[dim italic]Type 'e' to expand[/dim italic]", - subtitle_align="right", - title_align="left", - border_style="dim", - padding=(0, 1), - )) - - # Store for potential expansion - self._last_output = output - self._last_output_command = command + return "gpt-4" + + def _get_ollama_model(self) -> str: + """Determine which Ollama model to use. + + Delegates to the shared ``get_ollama_model()`` utility function. 
+ """ + return get_ollama_model() def _initialize_client(self): if self.provider == "openai": @@ -762,11 +432,7 @@ def _initialize_client(self): elif self.provider == "claude": try: from anthropic import Anthropic - import logging - # Suppress noisy retry logging from anthropic client - logging.getLogger("anthropic").setLevel(logging.WARNING) - logging.getLogger("anthropic._base_client").setLevel(logging.WARNING) - + self.client = Anthropic(api_key=self.api_key) except ImportError: raise ImportError("Anthropic package not installed. Run: pip install anthropic") @@ -778,650 +444,162 @@ def _initialize_client(self): else: raise ValueError(f"Unsupported provider: {self.provider}") - def _get_system_prompt(self) -> str: - if self.do_mode: - return self._get_do_mode_system_prompt() - return self._get_read_only_system_prompt() - - def _get_read_only_system_prompt(self) -> str: - return """You are a Linux system assistant that answers questions by executing read-only shell commands. - -SCOPE RESTRICTION - VERY IMPORTANT: -You are ONLY a Linux/system administration assistant. You can ONLY help with: -- Linux system administration, configuration, and troubleshooting -- Package management (apt, snap, flatpak, pip, npm, etc.) -- Service management (systemctl, docker, etc.) -- File system operations and permissions -- Networking and security -- Development environment setup -- Server configuration - -If the user asks about anything unrelated to Linux/technical topics (social chat, personal advice, -creative writing, general knowledge questions not related to their system, etc.), you MUST respond with: -{ - "response_type": "answer", - "answer": "I'm Cortex, a Linux system assistant. I can only help with Linux system administration, package management, and technical tasks on your machine. I can't help with non-technical topics. 
Is there something I can help you with on your system?", - "reasoning": "User query is outside my scope as a Linux system assistant" -} - -Your task is to help answer the user's question about their system by: -1. Generating shell commands to gather the needed information -2. Analyzing the command output -3. Either generating another command if more info is needed, or providing the final answer - -IMPORTANT RULES: -- You can ONLY use READ-ONLY commands that fetch data (no modifications allowed) -- Allowed commands include: cat, ls, grep, find, dpkg-query, apt-cache, pip3 show/list, ps, df, free, uname, lscpu, etc. -- NEVER use commands that modify the system (rm, mv, cp, apt install, pip install, etc.) -- NEVER use sudo -- NEVER use output redirection (>, >>), command chaining (;, &&, ||), or command substitution ($(), ``) - -CRITICAL: You must respond with ONLY a JSON object - no other text before or after. -Do NOT include explanations outside the JSON. Put all reasoning inside the "reasoning" field. - -JSON format: -{ - "response_type": "command" | "answer", - "command": "" (only if response_type is "command"), - "answer": "" (only if response_type is "answer"), - "reasoning": "" -} - -Examples of ALLOWED commands: -- cat /etc/os-release -- dpkg-query -W -f='${Version}' python3 -- pip3 show numpy -- pip3 list -- ls -la /usr/bin/python* -- uname -a -- lscpu -- free -h -- df -h -- ps aux | grep python -- apt-cache show nginx -- systemctl status nginx (read-only status check) - -Examples of BLOCKED commands (NEVER use these): -- sudo anything -- apt install/remove -- pip install/uninstall -- rm, mv, cp, mkdir, touch -- echo "text" > file -- command1 && command2""" - - def _get_do_mode_system_prompt(self) -> str: - return """You are a Linux system assistant that can READ, WRITE, and EXECUTE commands to solve problems. - -SCOPE RESTRICTION - VERY IMPORTANT: -You are ONLY a Linux/system administration assistant. 
You can ONLY help with: -- Linux system administration, configuration, and troubleshooting -- Package management (apt, snap, flatpak, pip, npm, etc.) -- Service management (systemctl, docker, etc.) -- File system operations and permissions -- Networking and security -- Development environment setup -- Server configuration - -If the user asks about anything unrelated to Linux/technical topics (social chat, personal advice, -creative writing, general knowledge questions not related to their system, etc.), you MUST respond with: -{ - "response_type": "answer", - "answer": "I'm Cortex, a Linux system assistant. I can only help with Linux system administration, package management, and technical tasks on your machine. I can't help with non-technical topics. What would you like me to do on your system?", - "reasoning": "User query is outside my scope as a Linux system assistant" -} - -You are in DO MODE - you have the ability to make changes to the system to solve the user's problem. - -Your task is to: -1. Understand the user's problem or request -2. Quickly gather essential information (1-3 read commands MAX) -3. Plan and propose a solution with specific commands using "do_commands" -4. Execute the solution with the user's permission -5. Handle failures gracefully with repair attempts - -CRITICAL WORKFLOW RULES: -- DO NOT spend more than 3-4 iterations gathering information -- After gathering basic system info (OS, existing packages), IMMEDIATELY propose do_commands -- If you know how to install/configure something, propose do_commands right away -- Be action-oriented: the user wants you to DO something, not just analyze -- You can always gather more info AFTER the user approves the commands if needed - -WORKFLOW: -1. Quickly gather essential info (OS version, if package exists) - MAX 2-3 commands -2. IMMEDIATELY propose "do_commands" with your installation/setup plan -3. The do_commands will be shown to the user for approval before execution -4. 
Commands are executed using a TASK TREE system with auto-repair capabilities: - - If a command fails, Cortex will automatically diagnose the error - - Repair sub-tasks may be spawned and executed with additional permission requests - - Terminal monitoring is available during manual intervention -5. After execution, verify the changes worked and provide a final "answer" -6. If execution_failures appear in history, propose alternative solutions - -CRITICAL: You must respond with ONLY a JSON object - no other text before or after. -Do NOT include explanations outside the JSON. Put all reasoning inside the "reasoning" field. - -For gathering information (read-only): -{ - "response_type": "command", - "command": "", - "reasoning": "" -} - -For proposing changes (write/execute): -{ - "response_type": "do_commands", - "do_commands": [ - { - "command": "", - "purpose": "", - "requires_sudo": true/false - } - ], - "reasoning": "" -} - -For final answer: -{ - "response_type": "answer", - "answer": "", - "reasoning": "" -} - -For proposing repair commands after failures: -{ - "response_type": "do_commands", - "do_commands": [ - { - "command": "", - "purpose": "", - "requires_sudo": true/false - } - ], - "reasoning": "" -} - -HANDLING FAILURES: -- When you see "execution_failures" in history, analyze the error messages carefully -- Common errors and their fixes: - * "Permission denied" → Add sudo, check ownership, or run with elevated privileges - * "No such file or directory" → Create parent directories first (mkdir -p) - * "Command not found" → Install the package (apt install) - * "Service not running" → Start the service first (systemctl start) - * "Configuration syntax error" → Read config file, find and fix the error -- Always provide detailed reasoning when proposing repairs -- If the original approach won't work, suggest an alternative approach -- You may request multiple rounds of commands to diagnose and fix issues - -IMPORTANT RULES: -- BE ACTION-ORIENTED: After 2-3 
info commands, propose do_commands immediately -- DO NOT over-analyze: You have enough info once you know the OS and if basic packages exist -- For installation tasks: Propose the installation commands right away -- For do_commands, each command should be atomic and specific -- Always include a clear purpose for each command -- Mark requires_sudo: true if the command needs root privileges -- Be careful with destructive commands - always explain what they do -- After making changes, verify they worked before giving final answer -- If something fails, diagnose and try alternative approaches -- Multiple permission requests may be made during a single session for repair commands - -ANTI-PATTERNS TO AVOID: -- Don't keep gathering info for more than 3 iterations -- Don't check every possible thing before proposing a solution -- Don't be overly cautious - the user wants action -- If you know how to solve the problem, propose do_commands NOW - -PROTECTED PATHS (will require user authentication): -- /etc/* - System configuration -- /boot/* - Boot configuration -- /usr/bin, /usr/sbin, /sbin, /bin - System binaries -- /root - Root home directory -- /var/log, /var/lib/apt - System data - -COMMAND RESTRICTIONS: -- Use SINGLE commands only - no chaining with &&, ||, or ; -- Use pipes (|) sparingly and only for filtering -- No output redirection (>, >>) in read commands -- If you need multiple commands, return them separately in sequence - -Examples of READ commands: -- cat /etc/nginx/nginx.conf -- ls -la /var/log/ -- systemctl status nginx -- grep -r "error" /var/log/syslog -- dpkg -l | grep nginx -- apt list --installed | grep docker (use apt list, not apt install) - -Examples of WRITE/EXECUTE commands (use with do_commands): -- echo 'server_name example.com;' >> /etc/nginx/sites-available/default -- systemctl restart nginx -- apt install -y nginx -- chmod 755 /var/www/html -- mkdir -p /etc/myapp -- cp /etc/nginx/nginx.conf /etc/nginx/nginx.conf.backup - -Examples of REPAIR 
commands after failures: -- sudo chown -R $USER:$USER /path/to/file # Fix ownership issues -- sudo mkdir -p /path/to/directory # Create missing directories -- sudo apt install -y missing-package # Install missing dependencies -- journalctl -u service-name -n 50 --no-pager # Diagnose service issues""" - - # Maximum characters of command output to include in history - MAX_OUTPUT_CHARS = 2000 - - def _truncate_output(self, output: str) -> str: - """Truncate command output to avoid context length issues.""" - if len(output) <= self.MAX_OUTPUT_CHARS: - return output - # Keep first and last portions - half = self.MAX_OUTPUT_CHARS // 2 - return f"{output[:half]}\n\n... [truncated {len(output) - self.MAX_OUTPUT_CHARS} chars] ...\n\n{output[-half:]}" - - def _build_iteration_prompt( - self, - question: str, - history: list[dict[str, str]] - ) -> str: - """Build the prompt for the current iteration.""" - prompt = f"User Question: {question}\n\n" - - if history: - prompt += "Previous commands and results:\n" - for i, entry in enumerate(history, 1): - # Handle execution_failures context from do_commands - if entry.get("type") == "execution_failures": - prompt += f"\n--- EXECUTION FAILURES (Need Repair) ---\n" - prompt += f"Message: {entry.get('message', 'Commands failed')}\n" - for fail in entry.get("failures", []): - prompt += f"\nFailed Command: {fail.get('command', 'unknown')}\n" - prompt += f"Purpose: {fail.get('purpose', 'unknown')}\n" - prompt += f"Error: {fail.get('error', 'unknown')}\n" - prompt += "\nPlease analyze these failures and propose repair commands or alternative approaches.\n" - continue - - # Handle regular commands - prompt += f"\n--- Attempt {i} ---\n" - - # Check if this is a do_command execution result - if "executed_by" in entry: - prompt += f"Command (executed by {entry['executed_by']}): {entry.get('command', 'unknown')}\n" - prompt += f"Purpose: {entry.get('purpose', 'unknown')}\n" - if entry.get('success'): - truncated_output = 
self._truncate_output(entry.get('output', '')) - prompt += f"Status: SUCCESS\nOutput:\n{truncated_output}\n" - else: - prompt += f"Status: FAILED\nError: {entry.get('error', 'unknown')}\n" - else: - prompt += f"Command: {entry.get('command', 'unknown')}\n" - if entry.get('success'): - truncated_output = self._truncate_output(entry.get('output', '')) - prompt += f"Output:\n{truncated_output}\n" - else: - prompt += f"Error: {entry.get('error', 'unknown')}\n" - - prompt += "\n" - - # Check if there were recent failures - has_failures = any( - e.get("type") == "execution_failures" or - (e.get("executed_by") and not e.get("success")) - for e in history[-5:] # Check last 5 entries - ) - - if has_failures: - prompt += "IMPORTANT: There were command failures. Please:\n" - prompt += "1. Analyze the error messages to understand what went wrong\n" - prompt += "2. Propose repair commands using 'do_commands' response type\n" - prompt += "3. Or suggest an alternative approach if the original won't work\n" - else: - prompt += "Based on the above results, either provide another command to gather more information, or provide the final answer.\n" - else: - prompt += "Generate a shell command to gather the information needed to answer this question.\n" - - prompt += "\nRespond with a JSON object as specified in the system prompt." - return prompt - - def _clean_llm_response(self, text: str) -> str: - """Clean raw LLM response to prevent JSON from being displayed to user. - - Extracts meaningful content like reasoning or answer from raw JSON, - or returns a generic error message if the response is pure JSON. - - NOTE: This is only called as a fallback when JSON parsing fails. - We should NOT return placeholder messages for valid response types. 
- """ - import re - - # If it looks like pure JSON, don't show it to user - text = text.strip() - - # Check for partial JSON (starts with ], }, or other JSON fragments) - if text.startswith((']', '},', ',"', '"response_type"', '"do_commands"', '"command"', '"reasoning"')): - return "" # Return empty so loop continues - - if text.startswith('{') and text.endswith('}'): - # Try to extract useful fields - try: - data = json.loads(text) - # Try to get meaningful content in order of preference - if data.get("answer"): - return data["answer"] - if data.get("reasoning") and data.get("response_type") == "answer": - # Only use reasoning if it's an answer type - reasoning = data["reasoning"] - if not any(p in reasoning for p in ['"command":', '"do_commands":', '"requires_sudo":']): - return f"Analysis: {reasoning}" - # For do_commands or command types, return empty to let parsing retry - if data.get("do_commands") or data.get("command"): - return "" # Return empty so the proper parsing can happen - # Pure JSON with no useful fields - return "" - except json.JSONDecodeError: - pass - - # Check for JSON-like patterns in the text - json_patterns = [ - r'"response_type"\s*:\s*"', - r'"do_commands"\s*:\s*\[', - r'"command"\s*:\s*"', - r'"requires_sudo"\s*:\s*', - r'\[\s*\{', # Start of array of objects - r'\}\s*,\s*\{', # Object separator - r'\]\s*,\s*"', # End of array followed by key - ] - - # If text contains raw JSON patterns, try to extract non-JSON parts - has_json_patterns = any(re.search(p, text) for p in json_patterns) - if has_json_patterns: - # Try to find text before or after JSON - parts = re.split(r'\{[\s\S]*"response_type"[\s\S]*\}', text) - clean_parts = [p.strip() for p in parts if p.strip() and len(p.strip()) > 20] - if clean_parts: - # Filter out parts that still look like JSON - clean_parts = [p for p in clean_parts if not any(j in p for j in ['":', '",', '{}', '[]'])] - if clean_parts: - return " ".join(clean_parts) - - # No good text found, return generic 
message - return "I'm processing your request. Please wait for the proper output." - - # Text doesn't look like JSON, return as-is - return text - - def _parse_llm_response(self, response_text: str) -> SystemCommand: - """Parse the LLM response into a SystemCommand object.""" - # Try to extract JSON from the response - original_text = response_text.strip() - response_text = original_text - - # Handle markdown code blocks - if "```json" in response_text: - response_text = response_text.split("```json")[1].split("```")[0].strip() - elif "```" in response_text: - parts = response_text.split("```") - if len(parts) >= 2: - response_text = parts[1].split("```")[0].strip() - - # Try direct JSON parsing first + def _get_system_prompt(self, context: dict[str, Any]) -> str: + return f"""You are a helpful Linux system assistant and tutor. You help users with both system-specific questions AND educational queries about Linux, packages, and best practices. + +System Context: +{json.dumps(context, indent=2)} + +**Query Type Detection** + +Automatically detect the type of question and respond appropriately: + +**Educational Questions (tutorials, explanations, learning)** + +Triggered by questions like: "explain...", "teach me...", "how does X work", "what is...", "best practices for...", "tutorial on...", "learn about...", "guide to..." + +For educational questions: +1. Provide structured, tutorial-style explanations +2. Include practical code examples with proper formatting +3. Highlight best practices and common pitfalls to avoid +4. Break complex topics into digestible sections +5. Use clear section labels and bullet points for readability +6. Mention related topics the user might want to explore next +7. 
Tailor examples to the user's system when relevant (e.g., use apt for Debian-based systems) + +**Diagnostic Questions (system-specific, troubleshooting)** + +Triggered by questions about: current system state, "why is my...", "what packages...", "check my...", specific errors, system status + +For diagnostic questions: +1. Analyze the provided system context +2. Give specific, actionable answers +3. Be concise but informative +4. If you don't have enough information, say so clearly + +**Output Formatting Rules (CRITICAL - Follow exactly)** + +1. NEVER use markdown headings (# or ##) - they render poorly in terminals +2. For section titles, use **Bold Text** on its own line instead +3. Use bullet points (-) for lists +4. Use numbered lists (1. 2. 3.) for sequential steps +5. Use triple backticks with language name for code blocks (```bash) +6. Use *italic* sparingly for emphasis +7. Keep lines under 100 characters when possible +8. Add blank lines between sections for readability +9. For tables, use simple text formatting, not markdown tables + +Example of good formatting: +**Installation Steps** + +1. Update your package list: +```bash +sudo apt update +``` + +2. 
Install the package: +```bash +sudo apt install nginx +``` + +**Key Points** +- Point one here +- Point two here""" + + def _call_openai(self, question: str, system_prompt: str) -> str: + response = self.client.chat.completions.create( + model=self.model, + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": question}, + ], + temperature=0.3, + max_tokens=MAX_TOKENS, + ) + # Defensive: content may be None or choices could be empty in edge cases try: - data = json.loads(response_text) - except json.JSONDecodeError: - # Try to find JSON object in the text (LLM sometimes adds prose before/after) - json_match = re.search(r'\{[\s\S]*"response_type"[\s\S]*\}', original_text) - if json_match: - try: - # Find the complete JSON object by matching braces - json_str = json_match.group() - # Balance braces to get complete JSON - brace_count = 0 - json_end = 0 - for i, char in enumerate(json_str): - if char == '{': - brace_count += 1 - elif char == '}': - brace_count -= 1 - if brace_count == 0: - json_end = i + 1 - break - - if json_end > 0: - json_str = json_str[:json_end] - - data = json.loads(json_str) - except json.JSONDecodeError: - # If still fails, don't show raw JSON to user - clean_answer = self._clean_llm_response(original_text) - return SystemCommand( - response_type=LLMResponseType.ANSWER, - answer=clean_answer, - reasoning="Could not parse structured response, treating as direct answer" - ) - else: - # No JSON found, clean up before treating as direct answer - clean_answer = self._clean_llm_response(original_text) - return SystemCommand( - response_type=LLMResponseType.ANSWER, - answer=clean_answer, - reasoning="No JSON structure found, treating as direct answer" - ) - + content = response.choices[0].message.content or "" + except (IndexError, AttributeError): + content = "" + return content.strip() + + def _call_claude(self, question: str, system_prompt: str) -> str: + response = self.client.messages.create( + model=self.model, 
+ max_tokens=MAX_TOKENS, + temperature=0.3, + system=system_prompt, + messages=[{"role": "user", "content": question}], + ) + # Defensive: content list or text may be missing/None try: - # Handle do_commands - convert dict list to DoCommand objects - if data.get("response_type") == "do_commands" and "do_commands" in data: - data["do_commands"] = [ - DoCommand(**cmd) if isinstance(cmd, dict) else cmd - for cmd in data["do_commands"] - ] - - return SystemCommand(**data) - except Exception as e: - # If SystemCommand creation fails, don't show raw JSON to user - clean_answer = self._clean_llm_response(original_text) - return SystemCommand( - response_type=LLMResponseType.ANSWER, - answer=clean_answer, - reasoning=f"Failed to create SystemCommand: {e}" - ) + text = getattr(response.content[0], "text", None) or "" + except (IndexError, AttributeError): + text = "" + return text.strip() - def _call_llm(self, system_prompt: str, user_prompt: str) -> str: - """Call the LLM and return the response text.""" - # Check for interrupt before making API call - if self._interrupted: - raise InterruptedError("Operation interrupted by user") - - if self.provider == "openai": - response = self.client.chat.completions.create( - model=self.model, - messages=[ - {"role": "system", "content": system_prompt}, - {"role": "user", "content": user_prompt}, - ], - temperature=0.3, - max_tokens=1000, - ) - try: - content = response.choices[0].message.content or "" - except (IndexError, AttributeError): - content = "" - return content.strip() - - elif self.provider == "claude": - response = self.client.messages.create( - model=self.model, - max_tokens=1000, - temperature=0.3, - system=system_prompt, - messages=[{"role": "user", "content": user_prompt}], - ) - try: - text = getattr(response.content[0], "text", None) or "" - except (IndexError, AttributeError): - text = "" - return text.strip() - - elif self.provider == "ollama": - import urllib.request + def _call_ollama(self, question: str, 
system_prompt: str) -> str: + import urllib.error + import urllib.request - url = f"{self.ollama_url}/api/generate" - prompt = f"{system_prompt}\n\n{user_prompt}" + url = f"{self.ollama_url}/api/generate" + prompt = f"{system_prompt}\n\nQuestion: {question}" - data = json.dumps({ + data = json.dumps( + { "model": self.model, "prompt": prompt, "stream": False, - "options": {"temperature": 0.3}, - }).encode("utf-8") - - req = urllib.request.Request( - url, data=data, headers={"Content-Type": "application/json"} - ) - - with urllib.request.urlopen(req, timeout=60) as response: - result = json.loads(response.read().decode("utf-8")) - return result.get("response", "").strip() - - elif self.provider == "fake": - # For testing - return a simple answer - fake_response = os.environ.get("CORTEX_FAKE_RESPONSE", "") - if fake_response: - return fake_response - return json.dumps({ - "response_type": "answer", - "answer": "Test mode response", - "reasoning": "Fake provider for testing" - }) - else: - raise ValueError(f"Unsupported provider: {self.provider}") - - def _call_llm_for_do(self, user_request: str, context: dict | None = None) -> dict: - """Call LLM to process a natural language request for the interactive session. - - This is passed to DoHandler as a callback so it can make LLM calls - during the interactive session. - - Args: - user_request: The user's natural language request - context: Optional context dict with executed_commands, session_actions, etc. - - Returns: - Dict with either: - - {"response_type": "do_commands", "do_commands": [...], "reasoning": "..."} - - {"response_type": "answer", "answer": "...", "reasoning": "..."} - - {"response_type": "command", "command": "...", "reasoning": "..."} - """ - context = context or {} - - system_prompt = """You are a Linux system assistant in an interactive session. -The user has just completed some tasks and now wants to do something else. - -SCOPE RESTRICTION: -You can ONLY help with Linux/technical topics. 
If the user asks about anything unrelated -(social chat, personal advice, general knowledge, etc.), respond with: -{ - "response_type": "answer", - "answer": "I'm Cortex, a Linux system assistant. I can only help with Linux system administration and technical tasks. What would you like me to do on your system?", - "reasoning": "User query is outside my scope" -} - -Based on their request, decide what to do: -1. If they want to EXECUTE commands (install, configure, start, etc.), respond with do_commands -2. If they want INFORMATION (show, explain, how to), respond with an answer -3. If they want to RUN a single read-only command, respond with command - -CRITICAL: Respond with ONLY a JSON object - no other text. - -For executing commands: -{ - "response_type": "do_commands", - "do_commands": [ - {"command": "...", "purpose": "...", "requires_sudo": true/false} - ], - "reasoning": "..." -} - -For providing information: -{ - "response_type": "answer", - "answer": "...", - "reasoning": "..." -} - -For running a read-only command: -{ - "response_type": "command", - "command": "...", - "reasoning": "..." -} -""" - - # Build context-aware prompt - user_prompt = f"Context:\n" - if context.get("original_query"): - user_prompt += f"- Original task: {context['original_query']}\n" - if context.get("executed_commands"): - user_prompt += f"- Commands already executed: {', '.join(context['executed_commands'][:5])}\n" - if context.get("session_actions"): - user_prompt += f"- Actions in this session: {', '.join(context['session_actions'][:3])}\n" - - user_prompt += f"\nUser request: {user_request}\n" - user_prompt += "\nRespond with a JSON object." 
- - try: - response_text = self._call_llm(system_prompt, user_prompt) - - # Parse the response - parsed = self._parse_llm_response(response_text) - - # Convert to dict - result = { - "response_type": parsed.response_type.value, - "reasoning": parsed.reasoning, + "options": {"temperature": 0.3, "num_predict": MAX_TOKENS}, } - - if parsed.response_type == LLMResponseType.DO_COMMANDS and parsed.do_commands: - result["do_commands"] = [ - {"command": cmd.command, "purpose": cmd.purpose, "requires_sudo": cmd.requires_sudo} - for cmd in parsed.do_commands - ] - elif parsed.response_type == LLMResponseType.COMMAND and parsed.command: - result["command"] = parsed.command - elif parsed.response_type == LLMResponseType.ANSWER and parsed.answer: - result["answer"] = parsed.answer - - return result - - except Exception as e: - return { - "response_type": "error", - "error": str(e), - } - - def ask(self, question: str) -> str: + ).encode("utf-8") + + req = urllib.request.Request(url, data=data, headers={"Content-Type": "application/json"}) + + with urllib.request.urlopen(req, timeout=60) as response: + result = json.loads(response.read().decode("utf-8")) + return result.get("response", "").strip() + + def _call_fake(self, question: str, system_prompt: str) -> str: + """Return predefined fake response for testing.""" + fake_response = os.environ.get("CORTEX_FAKE_RESPONSE", "") + if fake_response: + return fake_response + # Default fake responses for common questions + q_lower = question.lower() + if "python" in q_lower and "version" in q_lower: + return f"You have Python {platform.python_version()} installed." + return "I cannot answer that question in test mode." + + def ask(self, question: str, system_prompt: str | None = None) -> str: """Ask a natural language question about the system. - Uses an agentic loop to execute read-only commands and gather information - to answer the user's question. - - In --do mode, can also execute write/modify commands with user confirmation. 
- Args: question: Natural language question + system_prompt: Optional override for the system prompt Returns: Human-readable answer string Raises: ValueError: If question is empty - RuntimeError: If LLM API call fails + RuntimeError: If offline and no cached response exists """ if not question or not question.strip(): raise ValueError("Question cannot be empty") question = question.strip() - system_prompt = self._get_system_prompt() - - # Don't cache in do_mode (each run is unique) - cache_key = f"ask:v2:{question}" - if self.cache is not None and not self.do_mode: + + # Use provided system prompt or generate default + if system_prompt is None: + context = self.info_gatherer.gather_context() + system_prompt = self._get_system_prompt(context) + + # Cache lookup uses both question and system context (via system_prompt) for system-specific answers + cache_key = f"ask:{question}" + + # Try cache first + if self.cache is not None: cached = self.cache.get_commands( prompt=cache_key, provider=self.provider, @@ -1429,394 +607,58 @@ def ask(self, question: str) -> str: system_prompt=system_prompt, ) if cached is not None and len(cached) > 0: + # Track topic access even for cached responses + self.learning_tracker.record_topic(question) return cached[0] - # Agentic loop - history: list[dict[str, Any]] = [] - tried_commands: list[str] = [] - max_iterations = self.MAX_DO_ITERATIONS if self.do_mode else self.MAX_ITERATIONS - - if self.debug: - mode_str = "[DO MODE]" if self.do_mode else "" - self._debug_print("Ask Query", f"{mode_str} Question: {question}", style="cyan") - - # Import console for progress output - from rich.console import Console - loop_console = Console() - - for iteration in range(max_iterations): - # Check for interrupt at start of each iteration - if self._interrupted: - self._interrupted = False # Reset for next request - return "Operation interrupted by user." 
- - if self.debug: - self._debug_print( - f"Iteration {iteration + 1}/{max_iterations}", - f"Calling LLM ({self.provider}/{self.model})...", - style="blue" - ) - - # Show progress to user (even without --debug) - if self.do_mode and iteration > 0: - from rich.panel import Panel - loop_console.print() - loop_console.print(Panel( - f"[bold cyan]Analyzing results...[/bold cyan] [dim]Step {iteration + 1}[/dim]", - border_style="dim cyan", - padding=(0, 1), - expand=False, - )) - - # Build prompt with history - user_prompt = self._build_iteration_prompt(question, history) - - # Call LLM + # Call LLM + try: + if self.provider == "openai": + answer = self._call_openai(question, system_prompt) + elif self.provider == "claude": + answer = self._call_claude(question, system_prompt) + elif self.provider == "ollama": + answer = self._call_ollama(question, system_prompt) + elif self.provider == "fake": + answer = self._call_fake(question, system_prompt) + else: + raise ValueError(f"Unsupported provider: {self.provider}") + except Exception as e: + raise RuntimeError(f"LLM API call failed: {str(e)}") + + # Cache the response silently + if self.cache is not None and answer: try: - response_text = self._call_llm(system_prompt, user_prompt) - # Check for interrupt after LLM call - if self._interrupted: - self._interrupted = False - return "Operation interrupted by user." - except InterruptedError: - # Explicitly interrupted - self._interrupted = False - return "Operation interrupted by user." - except Exception as e: - if self._interrupted: - self._interrupted = False - return "Operation interrupted by user." - raise RuntimeError(f"LLM API call failed: {str(e)}") - - if self.debug: - self._debug_print("LLM Raw Response", response_text[:500] + ("..." 
if len(response_text) > 500 else ""), style="dim") - - # Parse response - parsed = self._parse_llm_response(response_text) - - if self.debug: - self._debug_print( - "LLM Parsed Response", - f"Type: {parsed.response_type.value}\n" - f"Reasoning: {parsed.reasoning}\n" - f"Command: {parsed.command or 'N/A'}\n" - f"Do Commands: {len(parsed.do_commands) if parsed.do_commands else 0}\n" - f"Answer: {(parsed.answer[:100] + '...') if parsed.answer and len(parsed.answer) > 100 else parsed.answer or 'N/A'}", - style="yellow" + self.cache.put_commands( + prompt=cache_key, + provider=self.provider, + model=self.model, + system_prompt=system_prompt, + commands=[answer], ) - - # Show what the LLM decided to do - if self.do_mode and not self.debug: - from rich.panel import Panel - if parsed.response_type == LLMResponseType.COMMAND and parsed.command: - loop_console.print(Panel( - f"[bold]🔍 Gathering info[/bold]\n[cyan]{parsed.command}[/cyan]", - border_style="blue", - padding=(0, 1), - expand=False, - )) - elif parsed.response_type == LLMResponseType.DO_COMMANDS and parsed.do_commands: - loop_console.print(Panel( - f"[bold green]📋 Ready to execute[/bold green] [white]{len(parsed.do_commands)} command(s)[/white]", - border_style="green", - padding=(0, 1), - expand=False, - )) - elif parsed.response_type == LLMResponseType.ANSWER and parsed.answer: - pass # Will be handled below - else: - # LLM returned an unexpected or empty response - loop_console.print(f"[dim yellow]⏳ Waiting for LLM to propose commands...[/dim yellow]") - - # If LLM provides a final answer, return it - if parsed.response_type == LLMResponseType.ANSWER: - answer = parsed.answer or "" - - # Skip empty answers (parsing fallback that should continue loop) - if not answer.strip(): - if self.do_mode: - loop_console.print(f"[dim] (waiting for LLM to propose commands...)[/dim]") - continue - - if self.debug: - self._debug_print("Final Answer", answer, style="green") - - # Cache the response (not in do_mode) - if 
self.cache is not None and answer and not self.do_mode: - try: - self.cache.put_commands( - prompt=cache_key, - provider=self.provider, - model=self.model, - system_prompt=system_prompt, - commands=[answer], - ) - except (OSError, sqlite3.Error): - pass - - # Print condensed summary for questions - self._print_query_summary(question, tried_commands, answer) - - return answer - - # Handle do_commands in --do mode - if parsed.response_type == LLMResponseType.DO_COMMANDS and self.do_mode: - if not parsed.do_commands: - # LLM said do_commands but provided none - ask it to try again - loop_console.print(f"[yellow]⚠ LLM response incomplete, retrying...[/yellow]") - history.append({ - "type": "error", - "message": "Response contained no commands. Please provide specific commands to execute.", - }) - continue - - result = self._handle_do_commands(parsed, question, history) - if result is not None: - # Result is either a completion message or None (continue loop) - return result - - # LLM wants to execute a read-only command - if parsed.command: - command = parsed.command - tried_commands.append(command) - - if self.debug: - self._debug_print("Executing Command", f"$ {command}", style="magenta") - - # Validate and execute the command - success, stdout, stderr = CommandValidator.execute_command(command) - - # Show execution result to user with expandable output - if self.do_mode and not self.debug: - if success: - output_lines = len(stdout.split('\n')) if stdout else 0 - loop_console.print(f"[green] ✓ Got {output_lines} lines of output[/green]") - - # Show expandable output - if stdout and output_lines > 0: - self._show_expandable_output(loop_console, stdout, command) - else: - loop_console.print(f"[yellow] ⚠ Command failed: {stderr[:100]}[/yellow]") - - if self.debug: - if success: - output_preview = stdout[:1000] + ("..." 
if len(stdout) > 1000 else "") if stdout else "(empty output)" - self._debug_print("Command Output (SUCCESS)", output_preview, style="green") - else: - self._debug_print("Command Output (FAILED)", f"Error: {stderr}", style="red") - - history.append({ - "command": command, - "success": success, - "output": stdout if success else "", - "error": stderr if not success else "", - }) - continue # Continue to next iteration with new info - - # If we get here, no valid action was taken - # This means LLM returned something we couldn't use - if self.do_mode and not self.debug: - if parsed.reasoning: - # Show reasoning if available - loop_console.print(f"[dim] LLM: {parsed.reasoning[:100]}{'...' if len(parsed.reasoning) > 100 else ''}[/dim]") - - # Max iterations reached - if self.do_mode: - if tried_commands: - commands_list = "\n".join(f" - {cmd}" for cmd in tried_commands) - result = f"The LLM gathered information but didn't propose any commands to execute.\n\nInfo gathered with:\n{commands_list}\n\nTry being more specific about what you want to do." - else: - result = "The LLM couldn't determine what commands to run. Try rephrasing your request with more specific details." - - loop_console.print(f"[yellow]⚠ {result}[/yellow]") - else: - commands_list = "\n".join(f" - {cmd}" for cmd in tried_commands) - result = f"Could not find an answer after {max_iterations} attempts.\n\nTried commands:\n{commands_list}" - - if self.debug: - self._debug_print("Max Iterations Reached", result, style="red") - - return result - - def _handle_do_commands( - self, - parsed: SystemCommand, - question: str, - history: list[dict[str, Any]] - ) -> str | None: - """Handle do_commands response type - execute with user confirmation. 
- - Uses task tree execution for advanced auto-repair capabilities: - - Spawns repair sub-tasks when commands fail - - Requests additional permissions during execution - - Monitors terminals during manual intervention - - Provides detailed failure reasoning - + except (OSError, sqlite3.Error): + pass # Silently fail cache writes + + # Track educational topics for learning history + self.learning_tracker.record_topic(question) + + return answer + + def get_learning_history(self) -> dict[str, Any]: + """Get the user's learning history. + Returns: - Result string if completed, None if should continue loop, - or "USER_DECLINED:..." if user declined. + Dictionary with topics explored and statistics """ - if not self._do_handler or not parsed.do_commands: - return None - - from rich.console import Console - console = Console() - - # Prepare commands for analysis - commands = [ - (cmd.command, cmd.purpose) for cmd in parsed.do_commands - ] - - # Analyze for protected paths - analyzed = self._do_handler.analyze_commands_for_protected_paths(commands) - - # Show reasoning - console.print() - console.print(f"[bold cyan]🤖 Cortex Analysis:[/bold cyan] {parsed.reasoning}") - console.print() - - # Show task tree preview - console.print("[dim]📋 Planned tasks:[/dim]") - for i, (cmd, purpose, protected) in enumerate(analyzed, 1): - protected_note = f" [yellow](protected: {', '.join(protected)})[/yellow]" if protected else "" - console.print(f"[dim] {i}. 
{cmd[:60]}...{protected_note}[/dim]") - console.print() - - # Request user confirmation - if self._do_handler.request_user_confirmation(analyzed): - # User approved - execute using task tree for better error handling - run = self._do_handler.execute_with_task_tree(analyzed, question) - - # Add execution results to history - for cmd_log in run.commands: - history.append({ - "command": cmd_log.command, - "success": cmd_log.status.value == "success", - "output": cmd_log.output, - "error": cmd_log.error, - "purpose": cmd_log.purpose, - "executed_by": "cortex" if "Manual execution" not in (cmd_log.purpose or "") else "user_manual", - }) - - # Check if any commands were completed manually during execution - manual_completed = self._do_handler.get_completed_manual_commands() - if manual_completed: - history.append({ - "type": "commands_completed_manually", - "commands": manual_completed, - "message": f"User manually executed these commands successfully: {', '.join(manual_completed)}. Do NOT re-propose them.", - }) - - # Check if there were failures that need LLM input - failures = [c for c in run.commands if c.status.value == "failed"] - if failures: - # Add failure context to history for LLM to help with - failure_summary = [] - for f in failures: - failure_summary.append({ - "command": f.command, - "error": f.error[:500] if f.error else "Unknown error", - "purpose": f.purpose, - }) - - history.append({ - "type": "execution_failures", - "failures": failure_summary, - "message": f"{len(failures)} command(s) failed during execution. 
Please analyze and suggest fixes.", - }) - - # Continue loop so LLM can suggest next steps - return None - - # All commands succeeded (automatically or manually) - successes = [c for c in run.commands if c.status.value == "success"] - if successes and not failures: - # Everything worked - return success message - summary = run.summary or f"Successfully executed {len(successes)} command(s)" - return f"✅ {summary}" - - # Return summary for now - LLM will provide final answer in next iteration - return None - else: - # User declined automatic execution - provide manual instructions with monitoring - run = self._do_handler.provide_manual_instructions(analyzed, question) - - # Check if any commands were completed manually - manual_completed = self._do_handler.get_completed_manual_commands() - - # Check success/failure status from the run - from cortex.do_runner.models import CommandStatus - successful_count = sum(1 for c in run.commands if c.status == CommandStatus.SUCCESS) - failed_count = sum(1 for c in run.commands if c.status == CommandStatus.FAILED) - total_expected = len(analyzed) - - if manual_completed and successful_count > 0: - # Commands were completed successfully - go to end - history.append({ - "type": "commands_completed_manually", - "commands": manual_completed, - "message": f"User manually executed {successful_count} commands successfully.", - }) - return f"✅ Commands completed manually. {successful_count} succeeded." 
- - # Commands were NOT all successful - ask user what they want to do - console.print() - from rich.panel import Panel - from rich.prompt import Prompt - - status_msg = [] - if successful_count > 0: - status_msg.append(f"[green]✓ {successful_count} succeeded[/green]") - if failed_count > 0: - status_msg.append(f"[red]✗ {failed_count} failed[/red]") - remaining = total_expected - successful_count - failed_count - if remaining > 0: - status_msg.append(f"[yellow]○ {remaining} not executed[/yellow]") - - console.print(Panel( - " | ".join(status_msg) if status_msg else "[yellow]No commands were executed[/yellow]", - title="[bold] Manual Intervention Result [/bold]", - border_style="yellow", - padding=(0, 1), - )) - - console.print() - console.print("[bold]What would you like to do?[/bold]") - console.print("[dim] • Type your request to retry or modify the approach[/dim]") - console.print("[dim] • Say 'done', 'no', or 'skip' to finish without retrying[/dim]") - console.print() - - try: - user_response = Prompt.ask("[cyan]Your response[/cyan]").strip() - except (EOFError, KeyboardInterrupt): - user_response = "done" - - # Check if user wants to end - end_keywords = ["done", "no", "skip", "exit", "quit", "stop", "cancel", "n", "finish", "end"] - if user_response.lower() in end_keywords or not user_response: - # User doesn't want to retry - go to end - history.append({ - "type": "manual_intervention_ended", - "message": f"User ended manual intervention. {successful_count} commands succeeded.", - }) - if successful_count > 0: - return f"✅ Session ended. {successful_count} command(s) completed successfully." - else: - return f"Session ended. No commands were executed." 
- - # User wants to retry or modify - add their input to history - history.append({ - "type": "manual_intervention_feedback", - "user_input": user_response, - "previous_commands": [(cmd, purpose, []) for cmd, purpose, _ in analyzed], - "successful_count": successful_count, - "failed_count": failed_count, - "message": f"User requested: {user_response}. Previous attempt had {successful_count} successes and {failed_count} failures.", - }) - - console.print() - console.print(f"[cyan]🔄 Processing your request: {user_response[:50]}{'...' if len(user_response) > 50 else ''}[/cyan]") - - # Continue the loop with user's new input as additional context - # The LLM will see the history and the user's feedback - return None + return self.learning_tracker.get_history() + + def get_recent_topics(self, limit: int = 5) -> list[str]: + """Get recently explored educational topics. + + Args: + limit: Maximum number of topics to return + + Returns: + List of topic strings + """ + return self.learning_tracker.get_recent_topics(limit) diff --git a/cortex/cli.py b/cortex/cli.py index eac50e186..4c197fc15 100644 --- a/cortex/cli.py +++ b/cortex/cli.py @@ -1,42 +1,174 @@ import argparse +import json import logging import os -import subprocess +import select import sys import time -from datetime import datetime -from typing import Any - +import uuid +from collections.abc import Callable +from datetime import datetime, timezone +from pathlib import Path +from typing import TYPE_CHECKING, Any + +from rich.console import Console +from rich.markdown import Markdown +from rich.panel import Panel +from rich.table import Table + +from cortex.api_key_detector import auto_detect_api_key, setup_api_key from cortex.ask import AskHandler from cortex.branding import VERSION, console, cx_header, cx_print, show_banner from cortex.coordinator import InstallationCoordinator, InstallationStep, StepStatus from cortex.demo import run_demo -from cortex.dependency_importer import ( - DependencyImporter, - 
PackageEcosystem, - ParseResult, - format_package_list, -) +from cortex.dependency_importer import DependencyImporter, PackageEcosystem, ParseResult from cortex.env_manager import EnvironmentManager, get_env_manager +from cortex.i18n import SUPPORTED_LANGUAGES, LanguageConfig, get_language, set_language, t from cortex.installation_history import InstallationHistory, InstallationStatus, InstallationType from cortex.llm.interpreter import CommandInterpreter from cortex.network_config import NetworkConfig from cortex.notification_manager import NotificationManager +from cortex.predictive_prevention import FailurePrediction, PredictiveErrorManager, RiskLevel +from cortex.role_manager import RoleManager from cortex.stack_manager import StackManager +from cortex.stdin_handler import StdinHandler +from cortex.uninstall_impact import ( + ImpactResult, + ImpactSeverity, + ServiceStatus, + UninstallImpactAnalyzer, +) +from cortex.update_checker import UpdateChannel, should_notify_update +from cortex.updater import Updater, UpdateStatus from cortex.validators import validate_api_key, validate_install_request +from cortex.version_manager import get_version_string + +# CLI Help Constants +HELP_SKIP_CONFIRM = "Skip confirmation prompt" + +if TYPE_CHECKING: + from cortex.daemon_client import DaemonClient, DaemonResponse + from cortex.shell_env_analyzer import ShellEnvironmentAnalyzer # Suppress noisy log messages in normal operation logging.getLogger("httpx").setLevel(logging.WARNING) logging.getLogger("cortex.installation_history").setLevel(logging.ERROR) + sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..")) class CortexCLI: + RISK_COLORS = { + RiskLevel.NONE: "green", + RiskLevel.LOW: "green", + RiskLevel.MEDIUM: "yellow", + RiskLevel.HIGH: "orange1", + RiskLevel.CRITICAL: "red", + } + # Installation messages + INSTALL_FAIL_MSG = "Installation failed" + def __init__(self, verbose: bool = False): self.spinner_chars = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", 
"⠏"] self.spinner_idx = 0 self.verbose = verbose + self.predict_manager = None + + @property + def risk_labels(self) -> dict[RiskLevel, str]: + """ + Localized mapping from RiskLevel enum values to human-readable strings. + + Returns a dictionary mapping each tier (RiskLevel.NONE to CRITICAL) + to its corresponding localized label via the t() translation helper. + """ + return { + RiskLevel.NONE: t("predictive.no_risk"), + RiskLevel.LOW: t("predictive.low_risk"), + RiskLevel.MEDIUM: t("predictive.medium_risk"), + RiskLevel.HIGH: t("predictive.high_risk"), + RiskLevel.CRITICAL: t("predictive.critical_risk"), + } + + # Define a method to handle Docker-specific permission repairs + def docker_permissions(self, args: argparse.Namespace) -> int: + """Handle the diagnosis and repair of Docker file permissions. + + This method coordinates the environment-aware scanning of the project + directory and applies ownership reclamation logic. It ensures that + administrative actions (sudo) are never performed without user + acknowledgment unless the non-interactive flag is present. + + Args: + args: The parsed command-line arguments containing the execution + context and safety flags. + + Returns: + int: 0 if successful or the operation was gracefully cancelled, + 1 if a system or logic error occurred. + """ + from cortex.permission_manager import PermissionManager + + try: + manager = PermissionManager(os.getcwd()) + cx_print("🔍 Scanning for Docker-related permission issues...", "info") + + # Validate Docker Compose configurations for missing user mappings + # to help prevent future permission drift. + manager.check_compose_config() + + # Retrieve execution context from argparse. + execute_flag = getattr(args, "execute", False) + yes_flag = getattr(args, "yes", False) + + # SAFETY GUARD: If executing repairs, prompt for confirmation unless + # the --yes flag was provided. This follows the project safety + # standard: 'No silent sudo execution'. 
+ if execute_flag and not yes_flag: + mismatches = manager.diagnose() + if mismatches: + cx_print( + f"⚠️ Found {len(mismatches)} paths requiring ownership reclamation.", + "warning", + ) + try: + # Interactive confirmation prompt for administrative repair. + console.print( + "[bold cyan]Reclaim ownership using sudo? (y/n): [/bold cyan]", end="" + ) + response = StdinHandler.get_input() + if response.lower() not in ("y", "yes"): + cx_print("Operation cancelled", "info") + return 0 + except (EOFError, KeyboardInterrupt): + # Graceful handling of terminal exit or manual interruption. + console.print() + cx_print("Operation cancelled", "info") + return 0 + + # Delegate repair logic to PermissionManager. If execute is False, + # a dry-run report is generated. If True, repairs are batched to + # avoid system ARG_MAX shell limits. + if manager.fix_permissions(execute=execute_flag): + if execute_flag: + cx_print("✨ Permissions fixed successfully!", "success") + return 0 + + return 1 + + except (PermissionError, FileNotFoundError, OSError) as e: + # Handle system-level access issues or missing project files. + cx_print(f"❌ Permission check failed: {e}", "error") + return 1 + except NotImplementedError as e: + # Report environment incompatibility (e.g., native Windows). + cx_print(f"❌ {e}", "error") + return 1 + except Exception as e: + # Safety net for unexpected runtime exceptions to prevent CLI crashes. + cx_print(f"❌ Unexpected error: {e}", "error") + return 1 def _debug(self, message: str): """Print debug info only in verbose mode""" @@ -44,37 +176,50 @@ def _debug(self, message: str): console.print(f"[dim][DEBUG] {message}[/dim]") def _get_api_key(self) -> str | None: - # Check if using Ollama or Fake provider (no API key needed) - provider = self._get_provider() - if provider == "ollama": - self._debug("Using Ollama (no API key required)") - return "ollama-local" # Placeholder for Ollama - if provider == "fake": + # 1. 
Check explicit provider override first (fake/ollama need no key) + explicit_provider = os.environ.get("CORTEX_PROVIDER", "").lower() + if explicit_provider == "fake": self._debug("Using Fake provider for testing") - return "fake-key" # Placeholder for Fake provider + return "fake-key" + if explicit_provider == "ollama": + self._debug("Using Ollama (no API key required)") + return "ollama-local" - is_valid, detected_provider, error = validate_api_key() - if not is_valid: - self._print_error(error) - cx_print("Run [bold]cortex wizard[/bold] to configure your API key.", "info") - cx_print("Or use [bold]CORTEX_PROVIDER=ollama[/bold] for offline mode.", "info") - return None - api_key = os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") - return api_key + # 2. Try auto-detection + prompt to save (setup_api_key handles both) + success, key, detected_provider = setup_api_key() + if success: + self._debug(f"Using {detected_provider} API key") + # Store detected provider so _get_provider can use it + self._detected_provider = detected_provider + return key + + # Still no key + self._print_error(t("api_key.not_found")) + cx_print(t("api_key.configure_prompt"), "info") + cx_print(t("api_key.ollama_hint"), "info") + return None def _get_provider(self) -> str: - # Check environment variable for explicit provider choice + # 1. Check explicit provider override FIRST (highest priority) explicit_provider = os.environ.get("CORTEX_PROVIDER", "").lower() if explicit_provider in ["ollama", "openai", "claude", "fake"]: + self._debug(f"Using explicit CORTEX_PROVIDER={explicit_provider}") return explicit_provider - # Auto-detect based on available API keys + # 2. Use provider from auto-detection (set by _get_api_key) + detected = getattr(self, "_detected_provider", None) + if detected == "anthropic": + return "claude" + elif detected == "openai": + return "openai" + + # 3. 
Check env vars (may have been set by auto-detect) if os.environ.get("ANTHROPIC_API_KEY"): return "claude" elif os.environ.get("OPENAI_API_KEY"): return "openai" - # Fallback to Ollama for offline mode + # 4. Fallback to Ollama for offline mode return "ollama" def _print_status(self, emoji: str, message: str): @@ -89,7 +234,7 @@ def _print_status(self, emoji: str, message: str): cx_print(message, status) def _print_error(self, message: str): - cx_print(f"Error: {message}", "error") + cx_print(f"{t('ui.error_prefix')}: {message}", "error") def _print_success(self, message: str): cx_print(message, "success") @@ -174,6 +319,169 @@ def notify(self, args): return 1 # ------------------------------- + + def _ask_ai_and_render(self, question: str) -> int: + """Invoke AI with question and render response as Markdown.""" + api_key = self._get_api_key() + if not api_key: + self._print_error("No API key found. Please configure an API provider.") + return 1 + + provider = self._get_provider() + try: + handler = AskHandler(api_key=api_key, provider=provider) + answer = handler.ask(question) + console.print(Markdown(answer)) + return 0 + except ImportError as e: + self._print_error(str(e)) + cx_print("Install required SDK or use CORTEX_PROVIDER=ollama", "info") + return 1 + except (ValueError, RuntimeError) as e: + self._print_error(str(e)) + return 1 + + def role(self, args: argparse.Namespace) -> int: + """ + Handles system role detection and manual configuration via AI context sensing. + + This method supports two subcommands: + - 'detect': Analyzes the system and suggests appropriate roles based on + installed binaries, hardware, and activity patterns. + - 'set': Manually assigns a role slug and provides tailored package recommendations. + + Args: + args: The parsed command-line arguments containing the role_action + and optional role_slug. + + Returns: + int: Exit code - 0 on success, 1 on error. 
+ """ + manager = RoleManager() + action = getattr(args, "role_action", None) + + # Step 1: Ensure a subcommand is provided to maintain a valid return state. + if not action: + self._print_error("Please specify a subcommand (detect/set)") + return 1 + + if action == "detect": + # Retrieve environmental facts including active persona and installation history. + context = manager.get_system_context() + + # Step 2: Extract the most recent patterns for AI analysis. + # Python handles list slicing gracefully even if the list has fewer than 10 items. + patterns = context.get("patterns", []) + limited_patterns = patterns[-10:] + patterns_str = ( + "\n".join([f" • {p}" for p in limited_patterns]) or " • No patterns sensed" + ) + + signals_str = ", ".join(context.get("binaries", [])) or "none detected" + gpu_status = ( + "GPU Acceleration available" if context.get("has_gpu") else "Standard CPU only" + ) + + # Generate a unique timestamp for cache-busting and session tracking. + timestamp = datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M") + + # Construct the architectural analysis prompt for the LLM. + question = ( + f"### SYSTEM ARCHITECT ANALYSIS [TIME: {timestamp}] ###\n" + f"ENVIRONMENTAL CONTEXT:\n" + f"- CURRENTLY SET ROLE: {context.get('active_role')}\n" + f"- Detected Binaries: [{signals_str}]\n" + f"- Hardware Acceleration: {gpu_status}\n" + f"- Installation History: {'Present' if context.get('has_install_history') else 'None'}\n\n" + f"OPERATIONAL_HISTORY (Technical Intents & Installed Packages):\n{patterns_str}\n\n" + f"TASK: Acting as a Senior Systems Architect, analyze the existing role and signals. " + f"Suggest 3-5 professional roles that complement the system.\n\n" + f"--- STRICT RESPONSE FORMAT ---\n" + f"YOUR RESPONSE MUST START WITH THE NUMBER '1.' AND CONTAIN ONLY THE LIST. " + f"DO NOT PROVIDE INTRODUCTIONS. DO NOT PROVIDE REASONING. DO NOT PROVIDE A SUMMARY. " + f"FAILURE TO COMPLY WILL BREAK THE CLI PARSER.\n\n" + f"Detected roles:\n" + f"1." 
+ ) + + cx_print("🧠 AI is sensing system context and activity patterns...", "thinking") + if self._ask_ai_and_render(question) != 0: + return 1 + console.print() + + # Record the detection event in the installation history database for audit purposes. + history = InstallationHistory() + history.record_installation( + InstallationType.CONFIG, + ["system-detection"], + ["cortex role detect"], + datetime.now(timezone.utc), + ) + + console.print( + "\n[dim italic]💡 To install any recommended packages, simply run:[/dim italic]" + ) + console.print("[bold cyan] cortex install [/bold cyan]\n") + return 0 + + elif action == "set": + if not args.role_slug: + self._print_error("Role slug is required for 'set' command.") + return 1 + + role_slug = args.role_slug + + # Step 3: Persist the role and handle both validation and persistence errors. + try: + manager.save_role(role_slug) + history = InstallationHistory() + history.record_installation( + InstallationType.CONFIG, + [role_slug], + [f"cortex role set {role_slug}"], + datetime.now(timezone.utc), + ) + except ValueError as e: + self._print_error(f"Invalid role slug: {e}") + return 1 + except RuntimeError as e: + self._print_error(f"Failed to persist role: {e}") + return 1 + + cx_print(f"✓ Role set to: [bold cyan]{role_slug}[/bold cyan]", "success") + + context = manager.get_system_context() + # Generate a unique request ID for cache-busting and tracking purposes. + req_id = f"{datetime.now().strftime('%H:%M:%S.%f')}-{uuid.uuid4().hex[:4]}" + + cx_print(f"🔍 Fetching tailored AI recommendations for {role_slug}...", "info") + + # Construct the recommendation prompt for the LLM. + rec_question = ( + f"### ARCHITECTURAL ADVISORY [ID: {req_id}] ###\n" + f"NEW_TARGET_PERSONA: {role_slug}\n" + f"OS: {sys.platform} | GPU: {'Enabled' if context.get('has_gpu') else 'None'}\n\n" + f"TASK: Generate 3-5 unique packages for '{role_slug}' ONLY.\n" + f"--- PREFERRED RESPONSE FORMAT ---\n" + f"Please start with '1.' 
and provide only the list of roles. " + f"Omit introductions, reasoning, and summaries.\n\n" + f"💡 Recommended packages for {role_slug}:\n" + f" - " + ) + + if self._ask_ai_and_render(rec_question) != 0: + return 1 + + console.print( + "\n[dim italic]💡 Ready to upgrade? Install any of these using:[/dim italic]" + ) + console.print("[bold cyan] cortex install [/bold cyan]\n") + return 0 + + else: + self._print_error("Unknown role command") + return 1 + def demo(self): """ Run the one-command investor demo @@ -213,21 +521,21 @@ def stack(self, args: argparse.Namespace) -> int: def _handle_stack_list(self, manager: StackManager) -> int: """List all available stacks.""" stacks = manager.list_stacks() - cx_print("\n📦 Available Stacks:\n", "info") + cx_print(f"\n📦 {t('stack.available')}:\n", "info") for stack in stacks: pkg_count = len(stack.get("packages", [])) console.print(f" [green]{stack.get('id', 'unknown')}[/green]") - console.print(f" {stack.get('name', 'Unnamed Stack')}") - console.print(f" {stack.get('description', 'No description')}") + console.print(f" {stack.get('name', t('stack.unnamed'))}") + console.print(f" {stack.get('description', t('stack.no_description'))}") console.print(f" [dim]({pkg_count} packages)[/dim]\n") - cx_print("Use: cortex stack to install a stack", "info") + cx_print(t("stack.use_command"), "info") return 0 def _handle_stack_describe(self, manager: StackManager, stack_id: str) -> int: """Describe a specific stack.""" stack = manager.find_stack(stack_id) if not stack: - self._print_error(f"Stack '{stack_id}' not found. 
Use --list to see available stacks.") + self._print_error(t("stack.not_found", name=stack_id)) return 1 description = manager.describe_stack(stack_id) console.print(description) @@ -240,20 +548,18 @@ def _handle_stack_install(self, manager: StackManager, args: argparse.Namespace) if suggested_name != original_name: cx_print( - f"💡 No GPU detected, using '{suggested_name}' instead of '{original_name}'", + f"💡 {t('stack.gpu_fallback', original=original_name, suggested=suggested_name)}", "info", ) stack = manager.find_stack(suggested_name) if not stack: - self._print_error( - f"Stack '{suggested_name}' not found. Use --list to see available stacks." - ) + self._print_error(t("stack.not_found", name=suggested_name)) return 1 packages = stack.get("packages", []) if not packages: - self._print_error(f"Stack '{suggested_name}' has no packages configured.") + self._print_error(t("stack.no_packages", name=suggested_name)) return 1 if args.dry_run: @@ -263,28 +569,28 @@ def _handle_stack_install(self, manager: StackManager, args: argparse.Namespace) def _handle_stack_dry_run(self, stack: dict[str, Any], packages: list[str]) -> int: """Preview packages that would be installed without executing.""" - cx_print(f"\n📋 Stack: {stack['name']}", "info") - console.print("\nPackages that would be installed:") + cx_print(f"\n📋 {t('stack.installing', name=stack['name'])}", "info") + console.print(f"\n{t('stack.dry_run_preview')}:") for pkg in packages: console.print(f" • {pkg}") - console.print(f"\nTotal: {len(packages)} packages") - cx_print("\nDry run only - no commands executed", "warning") + console.print(f"\n{t('stack.packages_total', count=len(packages))}") + cx_print(f"\n{t('stack.dry_run_note')}", "warning") return 0 def _handle_stack_real_install(self, stack: dict[str, Any], packages: list[str]) -> int: """Install all packages in the stack.""" - cx_print(f"\n🚀 Installing stack: {stack['name']}\n", "success") + cx_print(f"\n🚀 {t('stack.installing', name=stack['name'])}\n", 
"success") # Batch into a single LLM request packages_str = " ".join(packages) result = self.install(software=packages_str, execute=True, dry_run=False) if result != 0: - self._print_error(f"Failed to install stack '{stack['name']}'") + self._print_error(t("stack.failed", name=stack["name"])) return 1 - self._print_success(f"\n✅ Stack '{stack['name']}' installed successfully!") - console.print(f"Installed {len(packages)} packages") + self._print_success(f"\n✅ {t('stack.installed', name=stack['name'])}") + console.print(t("stack.packages_installed", count=len(packages))) return 0 # --- Sandbox Commands (Docker-based package testing) --- @@ -295,23 +601,22 @@ def sandbox(self, args: argparse.Namespace) -> int: DockerSandbox, SandboxAlreadyExistsError, SandboxNotFoundError, - SandboxTestStatus, ) action = getattr(args, "sandbox_action", None) if not action: - cx_print("\n🐳 Docker Sandbox - Test packages safely before installing\n", "info") - console.print("Usage: cortex sandbox [options]") - console.print("\nCommands:") - console.print(" create Create a sandbox environment") - console.print(" install Install package in sandbox") - console.print(" test [package] Run tests in sandbox") - console.print(" promote Install tested package on main system") - console.print(" cleanup Remove sandbox environment") - console.print(" list List all sandboxes") - console.print(" exec Execute command in sandbox") - console.print("\nExample workflow:") + cx_print(f"\n🐳 {t('sandbox.header')}\n", "info") + console.print(t("sandbox.usage")) + console.print(f"\n{t('sandbox.commands_header')}:") + console.print(f" create {t('sandbox.cmd_create')}") + console.print(f" install {t('sandbox.cmd_install')}") + console.print(f" test [package] {t('sandbox.cmd_test')}") + console.print(f" promote {t('sandbox.cmd_promote')}") + console.print(f" cleanup {t('sandbox.cmd_cleanup')}") + console.print(f" list {t('sandbox.cmd_list')}") + console.print(f" exec {t('sandbox.cmd_exec')}") + 
console.print(f"\n{t('sandbox.example_workflow')}:") console.print(" cortex sandbox create test-env") console.print(" cortex sandbox install test-env nginx") console.print(" cortex sandbox test test-env") @@ -434,8 +739,8 @@ def _sandbox_promote(self, sandbox, args: argparse.Namespace) -> int: if not skip_confirm: console.print(f"\nPromote '{package}' to main system? [Y/n]: ", end="") try: - response = input().strip().lower() - if response and response not in ("y", "yes"): + response = StdinHandler.get_input() + if response and response.lower() not in ("y", "yes"): cx_print("Promotion cancelled", "warning") return 0 except (EOFError, KeyboardInterrupt): @@ -494,7 +799,7 @@ def _sandbox_list(self, sandbox) -> int: def _sandbox_exec(self, sandbox, args: argparse.Namespace) -> int: """Execute command in sandbox.""" name = args.name - command = args.command + command = args.cmd result = sandbox.exec_command(name, command) @@ -505,13 +810,54 @@ def _sandbox_exec(self, sandbox, args: argparse.Namespace) -> int: return result.exit_code + def _display_prediction_warning(self, prediction: FailurePrediction) -> None: + """Display formatted prediction warning.""" + color = self.RISK_COLORS.get(prediction.risk_level, "white") + label = self.risk_labels.get(prediction.risk_level, "Unknown") + + console.print() + if prediction.risk_level >= RiskLevel.HIGH: + console.print(f"⚠️ [bold red]{t('predictive.risks_detected')}:[/bold red]") + else: + console.print(f"ℹ️ [bold {color}]{t('predictive.risks_detected')}:[/bold {color}]") + + if prediction.reasons: + console.print(f"\n[bold]{label}:[/bold]") + for reason in prediction.reasons: + console.print(f" - {reason}") + + if prediction.recommendations: + console.print(f"\n[bold]{t('predictive.recommendation')}:[/bold]") + for i, rec in enumerate(prediction.recommendations, 1): + console.print(f" {i}. 
{rec}") + + if prediction.predicted_errors: + console.print(f"\n[bold]{t('predictive.predicted_errors')}:[/bold]") + for err in prediction.predicted_errors: + msg = f"{err[:100]}..." if len(err) > 100 else err + console.print(f" ! [dim]{msg}[/dim]") + + def _confirm_risky_operation(self, prediction: FailurePrediction) -> bool: + """Prompt user for confirmation of a risky operation.""" + if prediction.risk_level == RiskLevel.HIGH or prediction.risk_level == RiskLevel.CRITICAL: + cx_print(f"\n{t('predictive.high_risk_warning')}", "warning") + + console.print(f"\n{t('predictive.continue_anyway')} [y/N]: ", end="", markup=False) + try: + response = StdinHandler.get_input().lower() + return response in ("y", "yes") + except (EOFError, KeyboardInterrupt): + console.print() + return False + # --- End Sandbox Commands --- - def ask(self, question: str | None, debug: bool = False, do_mode: bool = False) -> int: + def ask(self, question: str, do_mode: bool = False) -> int: """Answer a natural language question about the system. - - In --do mode, Cortex can execute write and modify commands with user confirmation. - If no question is provided in --do mode, starts an interactive session. 
+ + Args: + question: The natural language question to answer + do_mode: If True, enable execution mode where AI can run commands """ api_key = self._get_api_key() if not api_key: @@ -520,40 +866,93 @@ def ask(self, question: str | None, debug: bool = False, do_mode: bool = False) provider = self._get_provider() self._debug(f"Using provider: {provider}") - # Setup cortex user if in do mode - if do_mode: + try: + handler = AskHandler( + api_key=api_key, + provider=provider, + do_mode=do_mode, + ) + + if do_mode: + # Interactive execution mode + return self._run_interactive_do_session(handler, question) + else: + # Standard ask mode + answer = handler.ask(question) + # Render as markdown for proper formatting in terminal + console.print(Markdown(answer)) + return 0 + except ImportError as e: + # Provide a helpful message if provider SDK is missing + self._print_error(str(e)) + cx_print( + "Install the required SDK or set CORTEX_PROVIDER=ollama for local mode.", "info" + ) + return 1 + except ValueError as e: + self._print_error(str(e)) + return 1 + except RuntimeError as e: + self._print_error(str(e)) + return 1 + + def _run_interactive_do_session(self, handler: AskHandler, initial_question: str | None) -> int: + """Run an interactive session with execution capabilities. 
+ + Args: + handler: The AskHandler configured for do_mode + initial_question: Optional initial question to start with + """ + from rich.prompt import Prompt + + console.print() + console.print("[bold cyan]🤖 Cortex AI Assistant (Execution Mode)[/bold cyan]") + console.print("[dim]Commands will be shown for your approval before execution.[/dim]") + console.print("[dim]Type 'exit' or press Ctrl+C to quit.[/dim]") + console.print() + + question = initial_question + + while True: try: - from cortex.do_runner import setup_cortex_user - cx_print("🔧 Do mode enabled - Cortex can execute commands to solve problems", "info") - # Don't fail if user creation fails - we have fallbacks - setup_cortex_user() + if not question: + question = Prompt.ask("[bold green]What would you like to do?[/bold green]") + + if not question or question.lower() in ["exit", "quit", "q"]: + console.print("[dim]Goodbye![/dim]") + return 0 + + # Process the question + result = handler.ask(question) + if result: + console.print(Markdown(result)) + + # Reset for next iteration + question = None + + except KeyboardInterrupt: + console.print("\n[dim]Session ended.[/dim]") + return 0 except Exception as e: - self._debug(f"Cortex user setup skipped: {e}") + self._print_error(f"Error: {e}") + question = None + + def _ask_with_session_key(self, question: str, api_key: str, provider: str) -> int: + """Answer a question using provided session API key without re-prompting. + + This wrapper is used by continuous voice mode to avoid re-calling _get_api_key(). 
+ """ + self._debug(f"Using provider: {provider}") try: handler = AskHandler( api_key=api_key, provider=provider, - debug=debug, - do_mode=do_mode, ) - - # If no question and in do mode, start interactive session - if question is None and do_mode: - return self._run_interactive_do_session(handler) - elif question is None: - self._print_error("Please provide a question or use --do for interactive mode") - return 1 - answer = handler.ask(question) - # Don't print raw JSON or processing messages - if answer and not (answer.strip().startswith('{') or - "I'm processing your request" in answer or - "I have a plan to execute" in answer): - console.print(answer) + console.print(answer) return 0 except ImportError as e: - # Provide a helpful message if provider SDK is missing self._print_error(str(e)) cx_print( "Install the required SDK or set CORTEX_PROVIDER=ollama for local mode.", "info" @@ -565,330 +964,36 @@ def ask(self, question: str | None, debug: bool = False, do_mode: bool = False) except RuntimeError as e: self._print_error(str(e)) return 1 - - def _run_interactive_do_session(self, handler: AskHandler) -> int: - """Run an interactive --do session where user can type queries.""" - import signal - from rich.panel import Panel - from rich.prompt import Prompt - - # Create a session - from cortex.do_runner import DoRunDatabase - db = DoRunDatabase() - session_id = db.create_session() - - # Pass session_id to handler - if handler._do_handler: - handler._do_handler.current_session_id = session_id - - # Track if we're currently processing a request - processing_request = False - request_interrupted = False - - class SessionInterrupt(Exception): - """Exception raised to interrupt the current request and return to prompt.""" - pass - - class SessionExit(Exception): - """Exception raised to exit the session immediately (Ctrl+C).""" - pass - - def handle_ctrl_z(signum, frame): - """Handle Ctrl+Z - stop current operation, return to prompt.""" - nonlocal request_interrupted - 
- # Set interrupt flag on the handler - this will be checked in the loop - handler.interrupt() - - # If DoHandler has an active process, stop it - if handler._do_handler and handler._do_handler._current_process: - try: - handler._do_handler._current_process.terminate() - handler._do_handler._current_process.wait(timeout=1) - except: - try: - handler._do_handler._current_process.kill() - except: - pass - handler._do_handler._current_process = None - - # If we're processing a request, interrupt it immediately - if processing_request: - request_interrupted = True - console.print() - console.print(f"[yellow]⚠ Ctrl+Z - Stopping current operation...[/yellow]") - # Raise exception to break out and return to prompt - raise SessionInterrupt("Interrupted by Ctrl+Z") - else: - # Not processing anything, just inform the user - console.print() - console.print(f"[dim]Ctrl+Z - Type 'exit' to end the session[/dim]") - - def handle_ctrl_c(signum, frame): - """Handle Ctrl+C - exit the session immediately.""" - # Stop any active process first - if handler._do_handler and handler._do_handler._current_process: - try: - handler._do_handler._current_process.terminate() - handler._do_handler._current_process.wait(timeout=1) - except: - try: - handler._do_handler._current_process.kill() - except: - pass - handler._do_handler._current_process = None - - console.print() - console.print("[cyan]👋 Session ended (Ctrl+C).[/cyan]") - raise SessionExit("Exited by Ctrl+C") - - # Set up signal handlers for the entire session - # Ctrl+Z (SIGTSTP) -> stop current operation, return to prompt - # Ctrl+C (SIGINT) -> exit session immediately - original_sigtstp = signal.signal(signal.SIGTSTP, handle_ctrl_z) - original_sigint = signal.signal(signal.SIGINT, handle_ctrl_c) - - try: - console.print() - console.print(Panel( - "[bold cyan]🚀 Cortex Interactive Session[/bold cyan]\n\n" - f"[dim]Session ID: {session_id[:30]}...[/dim]\n\n" - "Type what you want to do and Cortex will help you.\n" - "Commands will be 
shown for approval before execution.\n\n" - "[dim]Examples:[/dim]\n" - " • install docker and run nginx\n" - " • setup a postgresql database\n" - " • configure nginx to proxy port 3000\n" - " • check system resources\n\n" - "[dim]Type 'exit' or 'quit' to end the session.[/dim]\n" - "[dim]Press Ctrl+Z to stop current operation | Ctrl+C to exit immediately[/dim]", - title="[bold green]Welcome[/bold green]", - border_style="cyan", - )) - console.print() - - session_history = [] # Track what was done in this session - run_count = 0 - - while True: - try: - # Show compact session status (not the full history panel) - if session_history: - console.print(f"[dim]Session: {len(session_history)} task(s) | {run_count} run(s) | Type 'history' to see details[/dim]") - - # Get user input - query = Prompt.ask("[bold cyan]What would you like to do?[/bold cyan]") - - if not query.strip(): - continue - - # Check for exit - if query.lower().strip() in ["exit", "quit", "bye", "q"]: - db.end_session(session_id) - console.print() - console.print(f"[cyan]👋 Session ended ({run_count} runs). 
Run 'cortex do history' to see past runs.[/cyan]") - break - - # Check for help - if query.lower().strip() in ["help", "?"]: - console.print() - console.print("[bold]Available commands:[/bold]") - console.print(" [green]exit[/green], [green]quit[/green] - End the session") - console.print(" [green]history[/green] - Show session history") - console.print(" [green]clear[/green] - Clear session history") - console.print(" Or type any request in natural language!") - console.print() - continue - - # Check for history - if query.lower().strip() == "history": - if session_history: - from rich.table import Table - from rich.panel import Panel - - console.print() - table = Table( - show_header=True, - header_style="bold cyan", - title=f"[bold]Session History[/bold]", - title_style="bold", - ) - table.add_column("#", style="dim", width=3) - table.add_column("Query", style="white", max_width=45) - table.add_column("Status", justify="center", width=8) - table.add_column("Commands", justify="center", width=10) - table.add_column("Run ID", style="dim", max_width=20) - - for i, item in enumerate(session_history, 1): - status = "[green]✓ Success[/green]" if item.get("success") else "[red]✗ Failed[/red]" - query_short = item['query'][:42] + "..." if len(item['query']) > 42 else item['query'] - cmd_count = str(item.get('commands_count', 0)) if item.get('success') else "-" - run_id = item.get('run_id', '-')[:18] + "..." 
if item.get('run_id') and len(item.get('run_id', '')) > 18 else item.get('run_id', '-') - table.add_row(str(i), query_short, status, cmd_count, run_id) - - console.print(table) - console.print() - console.print(f"[dim]Total: {len(session_history)} tasks | {run_count} runs | Session: {session_id[:20]}...[/dim]") - console.print() - else: - console.print("[dim]No tasks completed yet.[/dim]") - continue - - # Check for clear - if query.lower().strip() == "clear": - session_history.clear() - console.print("[dim]Session history cleared.[/dim]") - continue - - # Update session with query - db.update_session(session_id, query=query) - - # Process the query - console.print() - processing_request = True - request_interrupted = False - handler.reset_interrupt() # Reset interrupt flag before new request - - try: - answer = handler.ask(query) - - # Check if request was interrupted - if request_interrupted: - console.print("[yellow]⚠ Request was interrupted[/yellow]") - session_history.append({ - "query": query, - "success": False, - "error": "Interrupted by user", - }) - continue - - # Get the run_id and command count if one was created - run_id = None - commands_count = 0 - if handler._do_handler and handler._do_handler.current_run: - run_id = handler._do_handler.current_run.run_id - # Count commands from the run - if handler._do_handler.current_run.commands: - commands_count = len(handler._do_handler.current_run.commands) - run_count += 1 - db.update_session(session_id, increment_runs=True) - - # Track in session history - session_history.append({ - "query": query, - "success": True, - "answer": answer[:100] if answer else "", - "run_id": run_id, - "commands_count": commands_count, - }) - - # Print response if it's informational (filter out JSON) - if answer and not answer.startswith("USER_DECLINED"): - # Don't print raw JSON or processing messages - if not (answer.strip().startswith('{') or - "I'm processing your request" in answer or - "I have a plan to execute" in 
answer): - console.print(answer) - - except SessionInterrupt: - # Ctrl+Z/Ctrl+C pressed - return to prompt immediately - console.print() - session_history.append({ - "query": query, - "success": False, - "error": "Interrupted by user", - }) - continue # Go back to "What would you like to do?" prompt - except Exception as e: - if request_interrupted: - console.print("[yellow]⚠ Request was interrupted[/yellow]") - else: - # Show user-friendly error without internal details - error_msg = str(e) - if isinstance(e, AttributeError): - console.print("[yellow]⚠ Something went wrong. Please try again.[/yellow]") - # Log the actual error for debugging - import logging - logging.debug(f"Internal error: {e}") - else: - console.print(f"[red]⚠ {error_msg}[/red]") - session_history.append({ - "query": query, - "success": False, - "error": "Interrupted" if request_interrupted else str(e), - }) - finally: - processing_request = False - request_interrupted = False - - console.print() - - except SessionInterrupt: - # Ctrl+Z - just return to prompt - console.print() - continue - except SessionExit: - # Ctrl+C - exit session immediately - db.end_session(session_id) - break - except (KeyboardInterrupt, EOFError): - # Fallback for any other interrupts - db.end_session(session_id) - console.print() - console.print("[cyan]👋 Session ended.[/cyan]") - break - - finally: - # Always restore signal handlers when session ends - signal.signal(signal.SIGTSTP, original_sigtstp) - signal.signal(signal.SIGINT, original_sigint) - - return 0 - def install( + def _install_with_session_key( self, software: str, + api_key: str, + provider: str, execute: bool = False, dry_run: bool = False, - parallel: bool = False, - ): + ) -> int: + """Install software using provided session API key without re-prompting. + + This wrapper is used by continuous voice mode to avoid re-calling _get_api_key(). 
+ """ + history = InstallationHistory() + install_id = None + start_time = datetime.now() + # Validate input first is_valid, error = validate_install_request(software) if not is_valid: self._print_error(error) return 1 - # Special-case the ml-cpu stack: - # The LLM sometimes generates outdated torch==1.8.1+cpu installs - # which fail on modern Python. For the "pytorch-cpu jupyter numpy pandas" - # combo, force a supported CPU-only PyTorch recipe instead. - normalized = " ".join(software.split()).lower() - - if normalized == "pytorch-cpu jupyter numpy pandas": - software = ( - "pip3 install torch torchvision torchaudio " - "--index-url https://download.pytorch.org/whl/cpu && " - "pip3 install jupyter numpy pandas" - ) - - api_key = self._get_api_key() - if not api_key: - return 1 - - provider = self._get_provider() + software = self._normalize_software_name(software) self._debug(f"Using provider: {provider}") - self._debug(f"API key: {api_key[:10]}...{api_key[-4:]}") - - # Initialize installation history - history = InstallationHistory() - install_id = None - start_time = datetime.now() + self._debug("Using session API key: ") try: self._print_status("🧠", "Understanding request...") - interpreter = CommandInterpreter(api_key=api_key, provider=provider) - self._print_status("📦", "Planning installation...") for _ in range(10): @@ -898,15 +1003,11 @@ def install( commands = interpreter.parse(f"install {software}") if not commands: - self._print_error( - "No commands generated. Please try again with a different request." - ) + self._print_error(t("install.no_commands")) return 1 - # Extract packages from commands for tracking packages = history._extract_packages_from_commands(commands) - # Record installation start if execute or dry_run: install_id = history.record_installation( InstallationType.INSTALL, packages, commands, start_time @@ -918,38 +1019,629 @@ def install( print(f" {i}. 
{cmd}") if dry_run: - print("\n(Dry run mode - commands not executed)") + print(f"\n({t('install.dry_run_message')})") if install_id: history.update_installation(install_id, InstallationStatus.SUCCESS) return 0 if execute: + print(f"\n{t('install.executing')}") + coordinator = InstallationCoordinator(commands=commands) + result = coordinator.execute() - def progress_callback(current, total, step): - status_emoji = "⏳" - if step.status == StepStatus.SUCCESS: - status_emoji = "✅" - elif step.status == StepStatus.FAILED: - status_emoji = "❌" - print(f"\n[{current}/{total}] {status_emoji} {step.description}") - print(f" Command: {step.command}") - - print("\nExecuting commands...") + if result.success: + if install_id: + history.update_installation(install_id, InstallationStatus.SUCCESS) + return 0 + else: + error_msg = result.message or "Installation failed" + if install_id: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + self._print_error(error_msg) + return 1 + else: + # Neither dry_run nor execute - just show commands + return 0 - if parallel: - import asyncio + except Exception as e: + error_msg = str(e) + if install_id: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + self._print_error(error_msg) + return 1 - from cortex.install_parallel import run_parallel_install + def voice(self, continuous: bool = False, model: str | None = None) -> int: + """Handle voice input mode. - def parallel_log_callback(message: str, level: str = "info"): - if level == "success": - cx_print(f" ✅ {message}", "success") - elif level == "error": - cx_print(f" ❌ {message}", "error") - else: - cx_print(f" ℹ {message}", "info") + Args: + continuous: If True, stay in voice mode until Ctrl+C. + If False, record single input and exit. + model: Whisper model name (e.g., 'base.en', 'small.en'). + If None, uses CORTEX_WHISPER_MODEL env var or 'base.en'. 
+ """ + import queue + import threading - try: + try: + from cortex.voice import ( + MicrophoneNotFoundError, + ModelNotFoundError, + VoiceInputError, + VoiceInputHandler, + ) + except ImportError: + self._print_error("Voice dependencies not installed.") + cx_print("Install with: pip install cortex-linux[voice]", "info") + return 1 + + api_key = self._get_api_key() + if not api_key: + return 1 + + # Capture provider once for session + provider = self._get_provider() + self._debug(f"Session using provider: {provider}") + + # Display model information if specified + if model: + model_info = { + "tiny.en": "(39 MB, fastest, good for clear speech)", + "base.en": "(140 MB, balanced speed/accuracy)", + "small.en": "(466 MB, better accuracy)", + "medium.en": "(1.5 GB, high accuracy)", + "tiny": "(39 MB, multilingual)", + "base": "(290 MB, multilingual)", + "small": "(968 MB, multilingual)", + "medium": "(3 GB, multilingual)", + "large": "(6 GB, best accuracy, multilingual)", + } + cx_print(f"Using Whisper model: {model} {model_info.get(model, '')}", "info") + + # Queue for thread-safe communication between worker and main thread + input_queue = queue.Queue() + response_queue = queue.Queue() + + def process_voice_command(text: str) -> None: + """Process transcribed voice command.""" + if not text: + return + + # Determine if this is an install command or a question + text_lower = text.lower().strip() + is_install = any( + text_lower.startswith(word) for word in ["install", "setup", "add", "get", "put"] + ) + + if is_install: + # Remove the command verb for install + software = text + for verb in ["install", "setup", "add", "get", "put"]: + if text_lower.startswith(verb): + software = text[len(verb) :].strip() + break + + # Validate software name + if not software or len(software) > 200: + cx_print("Invalid software name", "error") + return + + # Check for dangerous characters that shouldn't be in package names + dangerous_chars = [";", "&", "|", "`", "$", "(", ")"] + if 
any(char in software for char in dangerous_chars): + cx_print("Invalid characters detected in software name", "error") + return + + cx_print(f"Installing: {software}", "info") + + # Handle prompt based on mode + def _drain_queues() -> None: + """Clear any stale prompt/response messages from previous interactions.""" + + try: + while not response_queue.empty(): + response_queue.get_nowait() + except Exception: + pass + + try: + while not input_queue.empty(): + input_queue.get_nowait() + except Exception: + pass + + def _flush_stdin() -> None: + """Flush any pending input from stdin.""" + try: + # Use select to check for pending input without blocking + while select.select([sys.stdin], [], [], 0.0)[0]: + sys.stdin.read(1) + except (OSError, ValueError, TypeError): + # OSError: fd not valid, ValueError: fd negative, TypeError: not selectable + pass + + def _resolve_choice() -> str: + """Prompt user until a valid choice is provided.""" + + def _prompt_inline() -> str: + console.print() + console.print("[bold cyan]Choose an action:[/bold cyan]") + console.print(" [1] Dry run (preview commands)") + console.print(" [2] Execute (run commands)") + console.print(" [3] Cancel") + console.print(" [dim](Ctrl+C to cancel)[/dim]") + console.print() + + try: + _flush_stdin() # Clear any buffered input + choice = input("Enter choice [1/2/3]: ").strip() + # Blank input defaults to dry-run (1) + return choice or "1" + except (KeyboardInterrupt, EOFError): + return "3" + + if input_handler_thread is None: + # Single-shot mode: inline prompt handling (no input handler thread running) + _flush_stdin() # Clear any buffered input before prompting + choice_local = _prompt_inline() + while choice_local not in {"1", "2", "3"}: + cx_print("Invalid choice. 
Please enter 1, 2, or 3.", "warning") + choice_local = _prompt_inline() + return choice_local + + # Continuous mode: use queue-based communication with input handler thread + _drain_queues() + while True: + input_queue.put({"type": "prompt", "software": software}) + + try: + response = response_queue.get(timeout=60) + choice_local = response.get("choice") + except queue.Empty: + cx_print("\nInput timeout - cancelled.", "warning") + return "3" + + if choice_local in {"1", "2", "3"}: + return choice_local + + # Invalid or malformed response — re-prompt + cx_print("Invalid choice. Please enter 1, 2, or 3.", "warning") + + def _prompt_execute_after_dry_run() -> str: + """Prompt user to execute or cancel after dry-run preview.""" + console.print() + console.print("[bold cyan]Dry-run complete. What next?[/bold cyan]") + console.print(" [1] Execute (run commands)") + console.print(" [2] Cancel") + console.print(" [dim](Ctrl+C to cancel)[/dim]") + console.print() + + try: + _flush_stdin() # Clear any buffered input + choice_input = input("Enter choice [1/2]: ").strip() + return choice_input or "2" # Default to cancel + except (KeyboardInterrupt, EOFError): + return "2" + + choice = _resolve_choice() + + # Process choice (unified for both modes) + if choice == "1": + self._install_with_session_key( + software, api_key, provider, execute=False, dry_run=True + ) + # After dry-run, ask if user wants to execute + follow_up = _prompt_execute_after_dry_run() + while follow_up not in {"1", "2"}: + cx_print("Invalid choice. 
Please enter 1 or 2.", "warning") + follow_up = _prompt_execute_after_dry_run() + if follow_up == "1": + cx_print("Executing installation...", "info") + self._install_with_session_key( + software, api_key, provider, execute=True, dry_run=False + ) + else: + cx_print("Cancelled.", "info") + elif choice == "2": + cx_print("Executing installation...", "info") + self._install_with_session_key( + software, api_key, provider, execute=True, dry_run=False + ) + else: + cx_print("Cancelled.", "info") + else: + # Treat as a question + cx_print(f"Question: {text}", "info") + self._ask_with_session_key(text, api_key, provider) + + handler = None + input_handler_thread = None + stop_input_handler = threading.Event() + + def input_handler_loop(): + """Main thread loop to handle user input requests from worker thread.""" + while not stop_input_handler.is_set(): + try: + request = input_queue.get(timeout=0.5) + if request.get("type") == "prompt": + console.print() + console.print("[bold cyan]Choose an action:[/bold cyan]") + console.print(" [1] Dry run (preview commands)") + console.print(" [2] Execute (run commands)") + console.print(" [3] Cancel") + console.print() + + while True: + try: + choice = input("Enter choice [1/2/3]: ").strip() + # Blank input defaults to dry-run (1) + choice = choice or "1" + except (KeyboardInterrupt, EOFError): + response_queue.put({"choice": "3"}) + cx_print("\nCancelled.", "info") + break + + if choice in {"1", "2", "3"}: + response_queue.put({"choice": choice}) + break + + cx_print("Invalid choice. 
Please enter 1, 2, or 3.", "warning") + except queue.Empty: + continue + except Exception as e: + logging.debug(f"Input handler error: {e}") + continue + + try: + handler = VoiceInputHandler(model_name=model) + + if continuous: + # Start input handler thread + input_handler_thread = threading.Thread(target=input_handler_loop, daemon=True) + input_handler_thread.start() + + # Continuous voice mode + handler.start_voice_mode(process_voice_command) + else: + # Single recording mode + text = handler.record_single() + if text: + process_voice_command(text) + else: + cx_print("No speech detected.", "warning") + + return 0 + + except (VoiceInputError, MicrophoneNotFoundError, ModelNotFoundError) as e: + self._print_error(str(e)) + return 1 + except KeyboardInterrupt: + cx_print("\nVoice mode exited.", "info") + return 0 + finally: + # Stop input handler thread + stop_input_handler.set() + if input_handler_thread is not None and input_handler_thread.is_alive(): + input_handler_thread.join(timeout=1.0) + + # Ensure cleanup even if exceptions occur + if handler is not None: + try: + handler.stop() + except Exception as e: + # Log cleanup errors but don't raise + logging.debug("Error during voice handler cleanup: %s", e) + + def _normalize_software_name(self, software: str) -> str: + """Normalize software name by cleaning whitespace. + + Returns a natural-language description suitable for LLM interpretation. + Does NOT return shell commands - all command generation must go through + the LLM and validation pipeline. 
+ """ + # Just normalize whitespace - return natural language description + return " ".join(software.split()) + + def _record_history_error( + self, + history: InstallationHistory, + install_id: str | None, + error: str, + ) -> None: + """Record installation error to history.""" + if install_id: + history.update_installation(install_id, InstallationStatus.FAILED, error) + + def _handle_parallel_execution( + self, + commands: list[str], + software: str, + install_id: str | None, + history: InstallationHistory, + ) -> int: + """Handle parallel installation execution.""" + import asyncio + + from cortex.install_parallel import run_parallel_install + + def parallel_log_callback(message: str, level: str = "info"): + if level == "success": + cx_print(f" ✅ {message}", "success") + elif level == "error": + cx_print(f" ❌ {message}", "error") + else: + cx_print(f" ℹ {message}", "info") + + try: + success, parallel_tasks = asyncio.run( + run_parallel_install( + commands=commands, + descriptions=[f"Step {i + 1}" for i in range(len(commands))], + timeout=300, + stop_on_error=True, + log_callback=parallel_log_callback, + ) + ) + + if success: + total_duration = self._calculate_duration(parallel_tasks) + self._print_success(f"{software} installed successfully!") + print(f"\nCompleted in {total_duration:.2f} seconds (parallel mode)") + if install_id: + history.update_installation(install_id, InstallationStatus.SUCCESS) + print(f"\n📝 Installation recorded (ID: {install_id})") + print(f" To rollback: cortex rollback {install_id}") + return 0 + + error_msg = self._get_parallel_error_msg(parallel_tasks) + self._record_history_error(history, install_id, error_msg) + self._print_error(self.INSTALL_FAIL_MSG) + if error_msg: + print(f" Error: {error_msg}", file=sys.stderr) + if install_id: + print(f"\n📝 Installation recorded (ID: {install_id})") + print(f" View details: cortex history {install_id}") + return 1 + + except (ValueError, OSError) as e: + self._record_history_error(history, 
install_id, str(e)) + self._print_error(f"Parallel execution failed: {str(e)}") + return 1 + except Exception as e: + self._record_history_error(history, install_id, str(e)) + self._print_error(f"Unexpected parallel execution error: {str(e)}") + if self.verbose: + import traceback + + traceback.print_exc() + return 1 + + def _calculate_duration(self, parallel_tasks: list) -> float: + """Calculate total duration from parallel tasks.""" + if not parallel_tasks: + return 0.0 + + max_end = max( + (t.end_time for t in parallel_tasks if t.end_time is not None), + default=None, + ) + min_start = min( + (t.start_time for t in parallel_tasks if t.start_time is not None), + default=None, + ) + if max_end is not None and min_start is not None: + return max_end - min_start + return 0.0 + + def _get_parallel_error_msg(self, parallel_tasks: list) -> str: + """Extract error message from failed parallel tasks.""" + failed_tasks = [t for t in parallel_tasks if getattr(t.status, "value", "") == "failed"] + return failed_tasks[0].error if failed_tasks else self.INSTALL_FAIL_MSG + + def _handle_sequential_execution( + self, + commands: list[str], + software: str, + install_id: str | None, + history: InstallationHistory, + ) -> int: + """Handle sequential installation execution.""" + + def progress_callback(current, total, step): + status_emoji = "⏳" + if step.status == StepStatus.SUCCESS: + status_emoji = "✅" + elif step.status == StepStatus.FAILED: + status_emoji = "❌" + print(f"\n[{current}/{total}] {status_emoji} {step.description}") + print(f" Command: {step.command}") + + coordinator = InstallationCoordinator( + commands=commands, + descriptions=[f"Step {i + 1}" for i in range(len(commands))], + timeout=300, + stop_on_error=True, + progress_callback=progress_callback, + ) + + result = coordinator.execute() + + if result.success: + self._print_success(f"{software} installed successfully!") + print(f"\nCompleted in {result.total_duration:.2f} seconds") + if install_id: + 
history.update_installation(install_id, InstallationStatus.SUCCESS) + print(f"\n📝 Installation recorded (ID: {install_id})") + print(f" To rollback: cortex rollback {install_id}") + return 0 + + # Handle failure + self._record_history_error( + history, install_id, result.error_message or self.INSTALL_FAIL_MSG + ) + if result.failed_step is not None: + self._print_error(f"{self.INSTALL_FAIL_MSG} at step {result.failed_step + 1}") + else: + self._print_error(self.INSTALL_FAIL_MSG) + if result.error_message: + print(f" Error: {result.error_message}", file=sys.stderr) + if install_id: + print(f"\n📝 Installation recorded (ID: {install_id})") + print(f" View details: cortex history {install_id}") + return 1 + + def install( + self, + software: str, + execute: bool = False, + dry_run: bool = False, + parallel: bool = False, + json_output: bool = False, + ) -> int: + """Install software using the LLM-powered package manager.""" + # Initialize installation history + history = InstallationHistory() + install_id = None + start_time = datetime.now() + # Validate input first + is_valid, error = validate_install_request(software) + if not is_valid: + if json_output: + print(json.dumps({"success": False, "error": error, "error_type": "ValueError"})) + else: + self._print_error(error) + return 1 + + software = self._normalize_software_name(software) + + api_key = self._get_api_key() + if not api_key: + error_msg = "No API key found. Please configure an API provider." 
+ # Record installation attempt before failing if we have packages + try: + packages = [software.split()[0]] # Basic package extraction + install_id = history.record_installation( + InstallationType.INSTALL, packages, [], start_time + ) + except Exception: + pass # If recording fails, continue with error reporting + + if install_id: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + + if json_output: + print( + json.dumps({"success": False, "error": error_msg, "error_type": "RuntimeError"}) + ) + else: + self._print_error(error_msg) + return 1 + + provider = self._get_provider() + self._debug(f"Using provider: {provider}") + self._debug(f"API key: {api_key[:10]}...{api_key[-4:]}") + + try: + if not json_output: + self._print_status("🧠", "Understanding request...") + + interpreter = CommandInterpreter(api_key=api_key, provider=provider) + + if not json_output: + self._print_status("📦", "Planning installation...") + for _ in range(10): + self._animate_spinner("Analyzing system requirements...") + self._clear_line() + + commands = interpreter.parse(f"install {software}") + + if not commands: + self._print_error(t("install.no_commands")) + return 1 + + # Predictive Analysis + if not json_output: + self._print_status("🔮", t("predictive.analyzing")) + if not self.predict_manager: + self.predict_manager = PredictiveErrorManager(api_key=api_key, provider=provider) + prediction = self.predict_manager.analyze_installation(software, commands) + if not json_output: + self._clear_line() + + if not json_output: + if prediction.risk_level != RiskLevel.NONE: + self._display_prediction_warning(prediction) + if execute and not self._confirm_risky_operation(prediction): + cx_print(f"\n{t('ui.operation_cancelled')}", "warning") + return 0 + else: + cx_print(t("predictive.no_issues_detected"), "success") + + # Extract packages from commands for tracking + packages = history._extract_packages_from_commands(commands) + + # Record installation start + if 
execute or dry_run: + install_id = history.record_installation( + InstallationType.INSTALL, packages, commands, start_time + ) + + # If JSON output requested, return structured data and exit early + if json_output: + output = { + "success": True, + "commands": commands, + "packages": packages, + "install_id": install_id, + "prediction": { + "risk_level": prediction.risk_level.name, + "reasons": prediction.reasons, + "recommendations": prediction.recommendations, + "predicted_errors": prediction.predicted_errors, + }, + } + print(json.dumps(output, indent=2)) + return 0 + + self._print_status("⚙️", f"Installing {software}...") + print("\nGenerated commands:") + for i, cmd in enumerate(commands, 1): + print(f" {i}. {cmd}") + + if dry_run: + print(f"\n({t('install.dry_run_message')})") + if install_id: + history.update_installation(install_id, InstallationStatus.SUCCESS) + return 0 + + if execute: + + def progress_callback(current, total, step): + status_emoji = "⏳" + if step.status == StepStatus.SUCCESS: + status_emoji = "✅" + elif step.status == StepStatus.FAILED: + status_emoji = "❌" + print(f"\n[{current}/{total}] {status_emoji} {step.description}") + print(f" Command: {step.command}") + + print(f"\n{t('install.executing')}") + + if parallel: + import asyncio + + from cortex.install_parallel import run_parallel_install + + def parallel_log_callback(message: str, level: str = "info"): + if level == "success": + cx_print(f" ✅ {message}", "success") + elif level == "error": + cx_print(f" ❌ {message}", "error") + else: + cx_print(f" ℹ {message}", "info") + + try: success, parallel_tasks = asyncio.run( run_parallel_install( commands=commands, @@ -974,8 +1666,10 @@ def parallel_log_callback(message: str, level: str = "info"): total_duration = max_end - min_start if success: - self._print_success(f"{software} installed successfully!") - print(f"\nCompleted in {total_duration:.2f} seconds (parallel mode)") + self._print_success(t("install.package_installed", 
package=software)) + print( + f"\n{t('progress.completed_in', seconds=f'{total_duration:.2f}')}" + ) if install_id: history.update_installation(install_id, InstallationStatus.SUCCESS) @@ -996,9 +1690,9 @@ def parallel_log_callback(message: str, level: str = "info"): error_msg, ) - self._print_error("Installation failed") + self._print_error(t("install.failed")) if error_msg: - print(f" Error: {error_msg}", file=sys.stderr) + print(f" {t('common.error')}: {error_msg}", file=sys.stderr) if install_id: print(f"\n📝 Installation recorded (ID: {install_id})") print(f" View details: cortex history {install_id}") @@ -1034,8 +1728,8 @@ def parallel_log_callback(message: str, level: str = "info"): result = coordinator.execute() if result.success: - self._print_success(f"{software} installed successfully!") - print(f"\nCompleted in {result.total_duration:.2f} seconds") + self._print_success(t("install.package_installed", package=software)) + print(f"\n{t('progress.completed_in', seconds=f'{result.total_duration:.2f}')}") # Record successful installation if install_id: @@ -1065,27 +1759,43 @@ def parallel_log_callback(message: str, level: str = "info"): else: print("\nTo execute these commands, run with --execute flag") print("Example: cortex install docker --execute") + return 0 - return 0 + print("\nExecuting commands...") + if parallel: + return self._handle_parallel_execution(commands, software, install_id, history) + + return self._handle_sequential_execution(commands, software, install_id, history) except ValueError as e: if install_id: history.update_installation(install_id, InstallationStatus.FAILED, str(e)) - self._print_error(str(e)) + if json_output: + + print(json.dumps({"success": False, "error": str(e), "error_type": "ValueError"})) + else: + self._print_error(str(e)) return 1 except RuntimeError as e: if install_id: history.update_installation(install_id, InstallationStatus.FAILED, str(e)) - self._print_error(f"API call failed: {str(e)}") + if json_output: + + 
print(json.dumps({"success": False, "error": str(e), "error_type": "RuntimeError"})) + else: + self._print_error(f"API call failed: {str(e)}") return 1 except OSError as e: if install_id: history.update_installation(install_id, InstallationStatus.FAILED, str(e)) - self._print_error(f"System error: {str(e)}") + if json_output: + + print(json.dumps({"success": False, "error": str(e), "error_type": "OSError"})) + else: + self._print_error(f"System error: {str(e)}") return 1 except Exception as e: - if install_id: - history.update_installation(install_id, InstallationStatus.FAILED, str(e)) + self._record_history_error(history, install_id, str(e)) self._print_error(f"Unexpected error: {str(e)}") if self.verbose: import traceback @@ -1093,33 +1803,556 @@ def parallel_log_callback(message: str, level: str = "info"): traceback.print_exc() return 1 - def cache_stats(self) -> int: - try: - from cortex.semantic_cache import SemanticCache - - cache = SemanticCache() - stats = cache.stats() - hit_rate = f"{stats.hit_rate * 100:.1f}%" if stats.total else "0.0%" + def remove(self, args: argparse.Namespace) -> int: + """Handle package removal with impact analysis""" + package = args.package + dry_run = getattr(args, "dry_run", True) # Default to dry-run for safety + purge = getattr(args, "purge", False) + force = getattr(args, "force", False) + json_output = getattr(args, "json", False) - cx_header("Cache Stats") - cx_print(f"Hits: {stats.hits}", "info") - cx_print(f"Misses: {stats.misses}", "info") - cx_print(f"Hit rate: {hit_rate}", "info") - cx_print(f"Saved calls (approx): {stats.hits}", "info") - return 0 - except (ImportError, OSError) as e: - self._print_error(f"Unable to read cache stats: {e}") + # Initialize and analyze + result = self._analyze_package_removal(package) + if result is None: return 1 - except Exception as e: - self._print_error(f"Unexpected error reading cache stats: {e}") - if self.verbose: - import traceback - traceback.print_exc() + # Check if package 
doesn't exist at all (not in repos) + if self._check_package_not_found(result): return 1 - def history(self, limit: int = 20, status: str | None = None, show_id: str | None = None): - """Show installation history""" + # Output results + self._output_impact_result(result, json_output) + + # Dry-run mode - stop here + if dry_run: + console.print() + cx_print("Dry run mode - no changes made", "info") + cx_print(f"To proceed with removal: cortex remove {package} --execute", "info") + return 0 + + # Safety check and confirmation + if not self._can_proceed_with_removal(result, force, args, package, purge): + return self._removal_blocked_or_cancelled(result, force) + + return self._execute_removal(package, purge) + + def _analyze_package_removal(self, package: str): + """Initialize analyzer and perform impact analysis. Returns None on failure.""" + try: + analyzer = UninstallImpactAnalyzer() + except Exception as e: + self._print_error(f"Failed to initialize impact analyzer: {e}") + return None + + cx_print(f"Analyzing impact of removing '{package}'...", "info") + try: + return analyzer.analyze(package) + except Exception as e: + self._print_error(f"Impact analysis failed: {e}") + if self.verbose: + import traceback + + traceback.print_exc() + return None + + def _check_package_not_found(self, result) -> bool: + """Check if package doesn't exist in repos and print warnings.""" + if result.warnings and "not found in repositories" in str(result.warnings): + for warning in result.warnings: + cx_print(warning, "warning") + for rec in result.recommendations: + cx_print(rec, "info") + return True + return False + + def _output_impact_result(self, result, json_output: bool) -> None: + """Output the impact result in JSON or rich format.""" + if json_output: + import json as json_module + + data = { + "target_package": result.target_package, + "direct_dependents": result.direct_dependents, + "transitive_dependents": result.transitive_dependents, + "affected_services": [ + { + 
"name": s.name, + "status": s.status.value, + "package": s.package, + "is_critical": s.is_critical, + } + for s in result.affected_services + ], + "orphaned_packages": result.orphaned_packages, + "cascade_packages": result.cascade_packages, + "severity": result.severity.value, + "total_affected": result.total_affected, + "cascade_depth": result.cascade_depth, + "recommendations": result.recommendations, + "warnings": result.warnings, + "safe_to_remove": result.safe_to_remove, + } + console.print(json_module.dumps(data, indent=2)) + else: + self._display_impact_report(result) + + def _can_proceed_with_removal( + self, result, force: bool, args, package: str, purge: bool + ) -> bool: + """Check safety and get user confirmation. Returns True if can proceed.""" + if not result.safe_to_remove and not force: + return False + + skip_confirm = getattr(args, "yes", False) + if skip_confirm: + return True + + return self._confirm_removal(package, purge) + + def _confirm_removal(self, package: str, purge: bool) -> bool: + """Prompt user for removal confirmation.""" + console.print() + confirm_msg = f"Remove '{package}'" + if purge: + confirm_msg += " and purge configuration" + confirm_msg += "? [y/N]: " + try: + response = StdinHandler.get_input(confirm_msg).lower() + return response in ("y", "yes") + except (EOFError, KeyboardInterrupt): + console.print() + return False + + def _removal_blocked_or_cancelled(self, result, force: bool) -> int: + """Handle blocked or cancelled removal.""" + if not result.safe_to_remove and not force: + console.print() + self._print_error( + "Package removal has high impact. Use --force to proceed anyway, " + "or address the recommendations first." 
+ ) + return 1 + cx_print("Removal cancelled", "info") + return 0 + + def _display_impact_report(self, result: ImpactResult) -> None: + """Display formatted impact analysis report""" + + # Severity styling + severity_styles = { + ImpactSeverity.SAFE: ("green", "✅"), + ImpactSeverity.LOW: ("green", "💚"), + ImpactSeverity.MEDIUM: ("yellow", "🟡"), + ImpactSeverity.HIGH: ("orange1", "🟠"), + ImpactSeverity.CRITICAL: ("red", "🔴"), + } + style, icon = severity_styles.get(result.severity, ("white", "❓")) + + # Header + console.print() + console.print( + Panel(f"[bold]{icon} Impact Analysis: {result.target_package}[/bold]", style=style) + ) + + # Display sections + self._display_warnings(result.warnings) + self._display_package_list(result.direct_dependents, "cyan", "📦 Direct dependents", 10) + self._display_services(result.affected_services) + self._display_summary_table(result, style, Table) + self._display_package_list(result.cascade_packages, "yellow", "🗑️ Cascade removal", 5) + self._display_package_list(result.orphaned_packages, "white", "👻 Would become orphaned", 5) + self._display_recommendations(result.recommendations) + + # Final verdict + console.print() + if result.safe_to_remove: + console.print("[bold green]✅ Safe to remove[/bold green]") + else: + console.print("[bold yellow]⚠️ Review recommendations before proceeding[/bold yellow]") + + def _display_warnings(self, warnings: list) -> None: + """Display warnings with appropriate styling.""" + for warning in warnings: + if "not currently installed" in warning: + console.print(f"\n[bold yellow]ℹ️ {warning}[/bold yellow]") + console.print("[dim] Showing potential impact analysis for this package.[/dim]") + else: + console.print(f"\n[bold red]⚠️ {warning}[/bold red]") + + def _display_package_list(self, packages: list, color: str, title: str, limit: int) -> None: + """Display a list of packages with truncation.""" + if packages: + console.print(f"\n[bold {color}]{title} ({len(packages)}):[/bold {color}]") + for 
pkg in packages[:limit]: + console.print(f" • {pkg}") + if len(packages) > limit: + console.print(f" [dim]... and {len(packages) - limit} more[/dim]") + elif "dependents" in title: + console.print(f"\n[bold {color}]{title}:[/bold {color}] None") + + def _display_services(self, services: list) -> None: + """Display affected services.""" + if services: + console.print(f"\n[bold magenta]🔧 Affected services ({len(services)}):[/bold magenta]") + for service in services: + status_icon = "🟢" if service.status == ServiceStatus.RUNNING else "⚪" + critical_marker = " [red][CRITICAL][/red]" if service.is_critical else "" + console.print(f" {status_icon} {service.name}{critical_marker}") + else: + console.print("\n[bold magenta]🔧 Affected services:[/bold magenta] None") + + def _display_summary_table(self, result, style: str, table_class) -> None: + """Display the impact summary table.""" + summary_table = table_class(show_header=False, box=None, padding=(0, 2)) + summary_table.add_column("Metric", style="dim") + summary_table.add_column("Value") + summary_table.add_row("Total packages affected", str(result.total_affected)) + summary_table.add_row("Cascade depth", str(result.cascade_depth)) + summary_table.add_row("Services at risk", str(len(result.affected_services))) + summary_table.add_row("Severity", f"[{style}]{result.severity.value.upper()}[/{style}]") + console.print("\n[bold]📊 Impact Summary:[/bold]") + console.print(summary_table) + + def _display_recommendations(self, recommendations: list) -> None: + """Display recommendations.""" + if recommendations: + console.print("\n[bold green]💡 Recommendations:[/bold green]") + for rec in recommendations: + console.print(f" • {rec}") + + def _execute_removal(self, package: str, purge: bool = False) -> int: + """Execute the actual package removal with audit logging""" + import datetime + import subprocess + + cx_print(f"Removing '{package}'...", "info") + + # Initialize history for audit logging + history = 
InstallationHistory() + start_time = datetime.datetime.now() + operation_type = InstallationType.PURGE if purge else InstallationType.REMOVE + + # Build removal command (with -y since user already confirmed) + if purge: + cmd = ["sudo", "apt-get", "purge", "-y", package] + else: + cmd = ["sudo", "apt-get", "remove", "-y", package] + + # Record the operation start + try: + install_id = history.record_installation( + operation_type=operation_type, + packages=[package], + commands=[" ".join(cmd)], + start_time=start_time, + ) + except Exception as e: + self._debug(f"Failed to record installation start: {e}") + install_id = None + + try: + result = subprocess.run(cmd, capture_output=True, text=True, timeout=300) + + if result.returncode == 0: + self._print_success(f"'{package}' removed successfully") + + # Record successful removal + if install_id: + try: + history.update_installation(install_id, InstallationStatus.SUCCESS) + except Exception as e: + self._debug(f"Failed to update installation record: {e}") + + # Run autoremove to clean up orphaned packages + console.print() + cx_print("Running autoremove to clean up orphaned packages...", "info") + autoremove_cmd = ["sudo", "apt-get", "autoremove", "-y"] + autoremove_start = datetime.datetime.now() + + # Record autoremove operation start + autoremove_id = None + try: + autoremove_id = history.record_installation( + operation_type=InstallationType.REMOVE, + packages=[f"{package}-autoremove"], + commands=[" ".join(autoremove_cmd)], + start_time=autoremove_start, + ) + except Exception as e: + self._debug(f"Failed to record autoremove start: {e}") + + try: + autoremove_result = subprocess.run( + autoremove_cmd, + capture_output=True, + text=True, + timeout=300, + ) + + if autoremove_result.returncode == 0: + cx_print("Cleanup complete", "success") + if autoremove_id: + try: + history.update_installation( + autoremove_id, InstallationStatus.SUCCESS + ) + except Exception as e: + self._debug(f"Failed to update autoremove 
record: {e}") + else: + cx_print("Autoremove completed with warnings", "warning") + if autoremove_id: + try: + history.update_installation( + autoremove_id, + InstallationStatus.FAILED, + error_message=( + autoremove_result.stderr[:500] + if autoremove_result.stderr + else "Autoremove returned non-zero exit code" + ), + ) + except Exception as e: + self._debug(f"Failed to update autoremove record: {e}") + except subprocess.TimeoutExpired: + cx_print("Autoremove timed out", "warning") + if autoremove_id: + try: + history.update_installation( + autoremove_id, + InstallationStatus.FAILED, + error_message="Autoremove timed out after 300 seconds", + ) + except Exception: + pass + except Exception as e: + cx_print(f"Autoremove failed: {e}", "warning") + if autoremove_id: + try: + history.update_installation( + autoremove_id, + InstallationStatus.FAILED, + error_message=str(e)[:500], + ) + except Exception: + pass + + return 0 + else: + self._print_error(f"Removal failed: {result.stderr}") + # Record failed removal + if install_id: + try: + history.update_installation( + install_id, + InstallationStatus.FAILED, + error_message=result.stderr[:500], + ) + except Exception as e: + self._debug(f"Failed to update installation record: {e}") + return 1 + + except subprocess.TimeoutExpired: + self._print_error("Removal timed out") + # Record timeout failure + if install_id: + try: + history.update_installation( + install_id, + InstallationStatus.FAILED, + error_message="Operation timed out after 300 seconds", + ) + except Exception: + pass + return 1 + except Exception as e: + self._print_error(f"Removal failed: {e}") + # Record exception failure + if install_id: + try: + history.update_installation( + install_id, + InstallationStatus.FAILED, + error_message=str(e)[:500], + ) + except Exception: + pass + return 1 + + def cache_stats(self) -> int: + try: + from cortex.semantic_cache import SemanticCache + + cache = SemanticCache() + stats = cache.stats() + hit_rate_value = 
f"{stats.hit_rate * 100:.1f}" if stats.total else "0.0" + + cx_header(t("cache.stats_header")) + cx_print(f"{t('cache.hits')}: {stats.hits}", "info") + cx_print(f"{t('cache.misses')}: {stats.misses}", "info") + cx_print(t("cache.hit_rate", rate=hit_rate_value), "info") + cx_print(f"{t('cache.saved_calls')}: {stats.saved_calls}", "info") + return 0 + except (ImportError, OSError) as e: + self._print_error(t("cache.read_error", error=str(e))) + return 1 + except Exception as e: + self._print_error(t("cache.unexpected_error", error=str(e))) + if self.verbose: + import traceback + + traceback.print_exc() + return 1 + + def config(self, args: argparse.Namespace) -> int: + """Handle configuration commands including language settings.""" + action = getattr(args, "config_action", None) + + if not action: + cx_print(t("config.missing_subcommand"), "error") + return 1 + + if action == "language": + return self._config_language(args) + elif action == "show": + return self._config_show() + else: + self._print_error(t("config.unknown_action", action=action)) + return 1 + + def _config_language(self, args: argparse.Namespace) -> int: + """Handle language configuration.""" + lang_config = LanguageConfig() + + # List available languages + if getattr(args, "list", False): + cx_header(t("language.available")) + for code, info in SUPPORTED_LANGUAGES.items(): + current_marker = " ✓" if code == get_language() else "" + console.print( + f" [green]{code}[/green] - {info['name']} ({info['native']}){current_marker}" + ) + return 0 + + # Show language info + if getattr(args, "info", False): + info = lang_config.get_language_info() + cx_header(t("language.current")) + console.print(f" [bold]{info['name']}[/bold] ({info['native_name']})") + console.print(f" [dim]{t('config.code_label')}: {info['language']}[/dim]") + # Translate the source value using proper key mapping + source_translation_keys = { + "environment": "language.set_from_env", + "config": "language.set_from_config", + 
"auto-detected": "language.auto_detected", + "default": "language.default", + } + source = info.get("source", "") + source_key = source_translation_keys.get(source) + source_display = t(source_key) if source_key else source + console.print(f" [dim]{t('config.source_label')}: {source_display}[/dim]") + + if info.get("env_override"): + console.print(f" [dim]{t('language.set_from_env')}: {info['env_override']}[/dim]") + if info.get("detected_language"): + console.print( + f" [dim]{t('language.auto_detected')}: {info['detected_language']}[/dim]" + ) + return 0 + + # Set language + code = getattr(args, "code", None) + if not code: + # No code provided, show current language and list + current = get_language() + current_info = SUPPORTED_LANGUAGES.get(current, {}) + cx_print( + f"{t('language.current')}: {current_info.get('name', current)} " + f"({current_info.get('native', '')})", + "info", + ) + console.print() + console.print( + f"[dim]{t('language.supported_codes')}: {', '.join(SUPPORTED_LANGUAGES.keys())}[/dim]" + ) + console.print(f"[dim]{t('config.use_command_hint')}[/dim]") + console.print(f"[dim]{t('config.list_hint')}[/dim]") + return 0 + + # Handle 'auto' to clear saved preference + if code.lower() == "auto": + lang_config.clear_language() + from cortex.i18n.translator import reset_translator + + reset_translator() + new_lang = get_language() + new_info = SUPPORTED_LANGUAGES.get(new_lang, {}) + cx_print(t("language.changed", language=new_info.get("native", new_lang)), "success") + console.print(f"[dim]({t('language.auto_detected')})[/dim]") + return 0 + + # Validate and set language + code = code.lower() + if code not in SUPPORTED_LANGUAGES: + self._print_error(t("language.invalid_code", code=code)) + console.print( + f"[dim]{t('language.supported_codes')}: {', '.join(SUPPORTED_LANGUAGES.keys())}[/dim]" + ) + return 1 + + try: + lang_config.set_language(code) + # Reset the global translator to pick up the new language + from cortex.i18n.translator import 
reset_translator + + reset_translator() + set_language(code) + + lang_info = SUPPORTED_LANGUAGES[code] + cx_print(t("language.changed", language=lang_info["native"]), "success") + return 0 + except (ValueError, RuntimeError) as e: + self._print_error(t("language.set_failed", error=str(e))) + return 1 + + def _config_show(self) -> int: + """Show all current configuration.""" + cx_header(t("config.header")) + + # Language + lang_config = LanguageConfig() + lang_info = lang_config.get_language_info() + console.print(f"[bold]{t('config.language_label')}:[/bold]") + console.print( + f" {lang_info['name']} ({lang_info['native_name']}) " + f"[dim][{lang_info['language']}][/dim]" + ) + # Translate the source identifier to user-friendly text + source_translations = { + "environment": t("language.set_from_env"), + "config": t("language.set_from_config"), + "auto-detected": t("language.auto_detected"), + "default": t("language.default"), + } + source_display = source_translations.get(lang_info["source"], lang_info["source"]) + console.print(f" [dim]{t('config.source_label')}: {source_display}[/dim]") + console.print() + + # API Provider + provider = self._get_provider() + console.print(f"[bold]{t('config.llm_provider_label')}:[/bold]") + console.print(f" {provider}") + console.print() + + # Config paths + console.print(f"[bold]{t('config.config_paths_label')}:[/bold]") + console.print(f" {t('config.preferences_path')}: ~/.cortex/preferences.yaml") + console.print(f" {t('config.history_path')}: ~/.cortex/history.db") + console.print() + + return 0 + + def history(self, limit: int = 20, status: str | None = None, show_id: str | None = None): + """Show installation history""" history = InstallationHistory() try: @@ -1203,28 +2436,1012 @@ def rollback(self, install_id: str, dry_run: bool = False): self._print_success(message) return 0 else: - self._print_error(message) + self._print_error(message) + return 1 + except (ValueError, OSError) as e: + self._print_error(f"Rollback 
failed: {str(e)}") + return 1 + except Exception as e: + self._print_error(f"Unexpected rollback error: {str(e)}") + if self.verbose: + import traceback + + traceback.print_exc() + return 1 + + def status(self): + """Show comprehensive system status and run health checks""" + from cortex.doctor import SystemDoctor + + # Run the comprehensive system health checks + # This now includes all functionality from the old status command + # plus all the detailed health checks from doctor + doctor = SystemDoctor() + return doctor.run_checks() + + def update(self, args: argparse.Namespace) -> int: + """Handle the update command for self-updating Cortex.""" + from rich.progress import Progress, SpinnerColumn, TextColumn + + # Parse channel + channel_str = getattr(args, "channel", "stable") + try: + channel = UpdateChannel(channel_str) + except ValueError: + channel = UpdateChannel.STABLE + + updater = Updater(channel=channel) + + # Handle subcommands + action = getattr(args, "update_action", None) + + if action == "check" or (not action and getattr(args, "check", False)): + # Check for updates only + cx_print("Checking for updates...", "thinking") + result = updater.check_update_available(force=True) + + if result.error: + self._print_error(f"Update check failed: {result.error}") + return 1 + + console.print() + cx_print(f"Current version: [cyan]{result.current_version}[/cyan]", "info") + + if result.update_available and result.latest_release: + cx_print( + f"Update available: [green]{result.latest_version}[/green]", + "success", + ) + console.print() + console.print("[bold]Release notes:[/bold]") + console.print(result.latest_release.release_notes_summary) + console.print() + cx_print( + "Run [bold]cortex update install[/bold] to upgrade", + "info", + ) + else: + cx_print("Cortex is up to date!", "success") + + return 0 + + elif action == "install": + # Install update + target = getattr(args, "version", None) + dry_run = getattr(args, "dry_run", False) + + if dry_run: + 
cx_print("Dry run mode - no changes will be made", "warning") + + cx_header("Cortex Self-Update") + + def progress_callback(message: str, percent: float) -> None: + if percent >= 0: + cx_print(f"{message} ({percent:.0f}%)", "info") + else: + cx_print(message, "info") + + updater.progress_callback = progress_callback + + result = updater.update(target_version=target, dry_run=dry_run) + + console.print() + + if result.success: + if result.status == UpdateStatus.SUCCESS: + if result.new_version == result.previous_version: + cx_print("Already up to date!", "success") + else: + cx_print( + f"Updated: {result.previous_version} → {result.new_version}", + "success", + ) + if result.duration_seconds: + console.print(f"[dim]Completed in {result.duration_seconds:.1f}s[/dim]") + elif result.status == UpdateStatus.PENDING: + # Dry run + cx_print( + f"Would update: {result.previous_version} → {result.new_version}", + "info", + ) + return 0 + else: + if result.status == UpdateStatus.ROLLED_BACK: + cx_print("Update failed - rolled back to previous version", "warning") + else: + self._print_error(f"Update failed: {result.error}") + return 1 + + elif action == "rollback": + # Rollback to previous version + backup_id = getattr(args, "backup_id", None) + + backups = updater.list_backups() + + if not backups: + self._print_error("No backups available for rollback") + return 1 + + if backup_id: + # Find specific backup + target_backup = None + for b in backups: + if b.version == backup_id or str(b.path).endswith(backup_id): + target_backup = b + break + + if not target_backup: + self._print_error(f"Backup '{backup_id}' not found") + return 1 + + backup_path = target_backup.path + else: + # Use most recent backup + backup_path = backups[0].path + + cx_print(f"Rolling back to backup: {backup_path.name}", "info") + result = updater.rollback_to_backup(backup_path) + + if result.success: + cx_print( + f"Rolled back: {result.previous_version} → {result.new_version}", + "success", + ) + return 
0 + else: + self._print_error(f"Rollback failed: {result.error}") + return 1 + + elif action == "list" or getattr(args, "list_releases", False): + # List available versions + from cortex.update_checker import UpdateChecker + + checker = UpdateChecker(channel=channel) + releases = checker.get_all_releases(limit=10) + + if not releases: + cx_print("No releases found", "warning") + return 1 + + cx_header(f"Available Releases ({channel.value} channel)") + + table = Table(show_header=True, header_style="bold cyan", box=None) + table.add_column("Version", style="green") + table.add_column("Date") + table.add_column("Channel") + table.add_column("Notes") + + current = get_version_string() + + for release in releases: + version_str = str(release.version) + if version_str == current: + version_str = f"{version_str} [dim](current)[/dim]" + + # Truncate notes + notes = release.name or release.body[:50] if release.body else "" + if len(notes) > 50: + notes = notes[:47] + "..." + + table.add_row( + version_str, + release.formatted_date, + release.version.channel.value, + notes, + ) + + console.print(table) + return 0 + + elif action == "backups": + # List backups + backups = updater.list_backups() + + if not backups: + cx_print("No backups available", "info") + return 0 + + cx_header("Available Backups") + + table = Table(show_header=True, header_style="bold cyan", box=None) + table.add_column("Version", style="green") + table.add_column("Date") + table.add_column("Size") + table.add_column("Path") + + for backup in backups: + # Format size + size_mb = backup.size_bytes / (1024 * 1024) + size_str = f"{size_mb:.1f} MB" + + # Format date + try: + dt = datetime.fromisoformat(backup.timestamp) + date_str = dt.strftime("%Y-%m-%d %H:%M") + except ValueError: + date_str = backup.timestamp[:16] + + table.add_row( + backup.version, + date_str, + size_str, + str(backup.path.name), + ) + + console.print(table) + console.print() + cx_print( + "Use [bold]cortex update rollback [/bold] to 
restore", + "info", + ) + return 0 + + else: + # Default: show current version and check for updates + cx_print(f"Current version: [cyan]{get_version_string()}[/cyan]", "info") + cx_print("Checking for updates...", "thinking") + + result = updater.check_update_available() + + if result.update_available and result.latest_release: + console.print() + cx_print( + f"Update available: [green]{result.latest_version}[/green]", + "success", + ) + console.print() + console.print("[bold]What's new:[/bold]") + console.print(result.latest_release.release_notes_summary) + console.print() + cx_print( + "Run [bold]cortex update install[/bold] to upgrade", + "info", + ) + else: + cx_print("Cortex is up to date!", "success") + + return 0 + + # Daemon Commands + # -------------------------- + + def daemon(self, args: argparse.Namespace) -> int: + """Handle daemon commands: install, uninstall, config, reload-config, version, ping, shutdown. + + Available commands: + - install/uninstall: Manage systemd service files (Python-side) + - config: Get daemon configuration via IPC + - reload-config: Reload daemon configuration via IPC + - version: Get daemon version via IPC + - ping: Test daemon connectivity via IPC + - shutdown: Request daemon shutdown via IPC + - run-tests: Run daemon test suite + """ + action = getattr(args, "daemon_action", None) + + if action == "install": + return self._daemon_install(args) + elif action == "uninstall": + return self._daemon_uninstall(args) + elif action == "config": + return self._daemon_config() + elif action == "reload-config": + return self._daemon_reload_config() + elif action == "version": + return self._daemon_version() + elif action == "ping": + return self._daemon_ping() + elif action == "shutdown": + return self._daemon_shutdown() + elif action == "run-tests": + return self._daemon_run_tests(args) + else: + cx_print("Usage: cortex daemon ", "info") + cx_print("", "info") + cx_print("Available commands:", "info") + cx_print(" install Install 
and enable the daemon service", "info") + cx_print(" uninstall Remove the daemon service", "info") + cx_print(" config Show daemon configuration", "info") + cx_print(" reload-config Reload daemon configuration", "info") + cx_print(" version Show daemon version", "info") + cx_print(" ping Test daemon connectivity", "info") + cx_print(" shutdown Request daemon shutdown", "info") + cx_print(" run-tests Run daemon test suite", "info") + return 0 + + def _update_history_on_failure( + self, history: InstallationHistory, install_id: str | None, error_msg: str + ) -> None: + """ + Helper method to update installation history on failure. + + Args: + history: InstallationHistory instance. + install_id: Installation ID to update, or None if not available. + error_msg: Error message to record. + """ + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + + def _daemon_ipc_call( + self, + operation_name: str, + ipc_func: "Callable[[DaemonClient], DaemonResponse]", + ) -> tuple[bool, "DaemonResponse | None"]: + """ + Helper method for daemon IPC calls with centralized error handling. + + Args: + operation_name: Human-readable name of the operation for error messages. + ipc_func: A callable that takes a DaemonClient and returns a DaemonResponse. + + Returns: + Tuple of (success: bool, response: DaemonResponse | None) + On error, response is None and an error message is printed. 
+ """ + # Initialize audit logging + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + [f"daemon.{operation_name}"], + start_time, + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + + try: + from cortex.daemon_client import ( + DaemonClient, + DaemonConnectionError, + DaemonNotInstalledError, + DaemonResponse, + ) + + client = DaemonClient() + response = ipc_func(client) + + # Update history with success/failure + if install_id: + try: + if response and response.success: + history.update_installation(install_id, InstallationStatus.SUCCESS) + else: + error_msg = ( + response.error if response and response.error else "IPC call failed" + ) + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + + return True, response + + except DaemonNotInstalledError as e: + error_msg = str(e) + cx_print(f"{error_msg}", "error") + self._update_history_on_failure(history, install_id, error_msg) + return False, None + except DaemonConnectionError as e: + error_msg = str(e) + cx_print(f"{error_msg}", "error") + self._update_history_on_failure(history, install_id, error_msg) + return False, None + except ImportError: + error_msg = "Daemon client not available." 
+ cx_print(error_msg, "error") + self._update_history_on_failure(history, install_id, error_msg) + return False, None + except Exception as e: + error_msg = f"Unexpected error during {operation_name}: {e}" + cx_print(error_msg, "error") + self._update_history_on_failure(history, install_id, error_msg) + return False, None + + def _daemon_install(self, args: argparse.Namespace) -> int: + """Install the cortexd daemon using setup_daemon.py.""" + import subprocess + from pathlib import Path + + cx_header("Installing Cortex Daemon") + + # Find setup_daemon.py + daemon_dir = Path(__file__).parent.parent / "daemon" + setup_script = daemon_dir / "scripts" / "setup_daemon.py" + + if not setup_script.exists(): + error_msg = f"Setup script not found at {setup_script}" + cx_print(error_msg, "error") + cx_print("Please ensure the daemon directory is present.", "error") + return 1 + + execute = getattr(args, "execute", False) + + if not execute: + cx_print("This will build and install the cortexd daemon.", "info") + cx_print("", "info") + cx_print("The setup wizard will:", "info") + cx_print(" 1. Check and install build dependencies", "info") + cx_print(" 2. Build the daemon from source", "info") + cx_print(" 3. Install systemd service files", "info") + cx_print(" 4. 
Enable and start the service", "info") + cx_print("", "info") + cx_print("Run with --execute to proceed:", "info") + cx_print(" cortex daemon install --execute", "dim") + # Don't record dry-runs in audit history + return 0 + + # Initialize audit logging only when execution will actually run + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + ["cortex daemon install"], + start_time, + ) + except Exception as e: + cx_print(f"Warning: Could not initialize audit logging: {e}", "warning") + + # Run setup_daemon.py + cx_print("Running daemon setup wizard...", "info") + try: + result = subprocess.run( + [sys.executable, str(setup_script)], + check=False, + ) + + # Record completion + if install_id: + try: + if result.returncode == 0: + history.update_installation(install_id, InstallationStatus.SUCCESS) + else: + error_msg = f"Setup script returned exit code {result.returncode}" + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + + return result.returncode + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during daemon install: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + except Exception as e: + error_msg = f"Unexpected error during daemon install: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + + def _daemon_uninstall(self, args: 
argparse.Namespace) -> int: + """Uninstall the cortexd daemon.""" + import subprocess + from pathlib import Path + + cx_header("Uninstalling Cortex Daemon") + + execute = getattr(args, "execute", False) + + if not execute: + cx_print("This will stop and remove the cortexd daemon.", "warning") + cx_print("", "info") + cx_print("This will:", "info") + cx_print(" 1. Stop the cortexd service", "info") + cx_print(" 2. Disable the service", "info") + cx_print(" 3. Remove systemd unit files", "info") + cx_print(" 4. Remove the daemon binary", "info") + cx_print("", "info") + cx_print("Run with --execute to proceed:", "info") + cx_print(" cortex daemon uninstall --execute", "dim") + # Don't record dry-runs in audit history + return 0 + + # Initialize audit logging only when execution will actually run + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + ["cortex daemon uninstall"], + start_time, + ) + except Exception as e: + cx_print(f"Warning: Could not initialize audit logging: {e}", "warning") + + # Find uninstall script + daemon_dir = Path(__file__).parent.parent / "daemon" + uninstall_script = daemon_dir / "scripts" / "uninstall.sh" + + if uninstall_script.exists(): + cx_print("Running uninstall script...", "info") + try: + # Security: Lock down script permissions before execution + # Set read-only permissions for non-root users to prevent tampering + import stat + + script_stat = uninstall_script.stat() + # Remove write permissions for group and others, keep owner read/execute + uninstall_script.chmod(stat.S_IRUSR | stat.S_IXUSR) + + result = subprocess.run( + ["sudo", "bash", str(uninstall_script)], + check=False, + capture_output=True, + text=True, + ) + + # Record completion + if install_id: + try: + if result.returncode == 0: + history.update_installation(install_id, InstallationStatus.SUCCESS) + 
else: + error_msg = f"Uninstall script returned exit code {result.returncode}" + if result.stderr: + error_msg += f": {result.stderr[:500]}" + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + + return result.returncode + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during daemon uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + except Exception as e: + error_msg = f"Unexpected error during daemon uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + else: + # Manual uninstall + cx_print("Running manual uninstall...", "info") + commands = [ + ["sudo", "systemctl", "stop", "cortexd"], + ["sudo", "systemctl", "disable", "cortexd"], + ["sudo", "rm", "-f", "/etc/systemd/system/cortexd.service"], + ["sudo", "rm", "-f", "/etc/systemd/system/cortexd.socket"], + ["sudo", "rm", "-f", "/usr/local/bin/cortexd"], + ["sudo", "systemctl", "daemon-reload"], + ] + + try: + any_failed = False + error_messages = [] + + for cmd in commands: + cmd_str = " ".join(cmd) + cx_print(f" Running: {cmd_str}", "dim") + + # Update installation history with command info (append to existing record) + if install_id: + try: + # Append command info to existing installation record + # instead of creating orphan records + history.update_installation( + install_id, + InstallationStatus.IN_PROGRESS, + f"Executing: {cmd_str}", + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow 
+ pass + + result = subprocess.run(cmd, check=False, capture_output=True, text=True) + + # Track failures + if result.returncode != 0: + any_failed = True + error_msg = ( + f"Command '{cmd_str}' failed with return code {result.returncode}" + ) + if result.stderr: + error_msg += f": {result.stderr[:500]}" + error_messages.append(error_msg) + cx_print(f" Failed: {error_msg}", "error") + + # Update history and return based on overall success + if any_failed: + combined_error = "; ".join(error_messages) + cx_print("Daemon uninstall failed.", "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, combined_error + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + else: + cx_print("Daemon uninstalled.", "success") + # Record success + if install_id: + try: + history.update_installation(install_id, InstallationStatus.SUCCESS) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 0 + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during manual uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + except Exception as e: + error_msg = f"Unexpected error during manual uninstall: {str(e)}" + cx_print(error_msg, "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + + def _daemon_config(self) -> int: + """Get daemon configuration via IPC.""" + from rich.table import Table + + cx_header("Daemon Configuration") + + success, response = self._daemon_ipc_call("config.get", lambda c: c.config_get()) + if not success: 
+ return 1 + + if response.success and response.result: + table = Table(title="Current Configuration", show_header=True) + table.add_column("Setting", style="cyan") + table.add_column("Value", style="green") + + for key, value in response.result.items(): + table.add_row(key, str(value)) + + console.print(table) + return 0 + else: + cx_print(f"Failed to get config: {response.error}", "error") + return 1 + + def _daemon_reload_config(self) -> int: + """Reload daemon configuration via IPC.""" + cx_header("Reloading Daemon Configuration") + + success, response = self._daemon_ipc_call("config.reload", lambda c: c.config_reload()) + if not success: + return 1 + + if response.success: + cx_print("Configuration reloaded successfully!", "success") + return 0 + else: + cx_print(f"Failed to reload config: {response.error}", "error") + return 1 + + def _daemon_version(self) -> int: + """Get daemon version via IPC.""" + cx_header("Daemon Version") + + success, response = self._daemon_ipc_call("version", lambda c: c.version()) + if not success: + return 1 + + if response.success and response.result: + name = response.result.get("name", "cortexd") + version = response.result.get("version", "unknown") + cx_print(f"{name} version {version}", "success") + return 0 + else: + cx_print(f"Failed to get version: {response.error}", "error") + return 1 + + def _daemon_ping(self) -> int: + """Test daemon connectivity via IPC.""" + import time + + cx_header("Daemon Ping") + + start = time.time() + success, response = self._daemon_ipc_call("ping", lambda c: c.ping()) + elapsed = (time.time() - start) * 1000 # ms + + if not success: + return 1 + + if response.success: + cx_print(f"Pong! 
Response time: {elapsed:.1f}ms", "success") + return 0 + else: + cx_print(f"Ping failed: {response.error}", "error") + return 1 + + def _daemon_shutdown(self) -> int: + """Request daemon shutdown via IPC.""" + cx_header("Requesting Daemon Shutdown") + + success, response = self._daemon_ipc_call("shutdown", lambda c: c.shutdown()) + if not success: + return 1 + + if response.success: + cx_print("Daemon shutdown requested successfully!", "success") + return 0 + cx_print(f"Failed to request shutdown: {response.error}", "error") + return 1 + + def _daemon_run_tests(self, args: argparse.Namespace) -> int: + """Run the daemon test suite.""" + import subprocess + + cx_header("Daemon Tests") + + # Initialize audit logging + history = InstallationHistory() + start_time = datetime.now(timezone.utc) + install_id = None + + try: + # Record operation start + install_id = history.record_installation( + InstallationType.CONFIG, + ["cortexd"], + ["daemon.run-tests"], + start_time, + ) + except Exception: + # Continue even if audit logging fails + pass + + # Find daemon directory + daemon_dir = Path(__file__).parent.parent / "daemon" + build_dir = daemon_dir / "build" + tests_dir = build_dir / "tests" # Test binaries are in build/tests/ + + # Define test binaries + unit_tests = [ + "test_config", + "test_protocol", + "test_rate_limiter", + "test_logger", + "test_common", + ] + integration_tests = ["test_ipc_server", "test_handlers", "test_daemon"] + all_tests = unit_tests + integration_tests + + # Check if tests are built + def check_tests_built() -> tuple[bool, list[str]]: + """Check which test binaries exist.""" + existing = [] + for test in all_tests: + if (tests_dir / test).exists(): + existing.append(test) + return len(existing) > 0, existing + + tests_built, existing_tests = check_tests_built() + + if not tests_built: + error_msg = "Tests are not built." 
+ cx_print(error_msg, "warning") + cx_print("", "info") + cx_print("To build tests, run the setup wizard with test building enabled:", "info") + cx_print("", "info") + cx_print(" [bold]python daemon/scripts/setup_daemon.py[/bold]", "info") + cx_print("", "info") + cx_print("When prompted, answer 'yes' to build the test suite.", "info") + cx_print("", "info") + cx_print("Or build manually:", "info") + cx_print(" cd daemon && ./scripts/build.sh Release --with-tests", "dim") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.FAILED, error_msg) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + + # Determine which tests to run + test_filter = getattr(args, "test", None) + run_unit = getattr(args, "unit", False) + run_integration = getattr(args, "integration", False) + verbose = getattr(args, "verbose", False) + + tests_to_run = [] + + if test_filter: + # Run a specific test + # Allow partial matching (e.g., "config" matches "test_config") + test_name = test_filter if test_filter.startswith("test_") else f"test_{test_filter}" + if test_name in existing_tests: + tests_to_run = [test_name] + else: + error_msg = f"Test '{test_filter}' not found or not built." + cx_print(error_msg, "error") + cx_print("", "info") + cx_print("Available tests:", "info") + for t in existing_tests: + cx_print(f" • {t}", "info") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + elif run_unit and not run_integration: + tests_to_run = [t for t in unit_tests if t in existing_tests] + if not tests_to_run: + error_msg = "No unit tests built." 
+ cx_print(error_msg, "warning") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + elif run_integration and not run_unit: + tests_to_run = [t for t in integration_tests if t in existing_tests] + if not tests_to_run: + error_msg = "No integration tests built." + cx_print(error_msg, "warning") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 1 + else: + # Run all available tests + tests_to_run = existing_tests + + # Show what we're running + cx_print(f"Running {len(tests_to_run)} test(s)...", "info") + cx_print("", "info") + + # Use ctest for running tests + ctest_args = ["ctest", "--output-on-failure"] + + if verbose: + ctest_args.append("-V") + + # Filter specific tests if not running all + if test_filter or run_unit or run_integration: + # ctest uses -R for regex filtering + test_regex = "|".join(tests_to_run) + ctest_args.extend(["-R", test_regex]) + + try: + result = subprocess.run( + ctest_args, + cwd=str(build_dir), + check=False, + ) + + if result.returncode == 0: + cx_print("", "info") + cx_print("All tests passed!", "success") + if install_id: + try: + history.update_installation(install_id, InstallationStatus.SUCCESS) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass + return 0 + else: + error_msg = f"Test execution failed with return code {result.returncode}" + cx_print("", "info") + cx_print("Some tests failed.", "error") + if install_id: + try: + history.update_installation( + install_id, InstallationStatus.FAILED, error_msg + ) + except Exception: + # Continue even if audit logging fails - don't break the main flow + pass return 1 - except (ValueError, OSError) as e: - 
self._print_error(f"Rollback failed: {str(e)}") + except subprocess.SubprocessError as e: + error_msg = f"Subprocess error during test execution: {str(e)}" + cx_print(error_msg, "error") + self._update_history_on_failure(history, install_id, error_msg) return 1 except Exception as e: - self._print_error(f"Unexpected rollback error: {str(e)}") - if self.verbose: - import traceback - - traceback.print_exc() + error_msg = f"Unexpected error during test execution: {str(e)}" + cx_print(error_msg, "error") + self._update_history_on_failure(history, install_id, error_msg) return 1 - def status(self): - """Show comprehensive system status and run health checks""" - from cortex.doctor import SystemDoctor + def benchmark(self, verbose: bool = False): + """Run AI performance benchmark and display scores""" + from cortex.benchmark import run_benchmark - # Run the comprehensive system health checks - # This now includes all functionality from the old status command - # plus all the detailed health checks from doctor - doctor = SystemDoctor() - return doctor.run_checks() + return run_benchmark(verbose=verbose) + + def systemd(self, service: str, action: str = "status", verbose: bool = False): + """Systemd service helper with plain English explanations""" + from cortex.systemd_helper import run_systemd_helper + + return run_systemd_helper(service, action, verbose) + + def gpu(self, action: str = "status", mode: str = None, verbose: bool = False): + """Hybrid GPU (Optimus) manager""" + from cortex.gpu_manager import run_gpu_manager + + return run_gpu_manager(action, mode, verbose) + + def printer(self, action: str = "status", verbose: bool = False): + """Printer/Scanner auto-setup""" + from cortex.printer_setup import run_printer_setup + + return run_printer_setup(action, verbose) def wizard(self): """Interactive setup wizard for API key configuration""" @@ -1245,7 +3462,7 @@ def env(self, args: argparse.Namespace) -> int: if not action: self._print_error( - "Please specify a 
subcommand (set/get/list/delete/export/import/clear/template)" + "Please specify a subcommand (set/get/list/delete/export/import/clear/template/audit/check/path)" ) return 1 @@ -1270,6 +3487,13 @@ def env(self, args: argparse.Namespace) -> int: return self._env_list_apps(env_mgr, args) elif action == "load": return self._env_load(env_mgr, args) + # Shell environment analyzer commands + elif action == "audit": + return self._env_audit(args) + elif action == "check": + return self._env_check(args) + elif action == "path": + return self._env_path(args) else: self._print_error(f"Unknown env subcommand: {action}") return 1 @@ -1463,7 +3687,9 @@ def _env_clear(self, env_mgr: EnvironmentManager, args: argparse.Namespace) -> i # Confirm unless --force is used if not force: - confirm = input(f"⚠️ Clear ALL environment variables for '{app}'? (y/n): ") + confirm = StdinHandler.get_input( + f"⚠️ Clear ALL environment variables for '{app}'? (y/n): " + ) if confirm.lower() != "y": cx_print("Operation cancelled", "info") return 0 @@ -1594,612 +3820,382 @@ def _env_load(self, env_mgr: EnvironmentManager, args: argparse.Namespace) -> in return 0 - # --- Do Command (manage do-mode runs) --- - def do_cmd(self, args: argparse.Namespace) -> int: - """Handle `cortex do` commands for managing do-mode runs.""" - from cortex.do_runner import DoHandler, ProtectedPathsManager, CortexUserManager - - action = getattr(args, "do_action", None) - - if not action: - cx_print("\n🔧 Do Mode - Execute commands to solve problems\n", "info") - console.print("Usage: cortex ask --do ") - console.print(" cortex do [options]") - console.print("\nCommands:") - console.print(" history [run_id] View do-mode run history") - console.print(" setup Setup cortex user for privilege management") - console.print(" protected Manage protected paths") - console.print("\nExample:") - console.print(" cortex ask --do 'Fix my nginx configuration'") - console.print(" cortex do history") - return 0 - - if action == "history": 
- return self._do_history(args) - elif action == "setup": - return self._do_setup() - elif action == "protected": - return self._do_protected(args) - else: - self._print_error(f"Unknown do action: {action}") - return 1 - - def _do_history(self, args: argparse.Namespace) -> int: - """Show do-mode run history.""" - from cortex.do_runner import DoHandler - - handler = DoHandler() - run_id = getattr(args, "run_id", None) - - if run_id: - # Show specific run details - run = handler.get_run(run_id) - if not run: - self._print_error(f"Run {run_id} not found") - return 1 - - # Get statistics from database - stats = handler.db.get_run_stats(run_id) - - console.print(f"\n[bold]Do Run: {run.run_id}[/bold]") - console.print("=" * 70) - - # Show session ID if available - session_id = getattr(run, "session_id", None) - if session_id: - console.print(f"[bold]Session:[/bold] [magenta]{session_id}[/magenta]") - - console.print(f"[bold]Query:[/bold] {run.user_query}") - console.print(f"[bold]Mode:[/bold] {run.mode.value}") - console.print(f"[bold]Started:[/bold] {run.started_at}") - console.print(f"[bold]Completed:[/bold] {run.completed_at}") - console.print(f"\n[bold]Summary:[/bold] {run.summary}") - - # Show statistics - if stats: - console.print(f"\n[bold cyan]📊 Command Statistics:[/bold cyan]") - total = stats.get("total_commands", 0) - success = stats.get("successful_commands", 0) - failed = stats.get("failed_commands", 0) - skipped = stats.get("skipped_commands", 0) - console.print(f" Total: {total} | [green]✓ Success: {success}[/green] | [red]✗ Failed: {failed}[/red] | [yellow]○ Skipped: {skipped}[/yellow]") - - if run.files_accessed: - console.print(f"\n[bold]Files Accessed:[/bold] {', '.join(run.files_accessed)}") - - # Get detailed commands from database - commands_detail = handler.db.get_run_commands(run_id) - - console.print(f"\n[bold cyan]📋 Commands Executed:[/bold cyan]") - console.print("-" * 70) - - if commands_detail: - for cmd in commands_detail: - status = 
cmd["status"] - if status == "success": - status_icon = "[green]✓[/green]" - elif status == "failed": - status_icon = "[red]✗[/red]" - elif status == "skipped": - status_icon = "[yellow]○[/yellow]" - else: - status_icon = "[dim]?[/dim]" - - console.print(f"\n{status_icon} [bold]Command {cmd['index'] + 1}:[/bold] {cmd['command']}") - console.print(f" [dim]Purpose:[/dim] {cmd['purpose']}") - console.print(f" [dim]Status:[/dim] {status} | [dim]Duration:[/dim] {cmd['duration']:.2f}s") - - if cmd["output"]: - console.print(f" [dim]Output:[/dim] {cmd['output']}") - if cmd["error"]: - console.print(f" [red]Error:[/red] {cmd['error']}") - else: - # Fallback to run.commands if database commands not available - for i, cmd in enumerate(run.commands): - status_icon = "[green]✓[/green]" if cmd.status.value == "success" else "[red]✗[/red]" - console.print(f"\n{status_icon} [bold]Command {i + 1}:[/bold] {cmd.command}") - console.print(f" [dim]Purpose:[/dim] {cmd.purpose}") - console.print(f" [dim]Status:[/dim] {cmd.status.value} | [dim]Duration:[/dim] {cmd.duration_seconds:.2f}s") - if cmd.output: - output_truncated = cmd.output[:250] + "..." 
if len(cmd.output) > 250 else cmd.output - console.print(f" [dim]Output:[/dim] {output_truncated}") - if cmd.error: - console.print(f" [red]Error:[/red] {cmd.error}") - - return 0 - - # List recent runs - limit = getattr(args, "limit", 20) - runs = handler.get_run_history(limit) - - if not runs: - cx_print("No do-mode runs found", "info") - return 0 - - # Group runs by session - sessions = {} - standalone_runs = [] - - for run in runs: - session_id = getattr(run, "session_id", None) - if session_id: - if session_id not in sessions: - sessions[session_id] = [] - sessions[session_id].append(run) - else: - standalone_runs.append(run) - - console.print(f"\n[bold]📜 Recent Do Runs:[/bold]") - console.print(f"[dim]Sessions: {len(sessions)} | Standalone runs: {len(standalone_runs)}[/dim]\n") - - import json as json_module - - # Show sessions first - for session_id, session_runs in sessions.items(): - console.print(f"[bold magenta]╭{'─' * 68}╮[/bold magenta]") - console.print(f"[bold magenta]│ 📂 Session: {session_id[:40]}...{' ' * 15}│[/bold magenta]") - console.print(f"[bold magenta]│ Runs: {len(session_runs)}{' ' * 57}│[/bold magenta]") - console.print(f"[bold magenta]╰{'─' * 68}╯[/bold magenta]") - - for run in session_runs: - self._display_run_summary(handler, run, indent=" ") - console.print() - - # Show standalone runs - if standalone_runs: - if sessions: - console.print(f"[bold cyan]{'─' * 70}[/bold cyan]") - console.print("[bold]📋 Standalone Runs (no session):[/bold]") - - for run in standalone_runs: - self._display_run_summary(handler, run) - - console.print(f"[dim]Use 'cortex do history ' for full details[/dim]") + # --- Shell Environment Analyzer Commands --- + def _env_audit(self, args: argparse.Namespace) -> int: + """Audit shell environment variables and show their sources.""" + from cortex.shell_env_analyzer import Shell, ShellEnvironmentAnalyzer + + shell = None + if hasattr(args, "shell") and args.shell: + shell = Shell(args.shell) + + analyzer = 
ShellEnvironmentAnalyzer(shell=shell) + include_system = not getattr(args, "no_system", False) + as_json = getattr(args, "json", False) + + audit = analyzer.audit(include_system=include_system) + + if as_json: + import json + + print(json.dumps(audit.to_dict(), indent=2)) + return 0 + + # Display audit results + cx_header(f"Environment Audit ({audit.shell.value} shell)") + + console.print("\n[bold]Config Files Scanned:[/bold]") + for f in audit.config_files_scanned: + console.print(f" • {f}") + + if audit.variables: + console.print("\n[bold]Variables with Definitions:[/bold]") + # Sort by number of sources (most definitions first) + sorted_vars = sorted(audit.variables.items(), key=lambda x: len(x[1]), reverse=True) + for var_name, sources in sorted_vars[:20]: # Limit to top 20 + console.print(f"\n [cyan]{var_name}[/cyan] ({len(sources)} definition(s))") + for src in sources: + console.print(f" [dim]{src.file}:{src.line_number}[/dim]") + # Show truncated value + val_preview = src.value[:50] + "..." if len(src.value) > 50 else src.value + console.print(f" → {val_preview}") + + if len(audit.variables) > 20: + console.print(f"\n [dim]... 
and {len(audit.variables) - 20} more variables[/dim]") + + if audit.conflicts: + console.print("\n[bold]⚠️ Conflicts Detected:[/bold]") + for conflict in audit.conflicts: + severity_color = { + "info": "blue", + "warning": "yellow", + "error": "red", + }.get(conflict.severity.value, "white") + console.print( + f" [{severity_color}]{conflict.severity.value.upper()}[/{severity_color}]: {conflict.description}" + ) + + console.print(f"\n[dim]Total: {len(audit.variables)} variable(s) found[/dim]") return 0 - - def _display_run_summary(self, handler, run, indent: str = "") -> None: - """Display a single run summary.""" - stats = handler.db.get_run_stats(run.run_id) - if stats: - total = stats.get("total_commands", 0) - success = stats.get("successful_commands", 0) - failed = stats.get("failed_commands", 0) - status_str = f"[green]✓{success}[/green]/[red]✗{failed}[/red]/{total}" - else: - cmd_count = len(run.commands) - success_count = sum(1 for c in run.commands if c.status.value == "success") - failed_count = sum(1 for c in run.commands if c.status.value == "failed") - status_str = f"[green]✓{success_count}[/green]/[red]✗{failed_count}[/red]/{cmd_count}" - - commands_list = handler.db.get_commands_list(run.run_id) - - console.print(f"{indent}[bold cyan]{'─' * 60}[/bold cyan]") - console.print(f"{indent}[bold]Run ID:[/bold] {run.run_id}") - console.print(f"{indent}[bold]Query:[/bold] {run.user_query[:60]}{'...' 
if len(run.user_query) > 60 else ''}") - console.print(f"{indent}[bold]Status:[/bold] {status_str} | [bold]Started:[/bold] {run.started_at[:19] if run.started_at else '-'}") - - if commands_list and len(commands_list) <= 3: - console.print(f"{indent}[bold]Commands:[/bold] {', '.join(cmd[:30] for cmd in commands_list)}") - elif commands_list: - console.print(f"{indent}[bold]Commands:[/bold] {len(commands_list)} commands") - - def _do_setup(self) -> int: - """Setup cortex user for privilege management.""" - from cortex.do_runner import CortexUserManager - - cx_print("Setting up Cortex user for privilege management...", "info") - - if CortexUserManager.user_exists(): - cx_print("✓ Cortex user already exists", "success") - return 0 - - success, message = CortexUserManager.create_user() - if success: - cx_print(f"✓ {message}", "success") + + def _env_check(self, args: argparse.Namespace) -> int: + """Check for environment variable conflicts and issues.""" + from cortex.shell_env_analyzer import Shell, ShellEnvironmentAnalyzer + + shell = None + if hasattr(args, "shell") and args.shell: + shell = Shell(args.shell) + + analyzer = ShellEnvironmentAnalyzer(shell=shell) + audit = analyzer.audit() + + cx_header(f"Environment Health Check ({audit.shell.value})") + + issues_found = 0 + + # Check for conflicts + if audit.conflicts: + console.print("\n[bold]Variable Conflicts:[/bold]") + for conflict in audit.conflicts: + issues_found += 1 + severity_color = { + "info": "blue", + "warning": "yellow", + "error": "red", + }.get(conflict.severity.value, "white") + console.print( + f" [{severity_color}]●[/{severity_color}] {conflict.variable_name}: {conflict.description}" + ) + for src in conflict.sources: + console.print(f" [dim]• {src.file}:{src.line_number}[/dim]") + + # Check PATH + duplicates = analyzer.get_path_duplicates() + missing = analyzer.get_missing_paths() + + if duplicates: + console.print("\n[bold]PATH Duplicates:[/bold]") + for dup in duplicates: + issues_found += 1 
+ console.print(f" [yellow]●[/yellow] {dup}") + + if missing: + console.print("\n[bold]Missing PATH Entries:[/bold]") + for m in missing: + issues_found += 1 + console.print(f" [red]●[/red] {m}") + + if issues_found == 0: + cx_print("\n✓ No issues found! Environment looks healthy.", "success") return 0 else: - self._print_error(message) - return 1 - - def _do_protected(self, args: argparse.Namespace) -> int: - """Manage protected paths.""" - from cortex.do_runner import ProtectedPathsManager - - manager = ProtectedPathsManager() - - add_path = getattr(args, "add", None) - remove_path = getattr(args, "remove", None) - list_paths = getattr(args, "list", False) - - if add_path: - manager.add_protected_path(add_path) - cx_print(f"✓ Added '{add_path}' to protected paths", "success") - return 0 - - if remove_path: - if manager.remove_protected_path(remove_path): - cx_print(f"✓ Removed '{remove_path}' from protected paths", "success") - else: - self._print_error(f"Path '{remove_path}' not found in user-defined protected paths") - return 0 - - # Default: list all protected paths - paths = manager.get_all_protected() - console.print("\n[bold]Protected Paths:[/bold]") - console.print("[dim](These paths require user confirmation for access)[/dim]\n") - - for path in paths: - is_system = path in manager.SYSTEM_PROTECTED_PATHS - tag = "[system]" if is_system else "[user]" - console.print(f" {path} [dim]{tag}[/dim]") - - console.print(f"\n[dim]Total: {len(paths)} paths[/dim]") - console.print("[dim]Use --add to add custom paths[/dim]") - return 0 + console.print(f"\n[yellow]Found {issues_found} issue(s)[/yellow]") + cx_print("Run 'cortex env path dedupe' to fix PATH duplicates", "info") + return 1 - # --- Info Command --- - def info_cmd(self, args: argparse.Namespace) -> int: - """Get system and application information using read-only commands.""" - from rich.panel import Panel - from rich.table import Table - - try: - from cortex.system_info_generator import ( - 
SystemInfoGenerator, - get_system_info_generator, - COMMON_INFO_COMMANDS, - APP_INFO_TEMPLATES, - ) - except ImportError as e: - self._print_error(f"System info generator not available: {e}") + def _env_path(self, args: argparse.Namespace) -> int: + """Handle PATH management subcommands.""" + from cortex.shell_env_analyzer import Shell, ShellEnvironmentAnalyzer + + path_action = getattr(args, "path_action", None) + + if not path_action: + self._print_error("Please specify a path action (list/add/remove/dedupe/clean)") return 1 - - debug = getattr(args, "debug", False) - - # Handle --list - if getattr(args, "list", False): - console.print("\n[bold]📊 Available Information Types[/bold]\n") - - console.print("[bold cyan]Quick Info Types (--quick):[/bold cyan]") - for name in sorted(COMMON_INFO_COMMANDS.keys()): - console.print(f" • {name}") - - console.print("\n[bold cyan]Application Templates (--app):[/bold cyan]") - for name in sorted(APP_INFO_TEMPLATES.keys()): - aspects = ", ".join(APP_INFO_TEMPLATES[name].keys()) - console.print(f" • {name}: [dim]{aspects}[/dim]") - - console.print("\n[bold cyan]Categories (--category):[/bold cyan]") - console.print(" hardware, software, network, services, security, storage, performance, configuration") - - console.print("\n[dim]Examples:[/dim]") - console.print(" cortex info --quick cpu") - console.print(" cortex info --app nginx") - console.print(" cortex info --category hardware") - console.print(" cortex info What version of Python is installed?") - return 0 - - # Handle --quick - quick_type = getattr(args, "quick", None) - if quick_type: - console.print(f"\n[bold]🔍 Quick Info: {quick_type.upper()}[/bold]\n") - - if quick_type in COMMON_INFO_COMMANDS: - for cmd_info in COMMON_INFO_COMMANDS[quick_type]: - from cortex.ask import CommandValidator - success, stdout, stderr = CommandValidator.execute_command(cmd_info.command) - - if success and stdout: - console.print(Panel( - stdout[:1000] + ("..." 
if len(stdout) > 1000 else ""), - title=f"[cyan]{cmd_info.purpose}[/cyan]", - subtitle=f"[dim]{cmd_info.command[:60]}...[/dim]" if len(cmd_info.command) > 60 else f"[dim]{cmd_info.command}[/dim]", - )) - elif stderr: - console.print(f"[yellow]⚠ {cmd_info.purpose}: {stderr[:100]}[/yellow]") - else: - self._print_error(f"Unknown quick info type: {quick_type}") - return 1 - return 0 - - # Handle --app - app_name = getattr(args, "app", None) - if app_name: - console.print(f"\n[bold]📦 Application Info: {app_name.upper()}[/bold]\n") - - if app_name.lower() in APP_INFO_TEMPLATES: - templates = APP_INFO_TEMPLATES[app_name.lower()] - for aspect, commands in templates.items(): - console.print(f"[bold cyan]─── {aspect.upper()} ───[/bold cyan]") - for cmd_info in commands: - from cortex.ask import CommandValidator - success, stdout, stderr = CommandValidator.execute_command(cmd_info.command, timeout=15) - - if success and stdout: - output = stdout[:500] + ("..." if len(stdout) > 500 else "") - console.print(f"[dim]{cmd_info.purpose}:[/dim]") - console.print(output) - elif stderr: - console.print(f"[yellow]{cmd_info.purpose}: {stderr[:100]}[/yellow]") - console.print() - else: - # Try using LLM for unknown apps - api_key = self._get_api_key() - if api_key: - try: - generator = SystemInfoGenerator( - api_key=api_key, - provider=self._get_provider(), - debug=debug, - ) - result = generator.get_app_info(app_name) - console.print(result.answer) - except Exception as e: - self._print_error(f"Could not get info for {app_name}: {e}") - return 1 - else: - self._print_error(f"Unknown app '{app_name}' and no API key for LLM lookup") - return 1 + + shell = None + if hasattr(args, "shell") and args.shell: + shell = Shell(args.shell) + + analyzer = ShellEnvironmentAnalyzer(shell=shell) + + if path_action == "list": + return self._env_path_list(analyzer, args) + elif path_action == "add": + return self._env_path_add(analyzer, args) + elif path_action == "remove": + return 
self._env_path_remove(analyzer, args) + elif path_action == "dedupe": + return self._env_path_dedupe(analyzer, args) + elif path_action == "clean": + return self._env_path_clean(analyzer, args) + else: + self._print_error(f"Unknown path action: {path_action}") + return 1 + + def _env_path_list(self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace) -> int: + """List PATH entries with status.""" + as_json = getattr(args, "json", False) + + current_path = os.environ.get("PATH", "") + entries = current_path.split(os.pathsep) + + # Get analysis + audit = analyzer.audit() + + if as_json: + import json + + print(json.dumps([e.to_dict() for e in audit.path_entries], indent=2)) return 0 - - # Handle --category - category = getattr(args, "category", None) - if category: - console.print(f"\n[bold]📊 Category Info: {category.upper()}[/bold]\n") - - api_key = self._get_api_key() - if not api_key: - # Fall back to running common commands without LLM - category_mapping = { - "hardware": ["cpu", "memory", "disk", "gpu"], - "software": ["os", "kernel"], - "network": ["network", "dns"], - "services": ["services"], - "security": ["security"], - "storage": ["disk"], - "performance": ["cpu", "memory", "processes"], - "configuration": ["environment"], - } - aspects = category_mapping.get(category, []) - for aspect in aspects: - if aspect in COMMON_INFO_COMMANDS: - console.print(f"[bold cyan]─── {aspect.upper()} ───[/bold cyan]") - for cmd_info in COMMON_INFO_COMMANDS[aspect]: - from cortex.ask import CommandValidator - success, stdout, _ = CommandValidator.execute_command(cmd_info.command) - if success and stdout: - console.print(stdout[:400]) - console.print() - return 0 - + + cx_header("PATH Entries") + + seen: set = set() + for i, entry in enumerate(entries, 1): + if not entry: + continue + + status_icons = [] + + # Check if exists + if not Path(entry).exists(): + status_icons.append("[red]✗ missing[/red]") + + # Check if duplicate + if entry in seen: + 
status_icons.append("[yellow]⚠ duplicate[/yellow]") + seen.add(entry) + + status = " ".join(status_icons) if status_icons else "[green]✓[/green]" + console.print(f" {i:2d}. {entry} {status}") + + duplicates = analyzer.get_path_duplicates() + missing = analyzer.get_missing_paths() + + console.print() + console.print( + f"[dim]Total: {len(entries)} entries, {len(duplicates)} duplicates, {len(missing)} missing[/dim]" + ) + + return 0 + + def _env_path_add(self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace) -> int: + """Add a path entry.""" + import os + from pathlib import Path + + new_path = args.path + prepend = not getattr(args, "append", False) + persist = getattr(args, "persist", False) + + # Resolve to absolute path + new_path = str(Path(new_path).expanduser().resolve()) + + if persist: + # When persisting, check the config file, not current PATH try: - generator = SystemInfoGenerator( - api_key=api_key, - provider=self._get_provider(), - debug=debug, - ) - result = generator.get_structured_info(category) - console.print(result.answer) + config_path = analyzer.get_shell_config_path() + # Check if already in config + config_content = "" + if os.path.exists(config_path): + with open(config_path) as f: + config_content = f.read() + + # Check if path is in a cortex-managed block + if ( + f'export PATH="{new_path}:$PATH"' in config_content + or f'export PATH="$PATH:{new_path}"' in config_content + ): + cx_print(f"'{new_path}' is already in {config_path}", "info") + return 0 + + analyzer.add_path_to_config(new_path, prepend=prepend) + cx_print(f"✓ Added '{new_path}' to {config_path}", "success") + console.print(f"[dim]To use in current shell: source {config_path}[/dim]") except Exception as e: - self._print_error(f"Could not get category info: {e}") - return 1 - return 0 - - # Handle natural language query - query_parts = getattr(args, "query", []) - if query_parts: - query = " ".join(query_parts) - console.print(f"\n[bold]🔍 System Info 
Query[/bold]\n") - console.print(f"[dim]Query: {query}[/dim]\n") - - api_key = self._get_api_key() - if not api_key: - self._print_error("Natural language queries require an API key. Use --quick or --app instead.") + self._print_error(f"Failed to persist: {e}") return 1 - + else: + # Check if already in current PATH (for non-persist mode) + current_path = os.environ.get("PATH", "") + if new_path in current_path.split(os.pathsep): + cx_print(f"'{new_path}' is already in PATH", "info") + return 0 + + # Only modify current process env (won't persist across commands) + updated = analyzer.safe_add_path(new_path, prepend=prepend) + os.environ["PATH"] = updated + position = "prepended to" if prepend else "appended to" + cx_print(f"✓ '{new_path}' {position} PATH (this process only)", "success") + cx_print("Note: Add --persist to make this permanent", "info") + + return 0 + + def _env_path_remove( + self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace + ) -> int: + """Remove a path entry.""" + import os + + target_path = args.path + persist = getattr(args, "persist", False) + + if persist: + # When persisting, remove from config file try: - generator = SystemInfoGenerator( - api_key=api_key, - provider=self._get_provider(), - debug=debug, - ) - result = generator.get_info(query) - - console.print(Panel(result.answer, title="[bold green]Answer[/bold green]")) - - if debug and result.commands_executed: - table = Table(title="Commands Executed") - table.add_column("Command", style="cyan", max_width=50) - table.add_column("Status", style="green") - table.add_column("Time", style="dim") - for cmd in result.commands_executed: - status = "✓" if cmd.success else "✗" - table.add_row( - cmd.command[:50] + "..." 
if len(cmd.command) > 50 else cmd.command, - status, - f"{cmd.execution_time:.2f}s" - ) - console.print(table) - + config_path = analyzer.get_shell_config_path() + result = analyzer.remove_path_from_config(target_path) + if result: + cx_print(f"✓ Removed '{target_path}' from {config_path}", "success") + console.print(f"[dim]To update current shell: source {config_path}[/dim]") + else: + cx_print(f"'{target_path}' was not in cortex-managed config block", "info") except Exception as e: - self._print_error(f"Query failed: {e}") - if debug: - import traceback - traceback.print_exc() + self._print_error(f"Failed to persist removal: {e}") return 1 - return 0 - - # No arguments - show help - console.print("\n[bold]📊 Cortex Info - System Information Generator[/bold]\n") - console.print("Get system and application information using read-only commands.\n") - console.print("[bold cyan]Usage:[/bold cyan]") - console.print(" cortex info --list List available info types") - console.print(" cortex info --quick Quick lookup (cpu, memory, etc.)") - console.print(" cortex info --app Application info (nginx, docker, etc.)") - console.print(" cortex info --category Category info (hardware, network, etc.)") - console.print(" cortex info Natural language query (requires API key)") - console.print("\n[bold cyan]Examples:[/bold cyan]") - console.print(" cortex info --quick memory") - console.print(" cortex info --app nginx") - console.print(" cortex info --category hardware") - console.print(" cortex info What Python packages are installed?") + else: + # Only modify current process env (won't persist across commands) + current_path = os.environ.get("PATH", "") + if target_path not in current_path.split(os.pathsep): + cx_print(f"'{target_path}' is not in current PATH", "info") + return 0 + + updated = analyzer.safe_remove_path(target_path) + os.environ["PATH"] = updated + cx_print(f"✓ Removed '{target_path}' from PATH (this process only)", "success") + cx_print("Note: Add --persist to make 
this permanent", "info") + return 0 - # --- Watch Command --- - def watch_cmd(self, args: argparse.Namespace) -> int: - """Manage terminal watching for manual intervention mode.""" - from rich.panel import Panel - from cortex.do_runner.terminal import TerminalMonitor - - monitor = TerminalMonitor(use_llm=False) - system_wide = getattr(args, "system", False) - as_service = getattr(args, "service", False) - - if getattr(args, "install", False): - if as_service: - # Install as systemd service - console.print("\n[bold cyan]🔧 Installing Cortex Watch Service[/bold cyan]") - console.print("[dim]This will create a systemd user service that runs automatically[/dim]\n") - - from cortex.watch_service import install_service - success, msg = install_service() - console.print(msg) - return 0 if success else 1 - elif system_wide: - console.print("\n[bold cyan]🔧 Installing System-Wide Terminal Watch Hook[/bold cyan]") - console.print("[dim]This will install to /etc/profile.d/ (requires sudo)[/dim]\n") - success, msg = monitor.setup_system_wide_watch() - if success: - console.print(f"[green]{msg}[/green]") - console.print("\n[bold green]✓ All new terminals will automatically have Cortex watching![/bold green]") - else: - console.print(f"[red]✗ {msg}[/red]") - return 1 - else: - console.print("\n[bold cyan]🔧 Installing Terminal Watch Hook[/bold cyan]\n") - success, msg = monitor.setup_auto_watch(permanent=True) - if success: - console.print(f"[green]✓ {msg}[/green]") - console.print("\n[yellow]Note: New terminals will have the hook automatically.[/yellow]") - console.print("[yellow]For existing terminals, run:[/yellow]") - console.print(f"[green]source ~/.cortex/watch_hook.sh[/green]") - console.print("\n[dim]Tip: For automatic activation in ALL terminals, run:[/dim]") - console.print("[cyan]cortex watch --install --system[/cyan]") - else: - console.print(f"[red]✗ {msg}[/red]") - return 1 + def _env_path_dedupe( + self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace 
+ ) -> int: + """Remove duplicate PATH entries.""" + import os + + dry_run = getattr(args, "dry_run", False) + persist = getattr(args, "persist", False) + + duplicates = analyzer.get_path_duplicates() + + if not duplicates: + cx_print("✓ No duplicate PATH entries found", "success") return 0 - - if getattr(args, "uninstall", False): - if as_service: - console.print("\n[bold cyan]🔧 Removing Cortex Watch Service[/bold cyan]\n") - from cortex.watch_service import uninstall_service - success, msg = uninstall_service() - elif system_wide: - console.print("\n[bold cyan]🔧 Removing System-Wide Terminal Watch Hook[/bold cyan]\n") - success, msg = monitor.uninstall_system_wide_watch() - else: - console.print("\n[bold cyan]🔧 Removing Terminal Watch Hook[/bold cyan]\n") - success, msg = monitor.remove_auto_watch() - if success: - console.print(f"[green]{msg}[/green]") - else: - console.print(f"[red]✗ {msg}[/red]") - return 1 + + cx_header("PATH Deduplication") + console.print(f"[yellow]Found {len(duplicates)} duplicate(s):[/yellow]") + for dup in duplicates: + console.print(f" • {dup}") + + if dry_run: + console.print("\n[dim]Dry run - no changes made[/dim]") + clean_path = analyzer.dedupe_path() + console.print("\n[bold]Cleaned PATH would be:[/bold]") + for entry in clean_path.split(os.pathsep)[:10]: + console.print(f" {entry}") + if len(clean_path.split(os.pathsep)) > 10: + console.print(" [dim]... 
and more[/dim]") return 0 - - if getattr(args, "test", False): - console.print("\n[bold cyan]🧪 Testing Terminal Monitoring[/bold cyan]\n") - monitor.test_monitoring() - return 0 - - if getattr(args, "status", False): - console.print("\n[bold cyan]📊 Terminal Watch Status[/bold cyan]\n") - - from pathlib import Path - bashrc = Path.home() / ".bashrc" - zshrc = Path.home() / ".zshrc" - source_file = Path.home() / ".cortex" / "watch_hook.sh" - watch_log = Path.home() / ".cortex" / "terminal_watch.log" - system_hook = Path("/etc/profile.d/cortex-watch.sh") - service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" - - console.print("[bold]Service Status:[/bold]") - - # Check systemd service - if service_file.exists(): - try: - result = subprocess.run( - ["systemctl", "--user", "is-active", "cortex-watch.service"], - capture_output=True, text=True, timeout=5 - ) - is_active = result.stdout.strip() == "active" - if is_active: - console.print(" [bold green]✓ SYSTEMD SERVICE RUNNING[/bold green]") - console.print(" [dim]Automatic terminal monitoring active[/dim]") - else: - console.print(" [yellow]○ Systemd service installed but not running[/yellow]") - console.print(" [dim]Run: systemctl --user start cortex-watch[/dim]") - except Exception: - console.print(" [yellow]○ Systemd service installed (status unknown)[/yellow]") - else: - console.print(" [dim]○ Systemd service not installed[/dim]") - console.print(" [dim]Run: cortex watch --install --service (recommended)[/dim]") - - console.print() - console.print("[bold]Hook Status:[/bold]") - - # System-wide check - if system_hook.exists(): - console.print(" [green]✓ System-wide hook installed[/green]") - else: - console.print(" [dim]○ System-wide hook not installed[/dim]") - - # User-level checks - if bashrc.exists() and "Cortex Terminal Watch Hook" in bashrc.read_text(): - console.print(" [green]✓ Hook installed in .bashrc[/green]") - else: - console.print(" [dim]○ Not installed in .bashrc[/dim]") 
- - if zshrc.exists() and "Cortex Terminal Watch Hook" in zshrc.read_text(): - console.print(" [green]✓ Hook installed in .zshrc[/green]") - else: - console.print(" [dim]○ Not installed in .zshrc[/dim]") - - console.print("\n[bold]Watch Log:[/bold]") - if watch_log.exists(): - size = watch_log.stat().st_size - lines = len(watch_log.read_text().strip().split('\n')) if size > 0 else 0 - console.print(f" [green]✓ Log file exists: {watch_log}[/green]") - console.print(f" [dim] Size: {size} bytes, {lines} commands logged[/dim]") - else: - console.print(f" [dim]○ No log file yet (created when commands are run)[/dim]") - + + # Apply deduplication + clean_path = analyzer.dedupe_path() + os.environ["PATH"] = clean_path + cx_print(f"✓ Removed {len(duplicates)} duplicate(s) from PATH (current session)", "success") + + if persist: + script = analyzer.generate_path_fix_script() + console.print("\n[bold]Add this to your shell config for persistence:[/bold]") + console.print(f"[dim]{script}[/dim]") + + return 0 + + def _env_path_clean( + self, analyzer: "ShellEnvironmentAnalyzer", args: argparse.Namespace + ) -> int: + """Clean PATH by removing duplicates and optionally missing paths.""" + import os + + remove_missing = getattr(args, "remove_missing", False) + dry_run = getattr(args, "dry_run", False) + + duplicates = analyzer.get_path_duplicates() + missing = analyzer.get_missing_paths() if remove_missing else [] + + total_issues = len(duplicates) + len(missing) + + if total_issues == 0: + cx_print("✓ PATH is already clean", "success") return 0 - - # Default: show help - console.print() - console.print(Panel( - "[bold cyan]Terminal Watch[/bold cyan] - Real-time monitoring for manual intervention mode\n\n" - "When Cortex enters manual intervention mode, it watches your other terminals\n" - "to provide real-time feedback and AI-powered suggestions.\n\n" - "[bold]Commands:[/bold]\n" - " [cyan]cortex watch --install --service[/cyan] Install as systemd service (RECOMMENDED)\n" - " 
[cyan]cortex watch --install --system[/cyan] Install system-wide hook (requires sudo)\n" - " [cyan]cortex watch --install[/cyan] Install hook to .bashrc/.zshrc\n" - " [cyan]cortex watch --uninstall --service[/cyan] Remove systemd service\n" - " [cyan]cortex watch --status[/cyan] Show installation status\n" - " [cyan]cortex watch --test[/cyan] Test monitoring setup\n\n" - "[bold green]Recommended Setup:[/bold green]\n" - " Run [green]cortex watch --install --service[/green]\n\n" - " This creates a background service that:\n" - " • Starts automatically on login\n" - " • Restarts if it crashes\n" - " • Monitors ALL terminal activity\n" - " • No manual setup in each terminal!", - title="[green]🔍 Cortex Watch[/green]", - border_style="cyan", - )) + + cx_header("PATH Cleanup") + + if duplicates: + console.print(f"[yellow]Duplicates ({len(duplicates)}):[/yellow]") + for d in duplicates[:5]: + console.print(f" • {d}") + if len(duplicates) > 5: + console.print(f" [dim]... and {len(duplicates) - 5} more[/dim]") + + if missing: + console.print(f"\n[red]Missing paths ({len(missing)}):[/red]") + for m in missing[:5]: + console.print(f" • {m}") + if len(missing) > 5: + console.print(f" [dim]... 
and {len(missing) - 5} more[/dim]") + + if dry_run: + clean_path = analyzer.clean_path(remove_missing=remove_missing) + console.print("\n[dim]Dry run - no changes made[/dim]") + console.print( + f"[bold]Would reduce PATH from {len(os.environ.get('PATH', '').split(os.pathsep))} to {len(clean_path.split(os.pathsep))} entries[/bold]" + ) + return 0 + + # Apply cleanup + clean_path = analyzer.clean_path(remove_missing=remove_missing) + old_count = len(os.environ.get("PATH", "").split(os.pathsep)) + new_count = len(clean_path.split(os.pathsep)) + os.environ["PATH"] = clean_path + + cx_print(f"✓ Cleaned PATH: {old_count} → {new_count} entries", "success") + + # Show fix script + script = analyzer.generate_path_fix_script() + if "no fixes needed" not in script: + console.print("\n[bold]To make permanent, add to your shell config:[/bold]") + console.print(f"[dim]{script}[/dim]") + return 0 # --- Import Dependencies Command --- @@ -2314,7 +4310,7 @@ def _import_all(self, importer: DependencyImporter, execute: bool, include_dev: # Execute mode - confirm before installing total = total_packages + total_dev_packages - confirm = input(f"\nInstall all {total} packages? [Y/n]: ") + confirm = StdinHandler.get_input(f"\nInstall all {total} packages? 
[Y/n]: ") if confirm.lower() not in ["", "y", "yes"]: cx_print("Installation cancelled", "info") return 0 @@ -2333,7 +4329,6 @@ def _display_parse_result(self, result: ParseResult, include_dev: bool) -> None: } ecosystem_name = ecosystem_names.get(result.ecosystem, "Unknown") - filename = os.path.basename(result.file_path) cx_print(f"\n📋 Found {result.prod_count} {ecosystem_name} packages", "info") @@ -2387,62 +4382,241 @@ def progress_callback(current: int, total: int, step: InstallationStep) -> None: progress_callback=progress_callback, ) - result = coordinator.execute() + result = coordinator.execute() + + if result.success: + self._print_success(f"{ecosystem_name} packages installed successfully!") + console.print(f"Completed in {result.total_duration:.2f} seconds") + return 0 + else: + self._print_error(self.INSTALL_FAIL_MSG) + if result.error_message: + console.print(f"Error: {result.error_message}", style="red") + return 1 + + def _execute_multi_install(self, commands: list[dict[str, str]]) -> int: + """Execute multiple install commands.""" + all_commands = [cmd["command"] for cmd in commands] + all_descriptions = [cmd["description"] for cmd in commands] + + def progress_callback(current: int, total: int, step: InstallationStep) -> None: + status_emoji = "⏳" + if step.status == StepStatus.SUCCESS: + status_emoji = "✅" + elif step.status == StepStatus.FAILED: + status_emoji = "❌" + console.print(f"\n[{current}/{total}] {status_emoji} {step.description}") + console.print(f" Command: {step.command}") + + coordinator = InstallationCoordinator( + commands=all_commands, + descriptions=all_descriptions, + timeout=600, + stop_on_error=True, + progress_callback=progress_callback, + ) + + console.print("\n[bold]Installing packages...[/bold]") + result = coordinator.execute() + + if result.success: + self._print_success("\nAll packages installed successfully!") + console.print(f"Completed in {result.total_duration:.2f} seconds") + return 0 + else: + if 
result.failed_step is not None: + self._print_error(f"\n{self.INSTALL_FAIL_MSG} at step {result.failed_step + 1}") + else: + self._print_error(f"\n{self.INSTALL_FAIL_MSG}") + if result.error_message: + console.print(f"Error: {result.error_message}", style="red") + return 1 + + def doctor(self) -> int: + """Run system health checks.""" + from cortex.doctor import SystemDoctor + + doc = SystemDoctor() + return doc.run_checks() + + def troubleshoot(self, no_execute: bool = False) -> int: + """Run interactive troubleshooter.""" + from cortex.troubleshoot import Troubleshooter + + troubleshooter = Troubleshooter(no_execute=no_execute) + return troubleshooter.start() + + # -------------------------- + + +def _is_ascii(s: str) -> bool: + """Check if a string contains only ASCII characters.""" + try: + s.encode("ascii") + return True + except UnicodeEncodeError: + return False + + +def _normalize_for_lookup(s: str) -> str: + """ + Normalize a string for lookup, handling Latin and non-Latin scripts differently. + + For ASCII/Latin text: casefold for case-insensitive matching (handles accented chars) + For non-Latin text (e.g., 中文): keep unchanged to preserve meaning + + Uses casefold() instead of lower() because: + - casefold() handles accented Latin characters better (e.g., "Español", "Français") + - casefold() is more aggressive and handles edge cases like German ß -> ss + + This prevents issues like: + - "中文".lower() producing the same string but creating duplicate keys + - Meaningless normalization of non-Latin scripts + """ + if _is_ascii(s): + return s.casefold() + # For non-ASCII Latin scripts (accented chars like é, ñ, ü), use casefold + # Only keep unchanged for truly non-Latin scripts (CJK, Arabic, etc.) 
+ try: + # Check if string contains any Latin characters (a-z, A-Z, or accented) + # If it does, it's likely a Latin-based language name + import unicodedata + + has_latin = any(unicodedata.category(c).startswith("L") and ord(c) < 0x3000 for c in s) + if has_latin: + return s.casefold() + except Exception: + pass + return s + + +def _resolve_language_name(name: str) -> str | None: + """ + Resolve a language name or code to a supported language code. + + Accepts: + - Language codes: en, es, fr, de, zh + - English names: English, Spanish, French, German, Chinese + - Native names: Español, Français, Deutsch, 中文 + + Args: + name: Language name or code (case-insensitive for Latin scripts) + + Returns: + Language code if found, None otherwise + + Note: + Non-Latin scripts (e.g., Chinese 中文) are matched exactly without + case normalization, since .lower() is meaningless for these scripts + and could create key collisions. + """ + name = name.strip() + name_normalized = _normalize_for_lookup(name) + + # Direct code match (codes are always ASCII/lowercase) + if name_normalized in SUPPORTED_LANGUAGES: + return name_normalized + + # Build lookup tables for names + # Using a list of tuples to handle potential key collisions properly + name_to_code: dict[str, str] = {} + + for code, info in SUPPORTED_LANGUAGES.items(): + english_name = info["name"] + native_name = info["native"] + + # English names are always ASCII, use casefold for case-insensitive matching + name_to_code[english_name.casefold()] = code + + # Native names: normalize using _normalize_for_lookup + # - Latin scripts (Español, Français): casefold for case-insensitive matching + # - Non-Latin scripts (中文): store as-is only + native_normalized = _normalize_for_lookup(native_name) + name_to_code[native_normalized] = code + + # Also store original native name for exact match + # (handles case where user types exactly "Español" with correct accent) + if native_name != native_normalized: + name_to_code[native_name] = 
code + + # Try to find a match using normalized input + if name_normalized in name_to_code: + return name_to_code[name_normalized] + + # Try exact match for non-ASCII input + if name in name_to_code: + return name_to_code[name] + + return None + - if result.success: - self._print_success(f"{ecosystem_name} packages installed successfully!") - console.print(f"Completed in {result.total_duration:.2f} seconds") - return 0 - else: - self._print_error("Installation failed") - if result.error_message: - console.print(f"Error: {result.error_message}", style="red") - return 1 +def _handle_set_language(language_input: str) -> int: + """ + Handle the --set-language global flag. - def _execute_multi_install(self, commands: list[dict[str, str]]) -> int: - """Execute multiple install commands.""" - all_commands = [cmd["command"] for cmd in commands] - all_descriptions = [cmd["description"] for cmd in commands] + Args: + language_input: Language name or code from user - def progress_callback(current: int, total: int, step: InstallationStep) -> None: - status_emoji = "⏳" - if step.status == StepStatus.SUCCESS: - status_emoji = "✅" - elif step.status == StepStatus.FAILED: - status_emoji = "❌" - console.print(f"\n[{current}/{total}] {status_emoji} {step.description}") - console.print(f" Command: {step.command}") + Returns: + Exit code (0 for success, 1 for error) + """ + # Resolve the language name to a code + lang_code = _resolve_language_name(language_input) - coordinator = InstallationCoordinator( - commands=all_commands, - descriptions=all_descriptions, - timeout=600, - stop_on_error=True, - progress_callback=progress_callback, - ) + if not lang_code: + # Show error with available options + cx_print(t("language.invalid_code", code=language_input), "error") + console.print() + console.print(f"[bold]{t('language.supported_languages_header')}[/bold]") + for code, info in SUPPORTED_LANGUAGES.items(): + console.print(f" • {info['name']} ({info['native']}) - code: 
[green]{code}[/green]") + return 1 - console.print("\n[bold]Installing packages...[/bold]") - result = coordinator.execute() + # Set the language + try: + lang_config = LanguageConfig() + lang_config.set_language(lang_code) - if result.success: - self._print_success("\nAll packages installed successfully!") - console.print(f"Completed in {result.total_duration:.2f} seconds") + # Reset and update global translator + from cortex.i18n.translator import reset_translator + + reset_translator() + set_language(lang_code) + + lang_info = SUPPORTED_LANGUAGES[lang_code] + cx_print(t("language.changed", language=lang_info["native"]), "success") + return 0 + except Exception as e: + cx_print(t("language.set_failed", error=str(e)), "error") + return 1 + + def dashboard(self) -> int: + """Launch the real-time system monitoring dashboard""" + try: + from cortex.dashboard import DashboardApp + + app = DashboardApp() + rc = app.run() + return rc if isinstance(rc, int) else 0 + except ImportError as e: + self._print_error(f"Dashboard dependencies not available: {e}") + cx_print("Install required packages with:", "info") + cx_print(" pip install psutil>=5.9.0 nvidia-ml-py>=12.0.0", "info") + return 1 + except KeyboardInterrupt: return 0 - else: - if result.failed_step is not None: - self._print_error(f"\nInstallation failed at step {result.failed_step + 1}") - else: - self._print_error("\nInstallation failed") - if result.error_message: - console.print(f"Error: {result.error_message}", style="red") + except Exception as e: + self._print_error(f"Dashboard error: {e}") return 1 - # -------------------------- - def show_rich_help(): - """Display beautifully formatted help using Rich""" - from rich.table import Table + """Display a beautifully formatted help table using the Rich library. + + This function outputs the primary command menu, providing descriptions + for all core Cortex utilities including installation, environment + management, and container tools. 
+ """ show_banner(show_version=True) console.print() @@ -2451,27 +4625,35 @@ def show_rich_help(): console.print("[dim]Just tell Cortex what you want to install.[/dim]") console.print() - # Commands table + # Initialize a table to display commands with specific column styling table = Table(show_header=True, header_style="bold cyan", box=None) table.add_column("Command", style="green") table.add_column("Description") + # Command Rows table.add_row("ask ", "Ask about your system") - table.add_row("ask --do ", "Solve problems (can write/execute)") - table.add_row("do history", "View do-mode run history") + table.add_row("voice", "Voice input mode (F9 to speak)") table.add_row("demo", "See Cortex in action") table.add_row("wizard", "Configure API key") table.add_row("status", "System status") table.add_row("install ", "Install software") + table.add_row("remove ", "Remove packages with impact analysis") + table.add_row("install --mic", "Install via voice input") table.add_row("import ", "Import deps from package files") table.add_row("history", "View history") table.add_row("rollback ", "Undo installation") + table.add_row("role", "AI-driven system role detection") + table.add_row("stack ", "Install the stack") + table.add_row("dashboard", "Real-time system monitoring dashboard") table.add_row("notify", "Manage desktop notifications") table.add_row("env", "Manage environment variables") table.add_row("cache stats", "Show LLM cache statistics") - table.add_row("stack ", "Install the stack") + table.add_row("docker permissions", "Fix Docker bind-mount permissions") table.add_row("sandbox ", "Test packages in Docker sandbox") + table.add_row("update", "Check for and install updates") + table.add_row("daemon ", "Manage the cortexd background daemon") table.add_row("doctor", "System health check") + table.add_row("troubleshoot", "Interactive system troubleshooter") console.print(table) console.print() @@ -2524,6 +4706,21 @@ def main(): # Network config is optional - don't 
block execution if it fails console.print(f"[yellow]⚠️ Network auto-config failed: {e}[/yellow]") + # Check for updates on startup (cached, non-blocking) + # Only show notification for commands that aren't 'update' itself + try: + if temp_args.command not in ["update", None] and "--json" not in sys.argv: + update_release = should_notify_update() + if update_release: + console.print( + f"[cyan]🔔 Cortex update available:[/cyan] " + f"[green]{update_release.version}[/green]" + ) + console.print(" [dim]Run 'cortex update' to upgrade[/dim]") + console.print() + except Exception: + pass # Don't block CLI on update check failures + parser = argparse.ArgumentParser( prog="cortex", description="AI-powered Linux command interpreter", @@ -2533,31 +4730,136 @@ def main(): # Global flags parser.add_argument("--version", "-V", action="version", version=f"cortex {VERSION}") parser.add_argument("--verbose", "-v", action="store_true", help="Show detailed output") + parser.add_argument( + "--set-language", + "--language", + dest="set_language", + metavar="LANG", + help="Set display language (e.g., English, Spanish, Español, es, zh)", + ) subparsers = parser.add_subparsers(dest="command", help="Available commands") + # Define the docker command and its associated sub-actions + docker_parser = subparsers.add_parser("docker", help="Docker and container utilities") + docker_subs = docker_parser.add_subparsers(dest="docker_action", help="Docker actions") + + # Add the permissions action to allow fixing file ownership issues + perm_parser = docker_subs.add_parser( + "permissions", help="Fix file permissions from bind mounts" + ) + + # Provide an option to skip the manual confirmation prompt + perm_parser.add_argument("--yes", "-y", action="store_true", help=HELP_SKIP_CONFIRM) + + perm_parser.add_argument( + "--execute", "-e", action="store_true", help="Apply ownership changes (default: dry-run)" + ) + # Demo command - demo_parser = subparsers.add_parser("demo", help="See Cortex in 
action") + subparsers.add_parser("demo", help="See Cortex in action") + + # Dashboard command + dashboard_parser = subparsers.add_parser( + "dashboard", help="Real-time system monitoring dashboard" + ) # Wizard command - wizard_parser = subparsers.add_parser("wizard", help="Configure API key interactively") + subparsers.add_parser("wizard", help="Configure API key interactively") # Status command (includes comprehensive health checks) subparsers.add_parser("status", help="Show comprehensive system status and health checks") + # Benchmark command + benchmark_parser = subparsers.add_parser("benchmark", help="Run AI performance benchmark") + benchmark_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") + + # Systemd helper command + systemd_parser = subparsers.add_parser("systemd", help="Systemd service helper (plain English)") + systemd_parser.add_argument("service", help="Service name") + systemd_parser.add_argument( + "action", + nargs="?", + default="status", + choices=["status", "diagnose", "deps"], + help="Action: status (default), diagnose, deps", + ) + systemd_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") + + # GPU manager command + gpu_parser = subparsers.add_parser("gpu", help="Hybrid GPU (Optimus) manager") + gpu_parser.add_argument( + "action", + nargs="?", + default="status", + choices=["status", "modes", "switch", "apps"], + help="Action: status (default), modes, switch, apps", + ) + gpu_parser.add_argument( + "mode", nargs="?", help="Mode for switch action (integrated/hybrid/nvidia)" + ) + gpu_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") + + # Printer/Scanner setup command + printer_parser = subparsers.add_parser("printer", help="Printer/Scanner auto-setup") + printer_parser.add_argument( + "action", + nargs="?", + default="status", + choices=["status", "detect"], + help="Action: status (default), detect", + ) + 
printer_parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output") + # Ask command ask_parser = subparsers.add_parser("ask", help="Ask a question about your system") - ask_parser.add_argument("question", type=str, nargs="?", default=None, help="Natural language question (optional with --do)") - ask_parser.add_argument("--debug", action="store_true", help="Show debug output for agentic loop") + ask_parser.add_argument("question", nargs="?", type=str, help="Natural language question") + ask_parser.add_argument( + "--mic", + action="store_true", + help="Use voice input (press F9 to record)", + ) ask_parser.add_argument( - "--do", - action="store_true", - help="Enable do mode - Cortex can write, read, and execute commands to solve problems. If no question is provided, starts interactive session." + "--do", + action="store_true", + help="Enable execution mode - AI can execute commands with your approval", + ) + + # Voice command - continuous voice mode + voice_parser = subparsers.add_parser( + "voice", help="Voice input mode (F9 to speak, Ctrl+C to exit)" + ) + voice_parser.add_argument( + "--single", + "-s", + action="store_true", + help="Record single input and exit (default: continuous mode)", + ) + voice_parser.add_argument( + "--model", + "-m", + type=str, + default=None, + metavar="MODEL", + choices=[ + "tiny.en", + "base.en", + "small.en", + "medium.en", + "tiny", + "base", + "small", + "medium", + "large", + ], + help="Whisper model to use (default: base.en or CORTEX_WHISPER_MODEL env var). 
" + "Available models: tiny.en (39MB), base.en (140MB), small.en (466MB), " + "medium.en (1.5GB), tiny/base/small/medium (multilingual), large (6GB).", ) # Install command install_parser = subparsers.add_parser("install", help="Install software") - install_parser.add_argument("software", type=str, help="Software to install") + install_parser.add_argument("software", nargs="?", type=str, help="Software to install") install_parser.add_argument("--execute", action="store_true", help="Execute commands") install_parser.add_argument("--dry-run", action="store_true", help="Show commands only") install_parser.add_argument( @@ -2565,6 +4867,57 @@ def main(): action="store_true", help="Enable parallel execution for multi-step installs", ) + install_parser.add_argument( + "--json", + action="store_true", + help="Output as JSON", + ) + install_parser.add_argument( + "--mic", + action="store_true", + help="Use voice input for software name (press F9 to record)", + ) + + # Remove command - uninstall with impact analysis + remove_parser = subparsers.add_parser( + "remove", + help="Remove packages with impact analysis", + description="Analyze and remove packages safely with dependency impact analysis.", + ) + remove_parser.add_argument("package", type=str, help="Package to remove") + remove_parser.add_argument( + "--dry-run", + action="store_true", + default=True, + help="Show impact analysis without removing (default)", + ) + remove_parser.add_argument( + "--execute", + action="store_true", + help="Actually remove the package after analysis", + ) + remove_parser.add_argument( + "--purge", + action="store_true", + help="Also remove configuration files", + ) + remove_parser.add_argument( + "--force", + "-f", + action="store_true", + help="Force removal even if impact is high", + ) + remove_parser.add_argument( + "-y", + "--yes", + action="store_true", + help=HELP_SKIP_CONFIRM, + ) + remove_parser.add_argument( + "--json", + action="store_true", + help="Output impact analysis as 
JSON", + ) # Import command - import dependencies from package manager files import_parser = subparsers.add_parser( @@ -2625,6 +4978,29 @@ def main(): send_parser.add_argument("--actions", nargs="*", help="Action buttons") # -------------------------- + # Role Management Commands + # This parser defines the primary interface for system personality and contextual sensing. + role_parser = subparsers.add_parser( + "role", help="AI-driven system personality and context management" + ) + role_subs = role_parser.add_subparsers(dest="role_action", help="Role actions") + + # Subcommand: role detect + # Dynamically triggers the sensing layer to analyze system context and suggest roles. + role_subs.add_parser( + "detect", help="Dynamically sense system context and shell patterns to suggest an AI role" + ) + + # Subcommand: role set + # Allows manual override for role persistence and provides tailored recommendations. + role_set_parser = role_subs.add_parser( + "set", help="Manually override the system role and receive tailored recommendations" + ) + role_set_parser.add_argument( + "role_slug", + help="The role identifier (e.g., 'data-scientist', 'web-server', 'ml-workstation')", + ) + # Stack command stack_parser = subparsers.add_parser("stack", help="Manage pre-built package stacks") stack_parser.add_argument( @@ -2641,6 +5017,83 @@ def main(): cache_subs = cache_parser.add_subparsers(dest="cache_action", help="Cache actions") cache_subs.add_parser("stats", help="Show cache statistics") + # --- Config commands (including language settings) --- + config_parser = subparsers.add_parser("config", help="Configure Cortex settings") + config_subs = config_parser.add_subparsers(dest="config_action", help="Configuration actions") + + # config language - set language + config_lang_parser = config_subs.add_parser("language", help="Set display language") + config_lang_parser.add_argument( + "code", + nargs="?", + help="Language code (en, es, fr, de, zh) or 'auto' for auto-detection", + 
) + config_lang_parser.add_argument( + "--list", "-l", action="store_true", help="List available languages" + ) + config_lang_parser.add_argument( + "--info", "-i", action="store_true", help="Show current language configuration" + ) + + # config show - show all configuration + config_subs.add_parser("show", help="Show all current configuration") + + # --- Daemon Commands --- + daemon_parser = subparsers.add_parser("daemon", help="Manage the cortexd background daemon") + daemon_subs = daemon_parser.add_subparsers(dest="daemon_action", help="Daemon actions") + + # daemon install [--execute] + daemon_install_parser = daemon_subs.add_parser( + "install", help="Install and enable the daemon service" + ) + daemon_install_parser.add_argument( + "--execute", action="store_true", help="Actually run the installation" + ) + + # daemon uninstall [--execute] + daemon_uninstall_parser = daemon_subs.add_parser( + "uninstall", help="Stop and remove the daemon service" + ) + daemon_uninstall_parser.add_argument( + "--execute", action="store_true", help="Actually run the uninstallation" + ) + + # daemon config - uses config.get IPC handler + daemon_subs.add_parser("config", help="Show current daemon configuration") + + # daemon reload-config - uses config.reload IPC handler + daemon_subs.add_parser("reload-config", help="Reload daemon configuration from disk") + + # daemon version - uses version IPC handler + daemon_subs.add_parser("version", help="Show daemon version") + + # daemon ping - uses ping IPC handler + daemon_subs.add_parser("ping", help="Test daemon connectivity") + + # daemon shutdown - uses shutdown IPC handler + daemon_subs.add_parser("shutdown", help="Request daemon shutdown") + + # daemon run-tests - run daemon test suite + daemon_run_tests_parser = daemon_subs.add_parser( + "run-tests", + help="Run daemon test suite (runs all tests by default when no filters are provided)", + ) + daemon_run_tests_parser.add_argument("--unit", action="store_true", help="Run only 
unit tests") + daemon_run_tests_parser.add_argument( + "--integration", action="store_true", help="Run only integration tests" + ) + daemon_run_tests_parser.add_argument( + "--test", + "-t", + type=str, + metavar="NAME", + help="Run a specific test (e.g., test_config, test_daemon)", + ) + daemon_run_tests_parser.add_argument( + "--verbose", "-v", action="store_true", help="Show verbose test output" + ) + # -------------------------- + # --- Sandbox Commands (Docker-based package testing) --- sandbox_parser = subparsers.add_parser( "sandbox", help="Test packages in isolated Docker sandbox" @@ -2673,9 +5126,7 @@ def main(): sandbox_promote_parser.add_argument( "--dry-run", action="store_true", help="Show command without executing" ) - sandbox_promote_parser.add_argument( - "-y", "--yes", action="store_true", help="Skip confirmation prompt" - ) + sandbox_promote_parser.add_argument("-y", "--yes", action="store_true", help=HELP_SKIP_CONFIRM) # sandbox cleanup [--force] sandbox_cleanup_parser = sandbox_subs.add_parser("cleanup", help="Remove a sandbox environment") @@ -2688,7 +5139,7 @@ def main(): # sandbox exec sandbox_exec_parser = sandbox_subs.add_parser("exec", help="Execute command in sandbox") sandbox_exec_parser.add_argument("name", help="Sandbox name") - sandbox_exec_parser.add_argument("command", nargs="+", help="Command to execute") + sandbox_exec_parser.add_argument("cmd", nargs="+", help="Command to execute") # -------------------------- # --- Environment Variable Management Commands --- @@ -2781,103 +5232,410 @@ def main(): env_template_apply_parser.add_argument( "--encrypt-keys", help="Comma-separated list of keys to encrypt" ) - # --- Info Command (system information queries) --- - info_parser = subparsers.add_parser("info", help="Get system and application information") - info_parser.add_argument("query", nargs="*", help="Information query (natural language)") - info_parser.add_argument( - "--app", "-a", - type=str, - help="Get info about a specific 
application (nginx, docker, etc.)" + + # --- Shell Environment Analyzer Commands --- + # env audit - show all shell variables with sources + env_audit_parser = env_subs.add_parser( + "audit", help="Audit shell environment variables and show their sources" ) - info_parser.add_argument( - "--quick", "-q", - type=str, - choices=["cpu", "memory", "disk", "gpu", "os", "kernel", "network", "dns", - "services", "security", "processes", "environment"], - help="Quick lookup for common info types" + env_audit_parser.add_argument( + "--shell", + choices=["bash", "zsh", "fish"], + help="Shell to analyze (default: auto-detect)", ) - info_parser.add_argument( - "--category", "-c", - type=str, - choices=["hardware", "software", "network", "services", "security", - "storage", "performance", "configuration"], - help="Get structured info for a category" + env_audit_parser.add_argument( + "--no-system", + action="store_true", + help="Exclude system-wide config files", ) - info_parser.add_argument( - "--list", "-l", + env_audit_parser.add_argument( + "--json", action="store_true", - help="List available info types and applications" + help="Output as JSON", ) - info_parser.add_argument( - "--debug", + + # env check - detect conflicts and issues + env_check_parser = env_subs.add_parser( + "check", help="Check for environment variable conflicts and issues" + ) + env_check_parser.add_argument( + "--shell", + choices=["bash", "zsh", "fish"], + help="Shell to check (default: auto-detect)", + ) + + # env path subcommands + env_path_parser = env_subs.add_parser("path", help="Manage PATH entries") + env_path_subs = env_path_parser.add_subparsers(dest="path_action", help="PATH actions") + + # env path list + env_path_list_parser = env_path_subs.add_parser("list", help="List PATH entries with status") + env_path_list_parser.add_argument( + "--json", action="store_true", - help="Show debug output" + help="Output as JSON", ) - # --- Do Command (manage do-mode runs) --- - do_parser = 
subparsers.add_parser("do", help="Manage do-mode execution runs") - do_subs = do_parser.add_subparsers(dest="do_action", help="Do actions") + # env path add [--prepend|--append] [--persist] + env_path_add_parser = env_path_subs.add_parser("add", help="Add a path entry (idempotent)") + env_path_add_parser.add_argument("path", help="Path to add") + env_path_add_parser.add_argument( + "--append", + action="store_true", + help="Append to end of PATH (default: prepend)", + ) + env_path_add_parser.add_argument( + "--persist", + action="store_true", + help="Add to shell config file for persistence", + ) + env_path_add_parser.add_argument( + "--shell", + choices=["bash", "zsh", "fish"], + help="Shell config to modify (default: auto-detect)", + ) - # do history [--limit N] - do_history_parser = do_subs.add_parser("history", help="View do-mode run history") - do_history_parser.add_argument("--limit", "-n", type=int, default=20, help="Number of runs to show") - do_history_parser.add_argument("run_id", nargs="?", help="Show details for specific run ID") + # env path remove [--persist] + env_path_remove_parser = env_path_subs.add_parser("remove", help="Remove a path entry") + env_path_remove_parser.add_argument("path", help="Path to remove") + env_path_remove_parser.add_argument( + "--persist", + action="store_true", + help="Remove from shell config file", + ) + env_path_remove_parser.add_argument( + "--shell", + choices=["bash", "zsh", "fish"], + help="Shell config to modify (default: auto-detect)", + ) - # do setup - setup cortex user - do_subs.add_parser("setup", help="Setup cortex user for privilege management") + # env path dedupe [--dry-run] [--persist] + env_path_dedupe_parser = env_path_subs.add_parser( + "dedupe", help="Remove duplicate PATH entries" + ) + env_path_dedupe_parser.add_argument( + "--dry-run", + action="store_true", + help="Show what would be removed without making changes", + ) + env_path_dedupe_parser.add_argument( + "--persist", + action="store_true", 
+ help="Generate shell config to persist deduplication", + ) + env_path_dedupe_parser.add_argument( + "--shell", + choices=["bash", "zsh", "fish"], + help="Shell for generated config (default: auto-detect)", + ) - # do protected - manage protected paths - do_protected_parser = do_subs.add_parser("protected", help="Manage protected paths") - do_protected_parser.add_argument("--add", help="Add a path to protected list") - do_protected_parser.add_argument("--remove", help="Remove a path from protected list") - do_protected_parser.add_argument("--list", action="store_true", help="List all protected paths") + # env path clean [--remove-missing] [--dry-run] + env_path_clean_parser = env_path_subs.add_parser( + "clean", help="Clean PATH (remove duplicates and optionally missing paths)" + ) + env_path_clean_parser.add_argument( + "--remove-missing", + action="store_true", + help="Also remove paths that don't exist", + ) + env_path_clean_parser.add_argument( + "--dry-run", + action="store_true", + help="Show what would be cleaned without making changes", + ) + env_path_clean_parser.add_argument( + "--shell", + choices=["bash", "zsh", "fish"], + help="Shell for generated fix script (default: auto-detect)", + ) # -------------------------- - # --- Watch Command (terminal monitoring setup) --- - watch_parser = subparsers.add_parser("watch", help="Manage terminal watching for manual intervention mode") - watch_parser.add_argument("--install", action="store_true", help="Install terminal watch hook to .bashrc/.zshrc") - watch_parser.add_argument("--uninstall", action="store_true", help="Remove terminal watch hook from shell configs") - watch_parser.add_argument("--system", action="store_true", help="Install/uninstall system-wide (requires sudo)") - watch_parser.add_argument("--service", action="store_true", help="Install/uninstall as systemd service (recommended)") - watch_parser.add_argument("--status", action="store_true", help="Show terminal watch status") - 
watch_parser.add_argument("--test", action="store_true", help="Test terminal monitoring") + # Doctor command + doctor_parser = subparsers.add_parser("doctor", help="System health check") + + # Troubleshoot command + troubleshoot_parser = subparsers.add_parser( + "troubleshoot", help="Interactive system troubleshooter" + ) + troubleshoot_parser.add_argument( + "--no-execute", + action="store_true", + help="Disable automatic command execution (read-only mode)", + ) + # License and upgrade commands + subparsers.add_parser("upgrade", help="Upgrade to Cortex Pro") + subparsers.add_parser("license", help="Show license status") + + activate_parser = subparsers.add_parser("activate", help="Activate a license key") + activate_parser.add_argument("license_key", help="Your license key") + + # --- Update Command --- + update_parser = subparsers.add_parser("update", help="Check for and install Cortex updates") + update_parser.add_argument( + "--channel", + "-c", + choices=["stable", "beta", "dev"], + default="stable", + help="Update channel (default: stable)", + ) + update_subs = update_parser.add_subparsers(dest="update_action", help="Update actions") + + # update check + update_check_parser = update_subs.add_parser("check", help="Check for available updates") + + # update install [version] [--dry-run] + update_install_parser = update_subs.add_parser("install", help="Install available update") + update_install_parser.add_argument( + "version", nargs="?", help="Specific version to install (default: latest)" + ) + update_install_parser.add_argument( + "--dry-run", action="store_true", help="Show what would be updated without installing" + ) + + # update rollback [backup_id] + update_rollback_parser = update_subs.add_parser("rollback", help="Rollback to previous version") + update_rollback_parser.add_argument( + "backup_id", nargs="?", help="Backup ID or version to restore (default: most recent)" + ) + + # update list + update_subs.add_parser("list", help="List available 
versions") + + # update backups + update_subs.add_parser("backups", help="List available backups for rollback") # -------------------------- + # WiFi/Bluetooth Driver Matcher + wifi_parser = subparsers.add_parser("wifi", help="WiFi/Bluetooth driver auto-matcher") + wifi_parser.add_argument( + "action", + nargs="?", + default="status", + choices=["status", "detect", "recommend", "install", "connectivity"], + help="Action to perform (default: status)", + ) + wifi_parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output", + ) + + # Stdin Piping Support + stdin_parser = subparsers.add_parser("stdin", help="Process piped stdin data") + stdin_parser.add_argument( + "action", + nargs="?", + default="info", + choices=["info", "analyze", "passthrough", "stats"], + help="Action to perform (default: info)", + ) + stdin_parser.add_argument( + "--max-lines", + type=int, + default=1000, + help="Maximum lines to process (default: 1000)", + ) + stdin_parser.add_argument( + "--truncation", + choices=["head", "tail", "middle", "sample"], + default="middle", + help="Truncation mode for large input (default: middle)", + ) + stdin_parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output", + ) + + # Semantic Version Resolver + deps_parser = subparsers.add_parser("deps", help="Dependency version resolver") + deps_parser.add_argument( + "action", + nargs="?", + default="analyze", + choices=["analyze", "parse", "check", "compare"], + help="Action to perform (default: analyze)", + ) + deps_parser.add_argument( + "packages", + nargs="*", + help="Package constraints (format: pkg:constraint:source)", + ) + deps_parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output", + ) + + # System Health Score + health_parser = subparsers.add_parser("health", help="System health score and recommendations") + health_parser.add_argument( + "action", + nargs="?", + default="check", + 
choices=["check", "history", "factors", "quick"], + help="Action to perform (default: check)", + ) + health_parser.add_argument( + "-v", + "--verbose", + action="store_true", + help="Enable verbose output", + ) + args = parser.parse_args() + # Configure logging based on parsed arguments + if getattr(args, "json", False): + logging.getLogger("cortex").setLevel(logging.ERROR) + # Also suppress common SDK loggers + logging.getLogger("anthropic").setLevel(logging.ERROR) + logging.getLogger("openai").setLevel(logging.ERROR) + logging.getLogger("httpcore").setLevel(logging.ERROR) + + # Handle --set-language global flag first (before any command) + if getattr(args, "set_language", None): + result = _handle_set_language(args.set_language) + # Only return early if no command is specified + # This allows: cortex --set-language es install nginx + if not args.command: + return result + # If language setting failed, still return the error + if result != 0: + return result + # Otherwise continue with the command execution + + # The Guard: Check for empty commands before starting the CLI if not args.command: show_rich_help() return 0 + # Initialize the CLI handler cli = CortexCLI(verbose=args.verbose) try: + # Route the command to the appropriate method inside the cli object + if args.command == "docker": + if args.docker_action == "permissions": + return cli.docker_permissions(args) + parser.print_help() + return 1 + if args.command == "demo": return cli.demo() + elif args.command == "dashboard": + return cli.dashboard() elif args.command == "wizard": return cli.wizard() elif args.command == "status": return cli.status() - elif args.command == "ask": - return cli.ask( - getattr(args, "question", None), - debug=args.debug, - do_mode=getattr(args, "do", False) + elif args.command == "benchmark": + return cli.benchmark(verbose=getattr(args, "verbose", False)) + elif args.command == "systemd": + return cli.systemd( + args.service, + action=getattr(args, "action", "status"), + 
verbose=getattr(args, "verbose", False), + ) + elif args.command == "gpu": + return cli.gpu( + action=getattr(args, "action", "status"), + mode=getattr(args, "mode", None), + verbose=getattr(args, "verbose", False), ) + elif args.command == "printer": + return cli.printer( + action=getattr(args, "action", "status"), verbose=getattr(args, "verbose", False) + ) + elif args.command == "voice": + model = getattr(args, "model", None) + return cli.voice(continuous=not getattr(args, "single", False), model=model) + elif args.command == "ask": + do_mode = getattr(args, "do", False) + # Handle --mic flag for voice input + if getattr(args, "mic", False): + try: + from cortex.voice import VoiceInputError, VoiceInputHandler + + handler = VoiceInputHandler() + cx_print("Press F9 to speak your question...", "info") + transcript = handler.record_single() + + if not transcript: + cli._print_error("No speech detected") + return 1 + + cx_print(f"Question: {transcript}", "info") + return cli.ask(transcript, do_mode=do_mode) + except ImportError: + cli._print_error("Voice dependencies not installed.") + cx_print("Install with: pip install cortex-linux[voice]", "info") + return 1 + except VoiceInputError as e: + cli._print_error(f"Voice input error: {e}") + return 1 + # In do_mode, question is optional (interactive mode) + if not args.question and not do_mode: + cli._print_error("Please provide a question or use --mic for voice input") + return 1 + return cli.ask(args.question, do_mode=do_mode) elif args.command == "install": + # Handle --mic flag for voice input + if getattr(args, "mic", False): + handler = None + try: + from cortex.voice import VoiceInputError, VoiceInputHandler + + handler = VoiceInputHandler() + cx_print("Press F9 to speak what you want to install...", "info") + software = handler.record_single() + if not software: + cx_print("No speech detected.", "warning") + return 1 + cx_print(f"Installing: {software}", "info") + except ImportError: + cli._print_error("Voice 
dependencies not installed.") + cx_print("Install with: pip install cortex-linux[voice]", "info") + return 1 + except VoiceInputError as e: + cli._print_error(f"Voice input error: {e}") + return 1 + finally: + # Always clean up resources + if handler is not None: + try: + handler.stop() + except Exception as e: + # Log cleanup errors but don't raise + logging.debug("Error during voice handler cleanup: %s", e) + else: + software = args.software + if not software: + cli._print_error("Please provide software name or use --mic for voice input") + return 1 return cli.install( - args.software, + software, execute=args.execute, dry_run=args.dry_run, parallel=args.parallel, + json_output=args.json, ) + elif args.command == "remove": + # Handle --execute flag to override default dry-run + if args.execute: + args.dry_run = False + return cli.remove(args) elif args.command == "import": return cli.import_deps(args) elif args.command == "history": return cli.history(limit=args.limit, status=args.status, show_id=args.show_id) elif args.command == "rollback": return cli.rollback(args.id, dry_run=args.dry_run) - # Handle the new notify command + elif args.command == "role": + return cli.role(args) elif args.command == "notify": return cli.notify(args) elif args.command == "stack": @@ -2891,12 +5649,63 @@ def main(): return 1 elif args.command == "env": return cli.env(args) - elif args.command == "do": - return cli.do_cmd(args) - elif args.command == "info": - return cli.info_cmd(args) - elif args.command == "watch": - return cli.watch_cmd(args) + elif args.command == "doctor": + return cli.doctor() + elif args.command == "troubleshoot": + return cli.troubleshoot( + no_execute=getattr(args, "no_execute", False), + ) + elif args.command == "config": + return cli.config(args) + elif args.command == "upgrade": + from cortex.licensing import open_upgrade_page + + open_upgrade_page() + return 0 + elif args.command == "license": + from cortex.licensing import show_license_status + + 
show_license_status() + return 0 + elif args.command == "activate": + from cortex.licensing import activate_license + + return 0 if activate_license(args.license_key) else 1 + elif args.command == "update": + return cli.update(args) + elif args.command == "daemon": + return cli.daemon(args) + elif args.command == "wifi": + from cortex.wifi_driver import run_wifi_driver + + return run_wifi_driver( + action=getattr(args, "action", "status"), + verbose=getattr(args, "verbose", False), + ) + elif args.command == "stdin": + from cortex.stdin_handler import run_stdin_handler + + return run_stdin_handler( + action=getattr(args, "action", "info"), + max_lines=getattr(args, "max_lines", 1000), + truncation=getattr(args, "truncation", "middle"), + verbose=getattr(args, "verbose", False), + ) + elif args.command == "deps": + from cortex.semver_resolver import run_semver_resolver + + return run_semver_resolver( + action=getattr(args, "action", "analyze"), + packages=getattr(args, "packages", None), + verbose=getattr(args, "verbose", False), + ) + elif args.command == "health": + from cortex.health_score import run_health_check + + return run_health_check( + action=getattr(args, "action", "check"), + verbose=getattr(args, "verbose", False), + ) else: parser.print_help() return 1 @@ -2906,13 +5715,6 @@ def main(): except (ValueError, ImportError, OSError) as e: print(f"❌ Error: {e}", file=sys.stderr) return 1 - except AttributeError as e: - # Internal errors - show friendly message - print("❌ Something went wrong. 
Please try again.", file=sys.stderr) - if "--verbose" in sys.argv or "-v" in sys.argv: - import traceback - traceback.print_exc() - return 1 except Exception as e: print(f"❌ Unexpected error: {e}", file=sys.stderr) # Print traceback if verbose mode was requested diff --git a/cortex/demo.py b/cortex/demo.py index 6fd487eeb..808e96465 100644 --- a/cortex/demo.py +++ b/cortex/demo.py @@ -4,9 +4,9 @@ import sys from rich.console import Console -from rich.panel import Panel -from rich.prompt import Prompt, Confirm from rich.markdown import Markdown +from rich.panel import Panel +from rich.prompt import Confirm, Prompt from cortex.branding import show_banner @@ -41,106 +41,95 @@ def run_demo() -> int: """Run the interactive Cortex demo.""" console.clear() show_banner() - + # ───────────────────────────────────────────────────────────────── # INTRODUCTION # ───────────────────────────────────────────────────────────────── - + intro = """ **Cortex** - The AI-native package manager for Linux. In this demo you'll try: • **Ask** - Query your system in natural language -• **Install** - Install packages with AI interpretation +• **Install** - Install packages with AI interpretation • **Rollback** - Undo installations safely """ console.print(Panel(Markdown(intro), title="[cyan]Demo[/cyan]", border_style="cyan")) _wait_for_enter() - + # ───────────────────────────────────────────────────────────────── # ASK COMMAND # ───────────────────────────────────────────────────────────────── - - _section( - "🔍 Ask Command", - "Query your system without memorizing Linux commands." - ) - + + _section("🔍 Ask Command", "Query your system without memorizing Linux commands.") + console.print("[dim]Examples: 'What Python version?', 'How much disk space?'[/dim]\n") - + user_question = Prompt.ask( - "[cyan]What would you like to ask?[/cyan]", - default="What version of Python is installed?" + "[cyan]What would you like to ask?[/cyan]", default="What version of Python is installed?" 
) - - console.print(f"\n[yellow]$[/yellow] cortex ask \"{user_question}\"\n") + + console.print(f'\n[yellow]$[/yellow] cortex ask "{user_question}"\n') _run_cortex_command(["ask", user_question]) - + _wait_for_enter() - + # ───────────────────────────────────────────────────────────────── # INSTALL COMMAND # ───────────────────────────────────────────────────────────────── - - _section( - "📦 Install Command", - "Describe what you want - Cortex finds the right packages." - ) - + + _section("📦 Install Command", "Describe what you want - Cortex finds the right packages.") + console.print("[dim]Examples: 'a web server', 'python dev tools', 'docker'[/dim]\n") - + user_install = Prompt.ask( - "[cyan]What would you like to install?[/cyan]", - default="a lightweight text editor" + "[cyan]What would you like to install?[/cyan]", default="a lightweight text editor" ) - - console.print(f"\n[yellow]$[/yellow] cortex install \"{user_install}\" --dry-run\n") + + console.print(f'\n[yellow]$[/yellow] cortex install "{user_install}" --dry-run\n') _run_cortex_command(["install", user_install, "--dry-run"]) - + console.print() if Confirm.ask("Actually install this?", default=False): - console.print(f"\n[yellow]$[/yellow] cortex install \"{user_install}\" --execute\n") + console.print(f'\n[yellow]$[/yellow] cortex install "{user_install}" --execute\n') _run_cortex_command(["install", user_install, "--execute"]) - + _wait_for_enter() - + # ───────────────────────────────────────────────────────────────── # ROLLBACK COMMAND # ───────────────────────────────────────────────────────────────── - - _section( - "⏪ Rollback Command", - "Undo any installation by reverting to the previous state." 
- ) - + + _section("⏪ Rollback Command", "Undo any installation by reverting to the previous state.") + console.print("[dim]First, let's see your installation history with IDs:[/dim]\n") console.print("[yellow]$[/yellow] cortex history --limit 5\n") _run_cortex_command(["history", "--limit", "5"]) - + _wait_for_enter() - + if Confirm.ask("Preview a rollback?", default=False): console.print("\n[cyan]Copy an installation ID from the history above:[/cyan]") console.print("[dim]$ cortex rollback [/dim]", end="") rollback_id = input().strip() - + if rollback_id: console.print(f"\n[yellow]$[/yellow] cortex rollback {rollback_id} --dry-run\n") _run_cortex_command(["rollback", rollback_id, "--dry-run"]) - + if Confirm.ask("Actually rollback?", default=False): console.print(f"\n[yellow]$[/yellow] cortex rollback {rollback_id}\n") _run_cortex_command(["rollback", rollback_id]) - + # ───────────────────────────────────────────────────────────────── # SUMMARY # ───────────────────────────────────────────────────────────────── - + console.print(f"\n[bold cyan]{'─' * 50}[/bold cyan]") console.print("[bold green]✓ Demo Complete![/bold green]\n") console.print("[dim]Commands: ask, install, history, rollback, stack, status[/dim]") console.print("[dim]Run 'cortex --help' for more.[/dim]\n") - + return 0 diff --git a/cortex/do_runner.py b/cortex/do_runner.py index e5e2cea3f..ea56ecc41 100644 --- a/cortex/do_runner.py +++ b/cortex/do_runner.py @@ -8,33 +8,25 @@ """ # Re-export everything from the modular package -from cortex.do_runner import ( - # Models +from cortex.do_runner import ( # Diagnosis; Models; Verification; Managers; Handler; Database; Executor; Terminal + AutoFixer, CommandLog, CommandStatus, + ConflictDetector, + CortexUserManager, + DoHandler, DoRun, + DoRunDatabase, + ErrorDiagnoser, + FileUsefulnessAnalyzer, + ProtectedPathsManager, RunMode, TaskNode, TaskTree, + TaskTreeExecutor, TaskType, - # Database - DoRunDatabase, - # Managers - CortexUserManager, - 
ProtectedPathsManager, - # Terminal TerminalMonitor, - # Executor - TaskTreeExecutor, - # Diagnosis - AutoFixer, - ErrorDiagnoser, - # Verification - ConflictDetector, - FileUsefulnessAnalyzer, VerificationRunner, - # Handler - DoHandler, get_do_handler, setup_cortex_user, ) diff --git a/cortex/do_runner/__init__.py b/cortex/do_runner/__init__.py index 906fd1883..135159b2a 100644 --- a/cortex/do_runner/__init__.py +++ b/cortex/do_runner/__init__.py @@ -15,36 +15,16 @@ - handler: Main DoHandler class """ -from .models import ( - CommandLog, - CommandStatus, - DoRun, - RunMode, - TaskNode, - TaskTree, - TaskType, -) - from .database import DoRunDatabase - -from .managers import ( - CortexUserManager, - ProtectedPathsManager, -) - -from .terminal import TerminalMonitor - -from .executor import TaskTreeExecutor - from .diagnosis import ( + ALL_ERROR_PATTERNS, + LOGIN_REQUIREMENTS, + UBUNTU_PACKAGE_MAP, + UBUNTU_SERVICE_MAP, AutoFixer, ErrorDiagnoser, LoginHandler, LoginRequirement, - LOGIN_REQUIREMENTS, - UBUNTU_PACKAGE_MAP, - UBUNTU_SERVICE_MAP, - ALL_ERROR_PATTERNS, get_error_category, get_severity, is_critical_error, @@ -52,29 +32,42 @@ # New structured diagnosis engine from .diagnosis_v2 import ( + ERROR_PATTERNS, DiagnosisEngine, - ErrorCategory, DiagnosisResult, + ErrorCategory, + ErrorStackEntry, + ExecutionResult, FixCommand, FixPlan, VariableResolution, - ExecutionResult, - ErrorStackEntry, - ERROR_PATTERNS, get_diagnosis_engine, ) - -from .verification import ( - ConflictDetector, - FileUsefulnessAnalyzer, - VerificationRunner, -) - +from .executor import TaskTreeExecutor from .handler import ( DoHandler, get_do_handler, setup_cortex_user, ) +from .managers import ( + CortexUserManager, + ProtectedPathsManager, +) +from .models import ( + CommandLog, + CommandStatus, + DoRun, + RunMode, + TaskNode, + TaskTree, + TaskType, +) +from .terminal import TerminalMonitor +from .verification import ( + ConflictDetector, + FileUsefulnessAnalyzer, + VerificationRunner, 
+) __all__ = [ # Models @@ -126,4 +119,3 @@ "get_do_handler", "setup_cortex_user", ] - diff --git a/cortex/do_runner/database.py b/cortex/do_runner/database.py index b153fe1da..d5defe5a3 100644 --- a/cortex/do_runner/database.py +++ b/cortex/do_runner/database.py @@ -17,12 +17,12 @@ class DoRunDatabase: """SQLite database for storing do run history.""" - + def __init__(self, db_path: Path | None = None): self.db_path = db_path or Path.home() / ".cortex" / "do_runs.db" self._ensure_directory() self._init_db() - + def _ensure_directory(self): """Ensure the database directory exists with proper permissions.""" try: @@ -33,8 +33,10 @@ def _ensure_directory(self): alt_path = Path("/tmp") / ".cortex" / "do_runs.db" alt_path.parent.mkdir(parents=True, exist_ok=True) self.db_path = alt_path - console.print(f"[yellow]Warning: Using alternate database path: {self.db_path}[/yellow]") - + console.print( + f"[yellow]Warning: Using alternate database path: {self.db_path}[/yellow]" + ) + def _init_db(self): """Initialize the database schema.""" try: @@ -59,7 +61,7 @@ def _init_db(self): skipped_commands INTEGER DEFAULT 0 ) """) - + # Create sessions table conn.execute(""" CREATE TABLE IF NOT EXISTS do_sessions ( @@ -70,7 +72,7 @@ def _init_db(self): total_queries TEXT ) """) - + conn.execute(""" CREATE TABLE IF NOT EXISTS do_run_commands ( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -87,27 +89,27 @@ def _init_db(self): FOREIGN KEY (run_id) REFERENCES do_runs(run_id) ) """) - + conn.execute(""" CREATE INDEX IF NOT EXISTS idx_do_runs_started ON do_runs(started_at DESC) """) - + conn.execute(""" CREATE INDEX IF NOT EXISTS idx_do_run_commands_run_id ON do_run_commands(run_id) """) - + self._migrate_schema(conn) conn.commit() except sqlite3.OperationalError as e: raise OSError(f"Failed to initialize database at {self.db_path}: {e}") - + def _migrate_schema(self, conn: sqlite3.Connection): """Add new columns to existing tables if they don't exist.""" cursor = conn.execute("PRAGMA 
table_info(do_runs)") existing_columns = {row[1] for row in cursor.fetchall()} - + new_columns = [ ("total_commands", "INTEGER DEFAULT 0"), ("successful_commands", "INTEGER DEFAULT 0"), @@ -116,19 +118,19 @@ def _migrate_schema(self, conn: sqlite3.Connection): ("commands_list", "TEXT"), ("session_id", "TEXT"), ] - + for col_name, col_type in new_columns: if col_name not in existing_columns: try: conn.execute(f"ALTER TABLE do_runs ADD COLUMN {col_name} {col_type}") except sqlite3.OperationalError: pass - + cursor = conn.execute(""" - SELECT run_id, full_data FROM do_runs + SELECT run_id, full_data FROM do_runs WHERE total_commands IS NULL OR total_commands = 0 OR commands_list IS NULL """) - + for row in cursor.fetchall(): run_id = row[0] try: @@ -138,54 +140,60 @@ def _migrate_schema(self, conn: sqlite3.Connection): success = sum(1 for c in commands if c.get("status") == "success") failed = sum(1 for c in commands if c.get("status") == "failed") skipped = sum(1 for c in commands if c.get("status") == "skipped") - + commands_list = json.dumps([c.get("command", "") for c in commands]) - - conn.execute(""" - UPDATE do_runs SET - total_commands = ?, - successful_commands = ?, - failed_commands = ?, + + conn.execute( + """ + UPDATE do_runs SET + total_commands = ?, + successful_commands = ?, + failed_commands = ?, skipped_commands = ?, commands_list = ? WHERE run_id = ? - """, (total, success, failed, skipped, commands_list, run_id)) - + """, + (total, success, failed, skipped, commands_list, run_id), + ) + for idx, cmd in enumerate(commands): exists = conn.execute( "SELECT 1 FROM do_run_commands WHERE run_id = ? 
AND command_index = ?", - (run_id, idx) + (run_id, idx), ).fetchone() - + if not exists: output = cmd.get("output", "")[:250] if cmd.get("output") else "" error = cmd.get("error", "")[:250] if cmd.get("error") else "" - conn.execute(""" - INSERT INTO do_run_commands - (run_id, command_index, command, purpose, status, + conn.execute( + """ + INSERT INTO do_run_commands + (run_id, command_index, command, purpose, status, output_truncated, error_truncated, duration_seconds, timestamp, useful) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) - """, ( - run_id, - idx, - cmd.get("command", ""), - cmd.get("purpose", ""), - cmd.get("status", "pending"), - output, - error, - cmd.get("duration_seconds", 0), - cmd.get("timestamp", ""), - 1 if cmd.get("useful", True) else 0, - )) + """, + ( + run_id, + idx, + cmd.get("command", ""), + cmd.get("purpose", ""), + cmd.get("status", "pending"), + output, + error, + cmd.get("duration_seconds", 0), + cmd.get("timestamp", ""), + 1 if cmd.get("useful", True) else 0, + ), + ) except (json.JSONDecodeError, KeyError): pass - + def _generate_run_id(self) -> str: """Generate a unique run ID.""" timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S%f") random_part = hashlib.sha256(os.urandom(16)).hexdigest()[:8] return f"do_{timestamp}_{random_part}" - + def _truncate_output(self, text: str, max_length: int = 250) -> str: """Truncate output to specified length.""" if not text: @@ -193,81 +201,85 @@ def _truncate_output(self, text: str, max_length: int = 250) -> str: if len(text) <= max_length: return text return text[:max_length] + "... 
[truncated]" - + def save_run(self, run: DoRun) -> str: """Save a do run to the database with detailed command information.""" if not run.run_id: run.run_id = self._generate_run_id() - + commands_log = run.get_commands_log_string() - + total_commands = len(run.commands) successful_commands = sum(1 for c in run.commands if c.status == CommandStatus.SUCCESS) failed_commands = sum(1 for c in run.commands if c.status == CommandStatus.FAILED) skipped_commands = sum(1 for c in run.commands if c.status == CommandStatus.SKIPPED) - + commands_list = json.dumps([cmd.command for cmd in run.commands]) - + with sqlite3.connect(str(self.db_path)) as conn: - conn.execute(""" - INSERT OR REPLACE INTO do_runs - (run_id, session_id, summary, commands_log, commands_list, mode, user_query, started_at, + conn.execute( + """ + INSERT OR REPLACE INTO do_runs + (run_id, session_id, summary, commands_log, commands_list, mode, user_query, started_at, completed_at, files_accessed, privileges_granted, full_data, total_commands, successful_commands, failed_commands, skipped_commands) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
- """, ( - run.run_id, - run.session_id or None, - run.summary, - commands_log, - commands_list, - run.mode.value, - run.user_query, - run.started_at, - run.completed_at, - json.dumps(run.files_accessed), - json.dumps(run.privileges_granted), - json.dumps(run.to_dict()), - total_commands, - successful_commands, - failed_commands, - skipped_commands, - )) - + """, + ( + run.run_id, + run.session_id or None, + run.summary, + commands_log, + commands_list, + run.mode.value, + run.user_query, + run.started_at, + run.completed_at, + json.dumps(run.files_accessed), + json.dumps(run.privileges_granted), + json.dumps(run.to_dict()), + total_commands, + successful_commands, + failed_commands, + skipped_commands, + ), + ) + conn.execute("DELETE FROM do_run_commands WHERE run_id = ?", (run.run_id,)) - + for idx, cmd in enumerate(run.commands): - conn.execute(""" - INSERT INTO do_run_commands - (run_id, command_index, command, purpose, status, + conn.execute( + """ + INSERT INTO do_run_commands + (run_id, command_index, command, purpose, status, output_truncated, error_truncated, duration_seconds, timestamp, useful) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
- """, ( - run.run_id, - idx, - cmd.command, - cmd.purpose, - cmd.status.value, - self._truncate_output(cmd.output, 250), - self._truncate_output(cmd.error, 250), - cmd.duration_seconds, - cmd.timestamp, - 1 if cmd.useful else 0, - )) - + """, + ( + run.run_id, + idx, + cmd.command, + cmd.purpose, + cmd.status.value, + self._truncate_output(cmd.output, 250), + self._truncate_output(cmd.error, 250), + cmd.duration_seconds, + cmd.timestamp, + 1 if cmd.useful else 0, + ), + ) + conn.commit() - + return run.run_id - + def get_run(self, run_id: str) -> DoRun | None: """Get a specific run by ID.""" with sqlite3.connect(str(self.db_path)) as conn: conn.row_factory = sqlite3.Row - cursor = conn.execute( - "SELECT * FROM do_runs WHERE run_id = ?", (run_id,) - ) + cursor = conn.execute("SELECT * FROM do_runs WHERE run_id = ?", (run_id,)) row = cursor.fetchone() - + if row: full_data = json.loads(row["full_data"]) run = DoRun( @@ -284,45 +296,53 @@ def get_run(self, run_id: str) -> DoRun | None: ) return run return None - + def get_run_commands(self, run_id: str) -> list[dict[str, Any]]: """Get detailed command information for a run.""" with sqlite3.connect(str(self.db_path)) as conn: conn.row_factory = sqlite3.Row - cursor = conn.execute(""" - SELECT command_index, command, purpose, status, + cursor = conn.execute( + """ + SELECT command_index, command, purpose, status, output_truncated, error_truncated, duration_seconds, timestamp, useful - FROM do_run_commands - WHERE run_id = ? + FROM do_run_commands + WHERE run_id = ? 
ORDER BY command_index - """, (run_id,)) - + """, + (run_id,), + ) + commands = [] for row in cursor: - commands.append({ - "index": row["command_index"], - "command": row["command"], - "purpose": row["purpose"], - "status": row["status"], - "output": row["output_truncated"], - "error": row["error_truncated"], - "duration": row["duration_seconds"], - "timestamp": row["timestamp"], - "useful": bool(row["useful"]), - }) + commands.append( + { + "index": row["command_index"], + "command": row["command"], + "purpose": row["purpose"], + "status": row["status"], + "output": row["output_truncated"], + "error": row["error_truncated"], + "duration": row["duration_seconds"], + "timestamp": row["timestamp"], + "useful": bool(row["useful"]), + } + ) return commands - + def get_run_stats(self, run_id: str) -> dict[str, Any] | None: """Get command statistics for a run.""" with sqlite3.connect(str(self.db_path)) as conn: conn.row_factory = sqlite3.Row - cursor = conn.execute(""" - SELECT run_id, summary, total_commands, successful_commands, + cursor = conn.execute( + """ + SELECT run_id, summary, total_commands, successful_commands, failed_commands, skipped_commands, started_at, completed_at FROM do_runs WHERE run_id = ? 
- """, (run_id,)) + """, + (run_id,), + ) row = cursor.fetchone() - + if row: return { "run_id": row["run_id"], @@ -335,35 +355,33 @@ def get_run_stats(self, run_id: str) -> dict[str, Any] | None: "completed_at": row["completed_at"], } return None - + def get_commands_list(self, run_id: str) -> list[str]: """Get just the list of commands for a run.""" with sqlite3.connect(str(self.db_path)) as conn: conn.row_factory = sqlite3.Row - cursor = conn.execute( - "SELECT commands_list FROM do_runs WHERE run_id = ?", (run_id,) - ) + cursor = conn.execute("SELECT commands_list FROM do_runs WHERE run_id = ?", (run_id,)) row = cursor.fetchone() - + if row and row["commands_list"]: try: return json.loads(row["commands_list"]) except (json.JSONDecodeError, TypeError): pass - + cursor = conn.execute( "SELECT command FROM do_run_commands WHERE run_id = ? ORDER BY command_index", - (run_id,) + (run_id,), ) return [row["command"] for row in cursor.fetchall()] - + def get_recent_runs(self, limit: int = 20) -> list[DoRun]: """Get recent do runs.""" with sqlite3.connect(str(self.db_path)) as conn: conn.row_factory = sqlite3.Row cursor = conn.execute( "SELECT full_data, session_id FROM do_runs ORDER BY started_at DESC LIMIT ?", - (limit,) + (limit,), ) runs = [] for row in cursor: @@ -382,35 +400,36 @@ def get_recent_runs(self, limit: int = 20) -> list[DoRun]: run.session_id = row["session_id"] runs.append(run) return runs - + def create_session(self) -> str: """Create a new session and return the session ID.""" session_id = f"session_{datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')}_{hashlib.md5(str(datetime.datetime.now().timestamp()).encode()).hexdigest()[:8]}" - + with sqlite3.connect(str(self.db_path)) as conn: conn.execute( """INSERT INTO do_sessions (session_id, started_at, total_runs, total_queries) VALUES (?, ?, 0, '[]')""", - (session_id, datetime.datetime.now().isoformat()) + (session_id, datetime.datetime.now().isoformat()), ) conn.commit() - + return session_id - - def 
update_session(self, session_id: str, query: str | None = None, increment_runs: bool = False): + + def update_session( + self, session_id: str, query: str | None = None, increment_runs: bool = False + ): """Update a session with new query or run count.""" with sqlite3.connect(str(self.db_path)) as conn: if increment_runs: conn.execute( "UPDATE do_sessions SET total_runs = total_runs + 1 WHERE session_id = ?", - (session_id,) + (session_id,), ) - + if query: # Get current queries cursor = conn.execute( - "SELECT total_queries FROM do_sessions WHERE session_id = ?", - (session_id,) + "SELECT total_queries FROM do_sessions WHERE session_id = ?", (session_id,) ) row = cursor.fetchone() if row: @@ -418,27 +437,27 @@ def update_session(self, session_id: str, query: str | None = None, increment_ru queries.append(query) conn.execute( "UPDATE do_sessions SET total_queries = ? WHERE session_id = ?", - (json.dumps(queries), session_id) + (json.dumps(queries), session_id), ) - + conn.commit() - + def end_session(self, session_id: str): """Mark a session as ended.""" with sqlite3.connect(str(self.db_path)) as conn: conn.execute( "UPDATE do_sessions SET ended_at = ? WHERE session_id = ?", - (datetime.datetime.now().isoformat(), session_id) + (datetime.datetime.now().isoformat(), session_id), ) conn.commit() - + def get_session_runs(self, session_id: str) -> list[DoRun]: """Get all runs in a session.""" with sqlite3.connect(str(self.db_path)) as conn: conn.row_factory = sqlite3.Row cursor = conn.execute( "SELECT full_data FROM do_runs WHERE session_id = ? 
ORDER BY started_at ASC", - (session_id,) + (session_id,), ) runs = [] for row in cursor: @@ -455,7 +474,7 @@ def get_session_runs(self, session_id: str) -> list[DoRun]: run.session_id = session_id runs.append(run) return runs - + def get_recent_sessions(self, limit: int = 10) -> list[dict]: """Get recent sessions with their run counts.""" with sqlite3.connect(str(self.db_path)) as conn: @@ -463,16 +482,17 @@ def get_recent_sessions(self, limit: int = 10) -> list[dict]: cursor = conn.execute( """SELECT session_id, started_at, ended_at, total_runs, total_queries FROM do_sessions ORDER BY started_at DESC LIMIT ?""", - (limit,) + (limit,), ) sessions = [] for row in cursor: - sessions.append({ - "session_id": row["session_id"], - "started_at": row["started_at"], - "ended_at": row["ended_at"], - "total_runs": row["total_runs"], - "queries": json.loads(row["total_queries"]) if row["total_queries"] else [], - }) + sessions.append( + { + "session_id": row["session_id"], + "started_at": row["started_at"], + "ended_at": row["ended_at"], + "total_runs": row["total_runs"], + "queries": json.loads(row["total_queries"]) if row["total_queries"] else [], + } + ) return sessions - diff --git a/cortex/do_runner/diagnosis.py b/cortex/do_runner/diagnosis.py index 0a824611b..4df91adc6 100644 --- a/cortex/do_runner/diagnosis.py +++ b/cortex/do_runner/diagnosis.py @@ -21,10 +21,11 @@ import os import re -import subprocess import shutil -from typing import Any, Callable +import subprocess +from collections.abc import Callable from dataclasses import dataclass, field +from typing import Any from rich.console import Console @@ -35,9 +36,11 @@ # Error Pattern Definitions by Category # ============================================================================ + @dataclass class ErrorPattern: """Defines an error pattern and its fix strategy.""" + pattern: str error_type: str category: str @@ -50,533 +53,1657 @@ class ErrorPattern: # Category 1: Command & Shell Errors COMMAND_SHELL_ERRORS = 
[ # Timeout errors (check first for our specific message) - ErrorPattern(r"[Cc]ommand timed out after \d+ seconds", "command_timeout", "timeout", - "Command timed out - operation took too long", True, "retry_with_longer_timeout"), - ErrorPattern(r"[Tt]imed out", "timeout", "timeout", - "Operation timed out", True, "retry_with_longer_timeout"), - ErrorPattern(r"[Tt]imeout", "timeout", "timeout", - "Operation timed out", True, "retry_with_longer_timeout"), + ErrorPattern( + r"[Cc]ommand timed out after \d+ seconds", + "command_timeout", + "timeout", + "Command timed out - operation took too long", + True, + "retry_with_longer_timeout", + ), + ErrorPattern( + r"[Tt]imed out", + "timeout", + "timeout", + "Operation timed out", + True, + "retry_with_longer_timeout", + ), + ErrorPattern( + r"[Tt]imeout", + "timeout", + "timeout", + "Operation timed out", + True, + "retry_with_longer_timeout", + ), # Standard command errors - ErrorPattern(r"command not found", "command_not_found", "command_shell", - "Command not installed", True, "install_package"), - ErrorPattern(r"No such file or directory", "not_found", "command_shell", - "File or directory not found", True, "create_path"), - ErrorPattern(r"Permission denied", "permission_denied", "command_shell", - "Permission denied", True, "use_sudo"), - ErrorPattern(r"Operation not permitted", "operation_not_permitted", "command_shell", - "Operation not permitted (may need root)", True, "use_sudo"), - ErrorPattern(r"Not a directory", "not_a_directory", "command_shell", - "Expected directory but found file", False, "check_path"), - ErrorPattern(r"Is a directory", "is_a_directory", "command_shell", - "Expected file but found directory", False, "check_path"), - ErrorPattern(r"Invalid argument", "invalid_argument", "command_shell", - "Invalid argument passed", False, "check_args"), - ErrorPattern(r"Too many arguments", "too_many_args", "command_shell", - "Too many arguments provided", False, "check_args"), - ErrorPattern(r"[Mm]issing 
operand", "missing_operand", "command_shell", - "Required argument missing", False, "check_args"), - ErrorPattern(r"[Aa]mbiguous redirect", "ambiguous_redirect", "command_shell", - "Shell redirect is ambiguous", False, "fix_redirect"), - ErrorPattern(r"[Bb]ad substitution", "bad_substitution", "command_shell", - "Shell variable substitution error", False, "fix_syntax"), - ErrorPattern(r"[Uu]nbound variable", "unbound_variable", "command_shell", - "Variable not set", True, "set_variable"), - ErrorPattern(r"[Ss]yntax error near unexpected token", "syntax_error_token", "command_shell", - "Shell syntax error", False, "fix_syntax"), - ErrorPattern(r"[Uu]nexpected EOF", "unexpected_eof", "command_shell", - "Unclosed quote or bracket", False, "fix_syntax"), - ErrorPattern(r"[Cc]annot execute binary file", "cannot_execute_binary", "command_shell", - "Binary incompatible with system", False, "check_architecture"), - ErrorPattern(r"[Ee]xec format error", "exec_format_error", "command_shell", - "Invalid executable format", False, "check_architecture"), - ErrorPattern(r"[Ii]llegal option", "illegal_option", "command_shell", - "Unrecognized command option", False, "check_help"), - ErrorPattern(r"[Ii]nvalid option", "invalid_option", "command_shell", - "Invalid command option", False, "check_help"), - ErrorPattern(r"[Rr]ead-only file ?system", "readonly_fs", "command_shell", - "Filesystem is read-only", True, "remount_rw"), - ErrorPattern(r"[Ii]nput/output error", "io_error", "command_shell", - "I/O error (disk issue)", False, "check_disk", "critical"), - ErrorPattern(r"[Tt]ext file busy", "text_file_busy", "command_shell", - "File is being executed", True, "wait_retry"), - ErrorPattern(r"[Aa]rgument list too long", "arg_list_too_long", "command_shell", - "Too many arguments for command", True, "use_xargs"), - ErrorPattern(r"[Bb]roken pipe", "broken_pipe", "command_shell", - "Pipe closed unexpectedly", False, "check_pipe"), + ErrorPattern( + r"command not found", + 
"command_not_found", + "command_shell", + "Command not installed", + True, + "install_package", + ), + ErrorPattern( + r"No such file or directory", + "not_found", + "command_shell", + "File or directory not found", + True, + "create_path", + ), + ErrorPattern( + r"Permission denied", + "permission_denied", + "command_shell", + "Permission denied", + True, + "use_sudo", + ), + ErrorPattern( + r"Operation not permitted", + "operation_not_permitted", + "command_shell", + "Operation not permitted (may need root)", + True, + "use_sudo", + ), + ErrorPattern( + r"Not a directory", + "not_a_directory", + "command_shell", + "Expected directory but found file", + False, + "check_path", + ), + ErrorPattern( + r"Is a directory", + "is_a_directory", + "command_shell", + "Expected file but found directory", + False, + "check_path", + ), + ErrorPattern( + r"Invalid argument", + "invalid_argument", + "command_shell", + "Invalid argument passed", + False, + "check_args", + ), + ErrorPattern( + r"Too many arguments", + "too_many_args", + "command_shell", + "Too many arguments provided", + False, + "check_args", + ), + ErrorPattern( + r"[Mm]issing operand", + "missing_operand", + "command_shell", + "Required argument missing", + False, + "check_args", + ), + ErrorPattern( + r"[Aa]mbiguous redirect", + "ambiguous_redirect", + "command_shell", + "Shell redirect is ambiguous", + False, + "fix_redirect", + ), + ErrorPattern( + r"[Bb]ad substitution", + "bad_substitution", + "command_shell", + "Shell variable substitution error", + False, + "fix_syntax", + ), + ErrorPattern( + r"[Uu]nbound variable", + "unbound_variable", + "command_shell", + "Variable not set", + True, + "set_variable", + ), + ErrorPattern( + r"[Ss]yntax error near unexpected token", + "syntax_error_token", + "command_shell", + "Shell syntax error", + False, + "fix_syntax", + ), + ErrorPattern( + r"[Uu]nexpected EOF", + "unexpected_eof", + "command_shell", + "Unclosed quote or bracket", + False, + "fix_syntax", + ), + 
ErrorPattern( + r"[Cc]annot execute binary file", + "cannot_execute_binary", + "command_shell", + "Binary incompatible with system", + False, + "check_architecture", + ), + ErrorPattern( + r"[Ee]xec format error", + "exec_format_error", + "command_shell", + "Invalid executable format", + False, + "check_architecture", + ), + ErrorPattern( + r"[Ii]llegal option", + "illegal_option", + "command_shell", + "Unrecognized command option", + False, + "check_help", + ), + ErrorPattern( + r"[Ii]nvalid option", + "invalid_option", + "command_shell", + "Invalid command option", + False, + "check_help", + ), + ErrorPattern( + r"[Rr]ead-only file ?system", + "readonly_fs", + "command_shell", + "Filesystem is read-only", + True, + "remount_rw", + ), + ErrorPattern( + r"[Ii]nput/output error", + "io_error", + "command_shell", + "I/O error (disk issue)", + False, + "check_disk", + "critical", + ), + ErrorPattern( + r"[Tt]ext file busy", + "text_file_busy", + "command_shell", + "File is being executed", + True, + "wait_retry", + ), + ErrorPattern( + r"[Aa]rgument list too long", + "arg_list_too_long", + "command_shell", + "Too many arguments for command", + True, + "use_xargs", + ), + ErrorPattern( + r"[Bb]roken pipe", + "broken_pipe", + "command_shell", + "Pipe closed unexpectedly", + False, + "check_pipe", + ), ] # Category 2: File & Directory Errors FILE_DIRECTORY_ERRORS = [ - ErrorPattern(r"[Ff]ile exists", "file_exists", "file_directory", - "File already exists", True, "backup_overwrite"), - ErrorPattern(r"[Ff]ile name too long", "filename_too_long", "file_directory", - "Filename exceeds limit", False, "shorten_name"), - ErrorPattern(r"[Tt]oo many.*symbolic links", "symlink_loop", "file_directory", - "Symbolic link loop detected", True, "fix_symlink"), - ErrorPattern(r"[Ss]tale file handle", "stale_handle", "file_directory", - "NFS file handle stale", True, "remount_nfs"), - ErrorPattern(r"[Dd]irectory not empty", "dir_not_empty", "file_directory", - "Directory has contents", 
True, "rm_recursive"), - ErrorPattern(r"[Cc]ross-device link", "cross_device_link", "file_directory", - "Cannot link across filesystems", True, "copy_instead"), - ErrorPattern(r"[Tt]oo many open files", "too_many_files", "file_directory", - "File descriptor limit reached", True, "increase_ulimit"), - ErrorPattern(r"[Qq]uota exceeded", "quota_exceeded", "file_directory", - "Disk quota exceeded", False, "check_quota"), - ErrorPattern(r"[Oo]peration timed out", "operation_timeout", "file_directory", - "Operation timed out", True, "increase_timeout"), + ErrorPattern( + r"[Ff]ile exists", + "file_exists", + "file_directory", + "File already exists", + True, + "backup_overwrite", + ), + ErrorPattern( + r"[Ff]ile name too long", + "filename_too_long", + "file_directory", + "Filename exceeds limit", + False, + "shorten_name", + ), + ErrorPattern( + r"[Tt]oo many.*symbolic links", + "symlink_loop", + "file_directory", + "Symbolic link loop detected", + True, + "fix_symlink", + ), + ErrorPattern( + r"[Ss]tale file handle", + "stale_handle", + "file_directory", + "NFS file handle stale", + True, + "remount_nfs", + ), + ErrorPattern( + r"[Dd]irectory not empty", + "dir_not_empty", + "file_directory", + "Directory has contents", + True, + "rm_recursive", + ), + ErrorPattern( + r"[Cc]ross-device link", + "cross_device_link", + "file_directory", + "Cannot link across filesystems", + True, + "copy_instead", + ), + ErrorPattern( + r"[Tt]oo many open files", + "too_many_files", + "file_directory", + "File descriptor limit reached", + True, + "increase_ulimit", + ), + ErrorPattern( + r"[Qq]uota exceeded", + "quota_exceeded", + "file_directory", + "Disk quota exceeded", + False, + "check_quota", + ), + ErrorPattern( + r"[Oo]peration timed out", + "operation_timeout", + "file_directory", + "Operation timed out", + True, + "increase_timeout", + ), ] # Category 3: Permission & Ownership Errors PERMISSION_ERRORS = [ - ErrorPattern(r"[Aa]ccess denied", "access_denied", "permission", - 
"Access denied", True, "use_sudo"), - ErrorPattern(r"[Aa]uthentication fail", "auth_failure", "permission", - "Authentication failed", False, "check_credentials"), - ErrorPattern(r"[Ii]nvalid user", "invalid_user", "permission", - "User does not exist", True, "create_user"), - ErrorPattern(r"[Ii]nvalid group", "invalid_group", "permission", - "Group does not exist", True, "create_group"), - ErrorPattern(r"[Nn]ot owner", "not_owner", "permission", - "Not the owner of file", True, "use_sudo"), + ErrorPattern( + r"[Aa]ccess denied", "access_denied", "permission", "Access denied", True, "use_sudo" + ), + ErrorPattern( + r"[Aa]uthentication fail", + "auth_failure", + "permission", + "Authentication failed", + False, + "check_credentials", + ), + ErrorPattern( + r"[Ii]nvalid user", "invalid_user", "permission", "User does not exist", True, "create_user" + ), + ErrorPattern( + r"[Ii]nvalid group", + "invalid_group", + "permission", + "Group does not exist", + True, + "create_group", + ), + ErrorPattern( + r"[Nn]ot owner", "not_owner", "permission", "Not the owner of file", True, "use_sudo" + ), ] # Category 4: Process & Execution Errors PROCESS_ERRORS = [ - ErrorPattern(r"[Nn]o such process", "no_such_process", "process", - "Process does not exist", False, "check_pid"), - ErrorPattern(r"[Pp]rocess already running", "already_running", "process", - "Process already running", True, "kill_existing"), - ErrorPattern(r"[Pp]rocess terminated", "process_terminated", "process", - "Process was terminated", False, "check_logs"), - ErrorPattern(r"[Kk]illed", "killed", "process", - "Process was killed (OOM?)", False, "check_memory", "critical"), - ErrorPattern(r"[Ss]egmentation fault", "segfault", "process", - "Memory access violation", False, "debug_crash", "critical"), - ErrorPattern(r"[Bb]us error", "bus_error", "process", - "Bus error (memory alignment)", False, "debug_crash", "critical"), - ErrorPattern(r"[Ff]loating point exception", "fpe", "process", - "Floating point 
exception", False, "debug_crash"), - ErrorPattern(r"[Ii]llegal instruction", "illegal_instruction", "process", - "CPU instruction error", False, "check_architecture", "critical"), - ErrorPattern(r"[Tt]race.*trap", "trace_trap", "process", - "Debugger trap", False, "check_debugger"), - ErrorPattern(r"[Rr]esource temporarily unavailable", "resource_unavailable", "process", - "Resource busy", True, "wait_retry"), - ErrorPattern(r"[Tt]oo many processes", "too_many_processes", "process", - "Process limit reached", True, "increase_ulimit"), - ErrorPattern(r"[Oo]peration canceled", "operation_canceled", "process", - "Operation was canceled", False, "check_timeout"), + ErrorPattern( + r"[Nn]o such process", + "no_such_process", + "process", + "Process does not exist", + False, + "check_pid", + ), + ErrorPattern( + r"[Pp]rocess already running", + "already_running", + "process", + "Process already running", + True, + "kill_existing", + ), + ErrorPattern( + r"[Pp]rocess terminated", + "process_terminated", + "process", + "Process was terminated", + False, + "check_logs", + ), + ErrorPattern( + r"[Kk]illed", + "killed", + "process", + "Process was killed (OOM?)", + False, + "check_memory", + "critical", + ), + ErrorPattern( + r"[Ss]egmentation fault", + "segfault", + "process", + "Memory access violation", + False, + "debug_crash", + "critical", + ), + ErrorPattern( + r"[Bb]us error", + "bus_error", + "process", + "Bus error (memory alignment)", + False, + "debug_crash", + "critical", + ), + ErrorPattern( + r"[Ff]loating point exception", + "fpe", + "process", + "Floating point exception", + False, + "debug_crash", + ), + ErrorPattern( + r"[Ii]llegal instruction", + "illegal_instruction", + "process", + "CPU instruction error", + False, + "check_architecture", + "critical", + ), + ErrorPattern( + r"[Tt]race.*trap", "trace_trap", "process", "Debugger trap", False, "check_debugger" + ), + ErrorPattern( + r"[Rr]esource temporarily unavailable", + "resource_unavailable", + 
"process", + "Resource busy", + True, + "wait_retry", + ), + ErrorPattern( + r"[Tt]oo many processes", + "too_many_processes", + "process", + "Process limit reached", + True, + "increase_ulimit", + ), + ErrorPattern( + r"[Oo]peration canceled", + "operation_canceled", + "process", + "Operation was canceled", + False, + "check_timeout", + ), ] # Category 5: Memory & Resource Errors MEMORY_ERRORS = [ - ErrorPattern(r"[Oo]ut of memory", "oom", "memory", - "Out of memory", True, "free_memory", "critical"), - ErrorPattern(r"[Cc]annot allocate memory", "cannot_allocate", "memory", - "Memory allocation failed", True, "free_memory", "critical"), - ErrorPattern(r"[Mm]emory exhausted", "memory_exhausted", "memory", - "Memory exhausted", True, "free_memory", "critical"), - ErrorPattern(r"[Ss]tack overflow", "stack_overflow", "memory", - "Stack overflow", False, "increase_stack", "critical"), - ErrorPattern(r"[Dd]evice or resource busy", "device_busy", "memory", - "Device or resource busy", True, "wait_retry"), - ErrorPattern(r"[Nn]o space left on device", "no_space", "memory", - "Disk full", True, "free_disk", "critical"), - ErrorPattern(r"[Dd]isk quota exceeded", "disk_quota", "memory", - "Disk quota exceeded", False, "check_quota"), - ErrorPattern(r"[Ff]ile table overflow", "file_table_overflow", "memory", - "System file table full", True, "increase_ulimit", "critical"), + ErrorPattern( + r"[Oo]ut of memory", "oom", "memory", "Out of memory", True, "free_memory", "critical" + ), + ErrorPattern( + r"[Cc]annot allocate memory", + "cannot_allocate", + "memory", + "Memory allocation failed", + True, + "free_memory", + "critical", + ), + ErrorPattern( + r"[Mm]emory exhausted", + "memory_exhausted", + "memory", + "Memory exhausted", + True, + "free_memory", + "critical", + ), + ErrorPattern( + r"[Ss]tack overflow", + "stack_overflow", + "memory", + "Stack overflow", + False, + "increase_stack", + "critical", + ), + ErrorPattern( + r"[Dd]evice or resource busy", + "device_busy", + 
"memory", + "Device or resource busy", + True, + "wait_retry", + ), + ErrorPattern( + r"[Nn]o space left on device", + "no_space", + "memory", + "Disk full", + True, + "free_disk", + "critical", + ), + ErrorPattern( + r"[Dd]isk quota exceeded", + "disk_quota", + "memory", + "Disk quota exceeded", + False, + "check_quota", + ), + ErrorPattern( + r"[Ff]ile table overflow", + "file_table_overflow", + "memory", + "System file table full", + True, + "increase_ulimit", + "critical", + ), ] # Category 6: Disk & Filesystem Errors FILESYSTEM_ERRORS = [ - ErrorPattern(r"[Ww]rong fs type", "wrong_fs_type", "filesystem", - "Wrong filesystem type", False, "check_fstype"), - ErrorPattern(r"[Ff]ilesystem.*corrupt", "fs_corrupt", "filesystem", - "Filesystem corrupted", False, "fsck", "critical"), - ErrorPattern(r"[Ss]uperblock invalid", "superblock_invalid", "filesystem", - "Superblock invalid", False, "fsck", "critical"), - ErrorPattern(r"[Mm]ount point does not exist", "mount_point_missing", "filesystem", - "Mount point missing", True, "create_mountpoint"), - ErrorPattern(r"[Dd]evice is busy", "device_busy_mount", "filesystem", - "Device busy (in use)", True, "lazy_umount"), - ErrorPattern(r"[Nn]ot mounted", "not_mounted", "filesystem", - "Filesystem not mounted", True, "mount_fs"), - ErrorPattern(r"[Aa]lready mounted", "already_mounted", "filesystem", - "Already mounted", False, "check_mount"), - ErrorPattern(r"[Bb]ad magic number", "bad_magic", "filesystem", - "Bad magic number in superblock", False, "fsck", "critical"), - ErrorPattern(r"[Ss]tructure needs cleaning", "needs_cleaning", "filesystem", - "Filesystem needs fsck", False, "fsck"), - ErrorPattern(r"[Jj]ournal has aborted", "journal_aborted", "filesystem", - "Journal aborted", False, "fsck", "critical"), + ErrorPattern( + r"[Ww]rong fs type", + "wrong_fs_type", + "filesystem", + "Wrong filesystem type", + False, + "check_fstype", + ), + ErrorPattern( + r"[Ff]ilesystem.*corrupt", + "fs_corrupt", + "filesystem", + 
"Filesystem corrupted", + False, + "fsck", + "critical", + ), + ErrorPattern( + r"[Ss]uperblock invalid", + "superblock_invalid", + "filesystem", + "Superblock invalid", + False, + "fsck", + "critical", + ), + ErrorPattern( + r"[Mm]ount point does not exist", + "mount_point_missing", + "filesystem", + "Mount point missing", + True, + "create_mountpoint", + ), + ErrorPattern( + r"[Dd]evice is busy", + "device_busy_mount", + "filesystem", + "Device busy (in use)", + True, + "lazy_umount", + ), + ErrorPattern( + r"[Nn]ot mounted", "not_mounted", "filesystem", "Filesystem not mounted", True, "mount_fs" + ), + ErrorPattern( + r"[Aa]lready mounted", + "already_mounted", + "filesystem", + "Already mounted", + False, + "check_mount", + ), + ErrorPattern( + r"[Bb]ad magic number", + "bad_magic", + "filesystem", + "Bad magic number in superblock", + False, + "fsck", + "critical", + ), + ErrorPattern( + r"[Ss]tructure needs cleaning", + "needs_cleaning", + "filesystem", + "Filesystem needs fsck", + False, + "fsck", + ), + ErrorPattern( + r"[Jj]ournal has aborted", + "journal_aborted", + "filesystem", + "Journal aborted", + False, + "fsck", + "critical", + ), ] # Category 7: Networking Errors NETWORK_ERRORS = [ - ErrorPattern(r"[Nn]etwork is unreachable", "network_unreachable", "network", - "Network unreachable", True, "check_network"), - ErrorPattern(r"[Nn]o route to host", "no_route", "network", - "No route to host", True, "check_routing"), - ErrorPattern(r"[Cc]onnection refused", "connection_refused", "network", - "Connection refused", True, "check_service"), - ErrorPattern(r"[Cc]onnection timed out", "connection_timeout", "network", - "Connection timed out", True, "check_firewall"), - ErrorPattern(r"[Cc]onnection reset by peer", "connection_reset", "network", - "Connection reset", False, "check_remote"), - ErrorPattern(r"[Hh]ost is down", "host_down", "network", - "Remote host down", False, "check_host"), - ErrorPattern(r"[Tt]emporary failure in name resolution", 
"dns_temp_fail", "network", - "DNS temporary failure", True, "retry_dns"), - ErrorPattern(r"[Nn]ame or service not known", "dns_unknown", "network", - "DNS lookup failed", True, "check_dns"), - ErrorPattern(r"[Dd]NS lookup failed", "dns_failed", "network", - "DNS lookup failed", True, "check_dns"), - ErrorPattern(r"[Aa]ddress already in use", "address_in_use", "network", - "Port already in use", True, "find_port_user"), - ErrorPattern(r"[Cc]annot assign requested address", "cannot_assign_addr", "network", - "Address not available", False, "check_interface"), - ErrorPattern(r"[Pp]rotocol not supported", "protocol_not_supported", "network", - "Protocol not supported", False, "check_protocol"), - ErrorPattern(r"[Ss]ocket operation on non-socket", "not_socket", "network", - "Invalid socket operation", False, "check_fd"), + ErrorPattern( + r"[Nn]etwork is unreachable", + "network_unreachable", + "network", + "Network unreachable", + True, + "check_network", + ), + ErrorPattern( + r"[Nn]o route to host", "no_route", "network", "No route to host", True, "check_routing" + ), + ErrorPattern( + r"[Cc]onnection refused", + "connection_refused", + "network", + "Connection refused", + True, + "check_service", + ), + ErrorPattern( + r"[Cc]onnection timed out", + "connection_timeout", + "network", + "Connection timed out", + True, + "check_firewall", + ), + ErrorPattern( + r"[Cc]onnection reset by peer", + "connection_reset", + "network", + "Connection reset", + False, + "check_remote", + ), + ErrorPattern( + r"[Hh]ost is down", "host_down", "network", "Remote host down", False, "check_host" + ), + ErrorPattern( + r"[Tt]emporary failure in name resolution", + "dns_temp_fail", + "network", + "DNS temporary failure", + True, + "retry_dns", + ), + ErrorPattern( + r"[Nn]ame or service not known", + "dns_unknown", + "network", + "DNS lookup failed", + True, + "check_dns", + ), + ErrorPattern( + r"[Dd]NS lookup failed", "dns_failed", "network", "DNS lookup failed", True, "check_dns" + 
), + ErrorPattern( + r"[Aa]ddress already in use", + "address_in_use", + "network", + "Port already in use", + True, + "find_port_user", + ), + ErrorPattern( + r"[Cc]annot assign requested address", + "cannot_assign_addr", + "network", + "Address not available", + False, + "check_interface", + ), + ErrorPattern( + r"[Pp]rotocol not supported", + "protocol_not_supported", + "network", + "Protocol not supported", + False, + "check_protocol", + ), + ErrorPattern( + r"[Ss]ocket operation on non-socket", + "not_socket", + "network", + "Invalid socket operation", + False, + "check_fd", + ), ] # Category 8: Package Manager Errors (Ubuntu/Debian apt) PACKAGE_ERRORS = [ - ErrorPattern(r"[Uu]nable to locate package", "package_not_found", "package", - "Package not found", True, "update_repos"), - ErrorPattern(r"[Pp]ackage.*not found", "package_not_found", "package", - "Package not found", True, "update_repos"), - ErrorPattern(r"[Ff]ailed to fetch", "fetch_failed", "package", - "Failed to download package", True, "change_mirror"), - ErrorPattern(r"[Hh]ash [Ss]um mismatch", "hash_mismatch", "package", - "Package checksum mismatch", True, "clean_apt"), - ErrorPattern(r"[Rr]epository.*not signed", "repo_not_signed", "package", - "Repository not signed", True, "add_key"), - ErrorPattern(r"[Gg][Pp][Gg] error", "gpg_error", "package", - "GPG signature error", True, "fix_gpg"), - ErrorPattern(r"[Dd]ependency problems", "dependency_problems", "package", - "Dependency issues", True, "fix_dependencies"), - ErrorPattern(r"[Uu]nmet dependencies", "unmet_dependencies", "package", - "Unmet dependencies", True, "fix_dependencies"), - ErrorPattern(r"[Bb]roken packages", "broken_packages", "package", - "Broken packages", True, "fix_broken"), - ErrorPattern(r"[Vv]ery bad inconsistent state", "inconsistent_state", "package", - "Package in bad state", True, "force_reinstall"), - ErrorPattern(r"[Cc]onflicts with", "package_conflict", "package", - "Package conflict", True, "resolve_conflict"), - 
ErrorPattern(r"dpkg.*lock", "dpkg_lock", "package", - "Package manager locked", True, "clear_lock"), - ErrorPattern(r"apt.*lock", "apt_lock", "package", - "APT locked", True, "clear_lock"), - ErrorPattern(r"E: Could not get lock", "could_not_get_lock", "package", - "Package manager locked", True, "clear_lock"), + ErrorPattern( + r"[Uu]nable to locate package", + "package_not_found", + "package", + "Package not found", + True, + "update_repos", + ), + ErrorPattern( + r"[Pp]ackage.*not found", + "package_not_found", + "package", + "Package not found", + True, + "update_repos", + ), + ErrorPattern( + r"[Ff]ailed to fetch", + "fetch_failed", + "package", + "Failed to download package", + True, + "change_mirror", + ), + ErrorPattern( + r"[Hh]ash [Ss]um mismatch", + "hash_mismatch", + "package", + "Package checksum mismatch", + True, + "clean_apt", + ), + ErrorPattern( + r"[Rr]epository.*not signed", + "repo_not_signed", + "package", + "Repository not signed", + True, + "add_key", + ), + ErrorPattern( + r"[Gg][Pp][Gg] error", "gpg_error", "package", "GPG signature error", True, "fix_gpg" + ), + ErrorPattern( + r"[Dd]ependency problems", + "dependency_problems", + "package", + "Dependency issues", + True, + "fix_dependencies", + ), + ErrorPattern( + r"[Uu]nmet dependencies", + "unmet_dependencies", + "package", + "Unmet dependencies", + True, + "fix_dependencies", + ), + ErrorPattern( + r"[Bb]roken packages", "broken_packages", "package", "Broken packages", True, "fix_broken" + ), + ErrorPattern( + r"[Vv]ery bad inconsistent state", + "inconsistent_state", + "package", + "Package in bad state", + True, + "force_reinstall", + ), + ErrorPattern( + r"[Cc]onflicts with", + "package_conflict", + "package", + "Package conflict", + True, + "resolve_conflict", + ), + ErrorPattern( + r"dpkg.*lock", "dpkg_lock", "package", "Package manager locked", True, "clear_lock" + ), + ErrorPattern(r"apt.*lock", "apt_lock", "package", "APT locked", True, "clear_lock"), + ErrorPattern( + r"E: 
Could not get lock", + "could_not_get_lock", + "package", + "Package manager locked", + True, + "clear_lock", + ), ] # Category 9: User & Authentication Errors USER_AUTH_ERRORS = [ - ErrorPattern(r"[Uu]ser does not exist", "user_not_exist", "user_auth", - "User does not exist", True, "create_user"), - ErrorPattern(r"[Gg]roup does not exist", "group_not_exist", "user_auth", - "Group does not exist", True, "create_group"), - ErrorPattern(r"[Aa]ccount expired", "account_expired", "user_auth", - "Account expired", False, "renew_account"), - ErrorPattern(r"[Pp]assword expired", "password_expired", "user_auth", - "Password expired", False, "change_password"), - ErrorPattern(r"[Ii]ncorrect password", "wrong_password", "user_auth", - "Wrong password", False, "check_password"), - ErrorPattern(r"[Aa]ccount locked", "account_locked", "user_auth", - "Account locked", False, "unlock_account"), + ErrorPattern( + r"[Uu]ser does not exist", + "user_not_exist", + "user_auth", + "User does not exist", + True, + "create_user", + ), + ErrorPattern( + r"[Gg]roup does not exist", + "group_not_exist", + "user_auth", + "Group does not exist", + True, + "create_group", + ), + ErrorPattern( + r"[Aa]ccount expired", + "account_expired", + "user_auth", + "Account expired", + False, + "renew_account", + ), + ErrorPattern( + r"[Pp]assword expired", + "password_expired", + "user_auth", + "Password expired", + False, + "change_password", + ), + ErrorPattern( + r"[Ii]ncorrect password", + "wrong_password", + "user_auth", + "Wrong password", + False, + "check_password", + ), + ErrorPattern( + r"[Aa]ccount locked", + "account_locked", + "user_auth", + "Account locked", + False, + "unlock_account", + ), ] # Category 16: Docker/Container Errors DOCKER_ERRORS = [ # Container name conflicts - ErrorPattern(r"[Cc]onflict.*container name.*already in use", "container_name_conflict", "docker", - "Container name already in use", True, "remove_or_rename_container"), - ErrorPattern(r"is already in use by 
container", "container_name_conflict", "docker", - "Container name already in use", True, "remove_or_rename_container"), + ErrorPattern( + r"[Cc]onflict.*container name.*already in use", + "container_name_conflict", + "docker", + "Container name already in use", + True, + "remove_or_rename_container", + ), + ErrorPattern( + r"is already in use by container", + "container_name_conflict", + "docker", + "Container name already in use", + True, + "remove_or_rename_container", + ), # Container not found - ErrorPattern(r"[Nn]o such container", "container_not_found", "docker", - "Container does not exist", True, "check_container_name"), - ErrorPattern(r"[Ee]rror: No such container", "container_not_found", "docker", - "Container does not exist", True, "check_container_name"), + ErrorPattern( + r"[Nn]o such container", + "container_not_found", + "docker", + "Container does not exist", + True, + "check_container_name", + ), + ErrorPattern( + r"[Ee]rror: No such container", + "container_not_found", + "docker", + "Container does not exist", + True, + "check_container_name", + ), # Image not found - ErrorPattern(r"[Uu]nable to find image", "image_not_found", "docker", - "Docker image not found locally", True, "pull_image"), - ErrorPattern(r"[Rr]epository.*not found", "image_not_found", "docker", - "Docker image repository not found", True, "check_image_name"), - ErrorPattern(r"manifest.*not found", "manifest_not_found", "docker", - "Image manifest not found", True, "check_image_tag"), + ErrorPattern( + r"[Uu]nable to find image", + "image_not_found", + "docker", + "Docker image not found locally", + True, + "pull_image", + ), + ErrorPattern( + r"[Rr]epository.*not found", + "image_not_found", + "docker", + "Docker image repository not found", + True, + "check_image_name", + ), + ErrorPattern( + r"manifest.*not found", + "manifest_not_found", + "docker", + "Image manifest not found", + True, + "check_image_tag", + ), # Container already running/stopped - ErrorPattern(r"is 
already running", "container_already_running", "docker", - "Container is already running", True, "stop_or_use_existing"), - ErrorPattern(r"is not running", "container_not_running", "docker", - "Container is not running", True, "start_container"), + ErrorPattern( + r"is already running", + "container_already_running", + "docker", + "Container is already running", + True, + "stop_or_use_existing", + ), + ErrorPattern( + r"is not running", + "container_not_running", + "docker", + "Container is not running", + True, + "start_container", + ), # Port conflicts - ErrorPattern(r"[Pp]ort.*already allocated", "port_in_use", "docker", - "Port is already in use", True, "free_port_or_use_different"), - ErrorPattern(r"[Bb]ind.*address already in use", "port_in_use", "docker", - "Port is already in use", True, "free_port_or_use_different"), + ErrorPattern( + r"[Pp]ort.*already allocated", + "port_in_use", + "docker", + "Port is already in use", + True, + "free_port_or_use_different", + ), + ErrorPattern( + r"[Bb]ind.*address already in use", + "port_in_use", + "docker", + "Port is already in use", + True, + "free_port_or_use_different", + ), # Volume errors - ErrorPattern(r"[Vv]olume.*not found", "volume_not_found", "docker", - "Docker volume not found", True, "create_volume"), - ErrorPattern(r"[Mm]ount.*denied", "mount_denied", "docker", - "Mount point access denied", True, "check_mount_permissions"), + ErrorPattern( + r"[Vv]olume.*not found", + "volume_not_found", + "docker", + "Docker volume not found", + True, + "create_volume", + ), + ErrorPattern( + r"[Mm]ount.*denied", + "mount_denied", + "docker", + "Mount point access denied", + True, + "check_mount_permissions", + ), # Network errors - ErrorPattern(r"[Nn]etwork.*not found", "network_not_found", "docker", - "Docker network not found", True, "create_network"), + ErrorPattern( + r"[Nn]etwork.*not found", + "network_not_found", + "docker", + "Docker network not found", + True, + "create_network", + ), # Daemon errors - 
ErrorPattern(r"[Cc]annot connect to the Docker daemon", "docker_daemon_not_running", "docker", - "Docker daemon is not running", True, "start_docker_daemon"), - ErrorPattern(r"[Ii]s the docker daemon running", "docker_daemon_not_running", "docker", - "Docker daemon is not running", True, "start_docker_daemon"), + ErrorPattern( + r"[Cc]annot connect to the Docker daemon", + "docker_daemon_not_running", + "docker", + "Docker daemon is not running", + True, + "start_docker_daemon", + ), + ErrorPattern( + r"[Ii]s the docker daemon running", + "docker_daemon_not_running", + "docker", + "Docker daemon is not running", + True, + "start_docker_daemon", + ), # OOM errors - ErrorPattern(r"[Oo]ut of memory", "container_oom", "docker", - "Container ran out of memory", True, "increase_memory_limit"), + ErrorPattern( + r"[Oo]ut of memory", + "container_oom", + "docker", + "Container ran out of memory", + True, + "increase_memory_limit", + ), # Exec errors - ErrorPattern(r"[Oo]CI runtime.*not found", "runtime_not_found", "docker", - "Container runtime not found", False, "check_docker_installation"), + ErrorPattern( + r"[Oo]CI runtime.*not found", + "runtime_not_found", + "docker", + "Container runtime not found", + False, + "check_docker_installation", + ), ] # Category 17: Login/Credential Required Errors LOGIN_REQUIRED_ERRORS = [ # Docker/Container registry login errors - ErrorPattern(r"[Uu]sername.*[Rr]equired", "docker_username_required", "login_required", - "Docker username required", True, "prompt_docker_login"), - ErrorPattern(r"[Nn]on-null [Uu]sername", "docker_username_required", "login_required", - "Docker username required", True, "prompt_docker_login"), - ErrorPattern(r"unauthorized.*authentication required", "docker_auth_required", "login_required", - "Docker authentication required", True, "prompt_docker_login"), - ErrorPattern(r"denied.*requested access", "docker_access_denied", "login_required", - "Docker registry access denied", True, "prompt_docker_login"), - 
ErrorPattern(r"denied:.*access", "docker_access_denied", "login_required", - "Docker registry access denied", True, "prompt_docker_login"), - ErrorPattern(r"access.*denied", "docker_access_denied", "login_required", - "Docker registry access denied", True, "prompt_docker_login"), - ErrorPattern(r"no basic auth credentials", "docker_no_credentials", "login_required", - "Docker credentials not found", True, "prompt_docker_login"), - ErrorPattern(r"docker login", "docker_login_needed", "login_required", - "Docker login required", True, "prompt_docker_login"), + ErrorPattern( + r"[Uu]sername.*[Rr]equired", + "docker_username_required", + "login_required", + "Docker username required", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"[Nn]on-null [Uu]sername", + "docker_username_required", + "login_required", + "Docker username required", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"unauthorized.*authentication required", + "docker_auth_required", + "login_required", + "Docker authentication required", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"denied.*requested access", + "docker_access_denied", + "login_required", + "Docker registry access denied", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"denied:.*access", + "docker_access_denied", + "login_required", + "Docker registry access denied", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"access.*denied", + "docker_access_denied", + "login_required", + "Docker registry access denied", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"no basic auth credentials", + "docker_no_credentials", + "login_required", + "Docker credentials not found", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"docker login", + "docker_login_needed", + "login_required", + "Docker login required", + True, + "prompt_docker_login", + ), # ghcr.io (GitHub Container Registry) specific errors - ErrorPattern(r"ghcr\.io.*denied", "ghcr_access_denied", "login_required", - 
"GitHub Container Registry access denied - login required", True, "prompt_docker_login"), - ErrorPattern(r"Head.*ghcr\.io.*denied", "ghcr_access_denied", "login_required", - "GitHub Container Registry access denied - login required", True, "prompt_docker_login"), + ErrorPattern( + r"ghcr\.io.*denied", + "ghcr_access_denied", + "login_required", + "GitHub Container Registry access denied - login required", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"Head.*ghcr\.io.*denied", + "ghcr_access_denied", + "login_required", + "GitHub Container Registry access denied - login required", + True, + "prompt_docker_login", + ), # Generic registry denied patterns - ErrorPattern(r"Error response from daemon.*denied", "registry_access_denied", "login_required", - "Container registry access denied - login may be required", True, "prompt_docker_login"), - ErrorPattern(r"pull access denied", "pull_access_denied", "login_required", - "Pull access denied - login required or image doesn't exist", True, "prompt_docker_login"), - ErrorPattern(r"requested resource.*denied", "resource_access_denied", "login_required", - "Resource access denied - authentication required", True, "prompt_docker_login"), - + ErrorPattern( + r"Error response from daemon.*denied", + "registry_access_denied", + "login_required", + "Container registry access denied - login may be required", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"pull access denied", + "pull_access_denied", + "login_required", + "Pull access denied - login required or image doesn't exist", + True, + "prompt_docker_login", + ), + ErrorPattern( + r"requested resource.*denied", + "resource_access_denied", + "login_required", + "Resource access denied - authentication required", + True, + "prompt_docker_login", + ), # Git credential errors - ErrorPattern(r"[Cc]ould not read.*[Uu]sername", "git_username_required", "login_required", - "Git username required", True, "prompt_git_login"), - 
ErrorPattern(r"[Ff]atal:.*[Aa]uthentication failed", "git_auth_failed", "login_required", - "Git authentication failed", True, "prompt_git_login"), - ErrorPattern(r"[Pp]assword.*authentication.*removed", "git_token_required", "login_required", - "Git token required (password auth disabled)", True, "prompt_git_token"), - ErrorPattern(r"[Pp]ermission denied.*publickey", "git_ssh_required", "login_required", - "Git SSH key required", True, "setup_git_ssh"), - + ErrorPattern( + r"[Cc]ould not read.*[Uu]sername", + "git_username_required", + "login_required", + "Git username required", + True, + "prompt_git_login", + ), + ErrorPattern( + r"[Ff]atal:.*[Aa]uthentication failed", + "git_auth_failed", + "login_required", + "Git authentication failed", + True, + "prompt_git_login", + ), + ErrorPattern( + r"[Pp]assword.*authentication.*removed", + "git_token_required", + "login_required", + "Git token required (password auth disabled)", + True, + "prompt_git_token", + ), + ErrorPattern( + r"[Pp]ermission denied.*publickey", + "git_ssh_required", + "login_required", + "Git SSH key required", + True, + "setup_git_ssh", + ), # npm login errors - ErrorPattern(r"npm ERR!.*E401", "npm_auth_required", "login_required", - "npm authentication required", True, "prompt_npm_login"), - ErrorPattern(r"npm ERR!.*ENEEDAUTH", "npm_need_auth", "login_required", - "npm authentication needed", True, "prompt_npm_login"), - ErrorPattern(r"You must be logged in", "npm_login_required", "login_required", - "npm login required", True, "prompt_npm_login"), - + ErrorPattern( + r"npm ERR!.*E401", + "npm_auth_required", + "login_required", + "npm authentication required", + True, + "prompt_npm_login", + ), + ErrorPattern( + r"npm ERR!.*ENEEDAUTH", + "npm_need_auth", + "login_required", + "npm authentication needed", + True, + "prompt_npm_login", + ), + ErrorPattern( + r"You must be logged in", + "npm_login_required", + "login_required", + "npm login required", + True, + "prompt_npm_login", + ), # AWS 
credential errors - ErrorPattern(r"[Uu]nable to locate credentials", "aws_no_credentials", "login_required", - "AWS credentials not configured", True, "prompt_aws_configure"), - ErrorPattern(r"[Ii]nvalid[Aa]ccess[Kk]ey", "aws_invalid_key", "login_required", - "AWS access key invalid", True, "prompt_aws_configure"), - ErrorPattern(r"[Ss]ignature.*[Dd]oes[Nn]ot[Mm]atch", "aws_secret_invalid", "login_required", - "AWS secret key invalid", True, "prompt_aws_configure"), - ErrorPattern(r"[Ee]xpired[Tt]oken", "aws_token_expired", "login_required", - "AWS token expired", True, "prompt_aws_configure"), - + ErrorPattern( + r"[Uu]nable to locate credentials", + "aws_no_credentials", + "login_required", + "AWS credentials not configured", + True, + "prompt_aws_configure", + ), + ErrorPattern( + r"[Ii]nvalid[Aa]ccess[Kk]ey", + "aws_invalid_key", + "login_required", + "AWS access key invalid", + True, + "prompt_aws_configure", + ), + ErrorPattern( + r"[Ss]ignature.*[Dd]oes[Nn]ot[Mm]atch", + "aws_secret_invalid", + "login_required", + "AWS secret key invalid", + True, + "prompt_aws_configure", + ), + ErrorPattern( + r"[Ee]xpired[Tt]oken", + "aws_token_expired", + "login_required", + "AWS token expired", + True, + "prompt_aws_configure", + ), # PyPI/pip login errors - ErrorPattern(r"HTTPError: 403.*upload", "pypi_auth_required", "login_required", - "PyPI authentication required", True, "prompt_pypi_login"), - + ErrorPattern( + r"HTTPError: 403.*upload", + "pypi_auth_required", + "login_required", + "PyPI authentication required", + True, + "prompt_pypi_login", + ), # Generic credential prompts - ErrorPattern(r"[Ee]nter.*[Uu]sername", "username_prompt", "login_required", - "Username required", True, "prompt_credentials"), - ErrorPattern(r"[Ee]nter.*[Pp]assword", "password_prompt", "login_required", - "Password required", True, "prompt_credentials"), - ErrorPattern(r"[Aa]ccess [Tt]oken.*[Rr]equired", "token_required", "login_required", - "Access token required", True, 
"prompt_token"), - ErrorPattern(r"[Aa][Pp][Ii].*[Kk]ey.*[Rr]equired", "api_key_required", "login_required", - "API key required", True, "prompt_api_key"), + ErrorPattern( + r"[Ee]nter.*[Uu]sername", + "username_prompt", + "login_required", + "Username required", + True, + "prompt_credentials", + ), + ErrorPattern( + r"[Ee]nter.*[Pp]assword", + "password_prompt", + "login_required", + "Password required", + True, + "prompt_credentials", + ), + ErrorPattern( + r"[Aa]ccess [Tt]oken.*[Rr]equired", + "token_required", + "login_required", + "Access token required", + True, + "prompt_token", + ), + ErrorPattern( + r"[Aa][Pp][Ii].*[Kk]ey.*[Rr]equired", + "api_key_required", + "login_required", + "API key required", + True, + "prompt_api_key", + ), ] # Category 10: Device & Hardware Errors DEVICE_ERRORS = [ - ErrorPattern(r"[Nn]o such device", "no_device", "device", - "Device not found", False, "check_device"), - ErrorPattern(r"[Dd]evice not configured", "device_not_configured", "device", - "Device not configured", False, "configure_device"), - ErrorPattern(r"[Hh]ardware error", "hardware_error", "device", - "Hardware error", False, "check_hardware", "critical"), - ErrorPattern(r"[Dd]evice offline", "device_offline", "device", - "Device offline", False, "bring_online"), - ErrorPattern(r"[Mm]edia not present", "no_media", "device", - "No media in device", False, "insert_media"), - ErrorPattern(r"[Rr]ead error", "read_error", "device", - "Device read error", False, "check_disk", "critical"), - ErrorPattern(r"[Ww]rite error", "write_error", "device", - "Device write error", False, "check_disk", "critical"), + ErrorPattern( + r"[Nn]o such device", "no_device", "device", "Device not found", False, "check_device" + ), + ErrorPattern( + r"[Dd]evice not configured", + "device_not_configured", + "device", + "Device not configured", + False, + "configure_device", + ), + ErrorPattern( + r"[Hh]ardware error", + "hardware_error", + "device", + "Hardware error", + False, + 
"check_hardware", + "critical", + ), + ErrorPattern( + r"[Dd]evice offline", "device_offline", "device", "Device offline", False, "bring_online" + ), + ErrorPattern( + r"[Mm]edia not present", "no_media", "device", "No media in device", False, "insert_media" + ), + ErrorPattern( + r"[Rr]ead error", + "read_error", + "device", + "Device read error", + False, + "check_disk", + "critical", + ), + ErrorPattern( + r"[Ww]rite error", + "write_error", + "device", + "Device write error", + False, + "check_disk", + "critical", + ), ] # Category 11: Compilation & Build Errors BUILD_ERRORS = [ - ErrorPattern(r"[Nn]o rule to make target", "no_make_rule", "build", - "Make target not found", False, "check_makefile"), - ErrorPattern(r"[Mm]issing separator", "missing_separator", "build", - "Makefile syntax error", False, "fix_makefile"), - ErrorPattern(r"[Uu]ndefined reference", "undefined_reference", "build", - "Undefined symbol", True, "add_library"), - ErrorPattern(r"[Ss]ymbol lookup error", "symbol_lookup", "build", - "Symbol not found", True, "fix_ldpath"), - ErrorPattern(r"[Ll]ibrary not found", "library_not_found", "build", - "Library not found", True, "install_lib"), - ErrorPattern(r"[Hh]eader.*not found", "header_not_found", "build", - "Header file not found", True, "install_dev"), - ErrorPattern(r"[Rr]elocation error", "relocation_error", "build", - "Relocation error", True, "fix_ldpath"), - ErrorPattern(r"[Cc]ompilation terminated", "compilation_failed", "build", - "Compilation failed", False, "check_errors"), + ErrorPattern( + r"[Nn]o rule to make target", + "no_make_rule", + "build", + "Make target not found", + False, + "check_makefile", + ), + ErrorPattern( + r"[Mm]issing separator", + "missing_separator", + "build", + "Makefile syntax error", + False, + "fix_makefile", + ), + ErrorPattern( + r"[Uu]ndefined reference", + "undefined_reference", + "build", + "Undefined symbol", + True, + "add_library", + ), + ErrorPattern( + r"[Ss]ymbol lookup error", "symbol_lookup", 
"build", "Symbol not found", True, "fix_ldpath" + ), + ErrorPattern( + r"[Ll]ibrary not found", + "library_not_found", + "build", + "Library not found", + True, + "install_lib", + ), + ErrorPattern( + r"[Hh]eader.*not found", + "header_not_found", + "build", + "Header file not found", + True, + "install_dev", + ), + ErrorPattern( + r"[Rr]elocation error", "relocation_error", "build", "Relocation error", True, "fix_ldpath" + ), + ErrorPattern( + r"[Cc]ompilation terminated", + "compilation_failed", + "build", + "Compilation failed", + False, + "check_errors", + ), ] # Category 12: Archive & Compression Errors ARCHIVE_ERRORS = [ - ErrorPattern(r"[Uu]nexpected end of file", "unexpected_eof_archive", "archive", - "Archive truncated", False, "redownload"), - ErrorPattern(r"[Cc]orrupt archive", "corrupt_archive", "archive", - "Archive corrupted", False, "redownload"), - ErrorPattern(r"[Ii]nvalid tar magic", "invalid_tar", "archive", - "Invalid tar archive", False, "check_format"), - ErrorPattern(r"[Cc]hecksum error", "checksum_error", "archive", - "Checksum mismatch", False, "redownload"), - ErrorPattern(r"[Nn]ot in gzip format", "not_gzip", "archive", - "Not gzip format", False, "check_format"), - ErrorPattern(r"[Dd]ecompression failed", "decompress_failed", "archive", - "Decompression failed", False, "check_format"), + ErrorPattern( + r"[Uu]nexpected end of file", + "unexpected_eof_archive", + "archive", + "Archive truncated", + False, + "redownload", + ), + ErrorPattern( + r"[Cc]orrupt archive", + "corrupt_archive", + "archive", + "Archive corrupted", + False, + "redownload", + ), + ErrorPattern( + r"[Ii]nvalid tar magic", + "invalid_tar", + "archive", + "Invalid tar archive", + False, + "check_format", + ), + ErrorPattern( + r"[Cc]hecksum error", "checksum_error", "archive", "Checksum mismatch", False, "redownload" + ), + ErrorPattern( + r"[Nn]ot in gzip format", "not_gzip", "archive", "Not gzip format", False, "check_format" + ), + ErrorPattern( + r"[Dd]ecompression 
failed", + "decompress_failed", + "archive", + "Decompression failed", + False, + "check_format", + ), ] # Category 13: Shell Script Errors SCRIPT_ERRORS = [ - ErrorPattern(r"[Bb]ad interpreter", "bad_interpreter", "script", - "Interpreter not found", True, "fix_shebang"), - ErrorPattern(r"[Ll]ine \d+:.*command not found", "script_cmd_not_found", "script", - "Command in script not found", True, "install_dependency"), - ErrorPattern(r"[Ii]nteger expression expected", "integer_expected", "script", - "Expected integer", False, "fix_syntax"), - ErrorPattern(r"[Cc]onditional binary operator expected", "conditional_expected", "script", - "Expected conditional", False, "fix_syntax"), + ErrorPattern( + r"[Bb]ad interpreter", + "bad_interpreter", + "script", + "Interpreter not found", + True, + "fix_shebang", + ), + ErrorPattern( + r"[Ll]ine \d+:.*command not found", + "script_cmd_not_found", + "script", + "Command in script not found", + True, + "install_dependency", + ), + ErrorPattern( + r"[Ii]nteger expression expected", + "integer_expected", + "script", + "Expected integer", + False, + "fix_syntax", + ), + ErrorPattern( + r"[Cc]onditional binary operator expected", + "conditional_expected", + "script", + "Expected conditional", + False, + "fix_syntax", + ), ] # Category 14: Environment & PATH Errors ENVIRONMENT_ERRORS = [ - ErrorPattern(r"[Vv]ariable not set", "var_not_set", "environment", - "Environment variable not set", True, "set_variable"), - ErrorPattern(r"[Pp][Aa][Tt][Hh] not set", "path_not_set", "environment", - "PATH not configured", True, "set_path"), - ErrorPattern(r"[Ee]nvironment corrupt", "env_corrupt", "environment", - "Environment corrupted", True, "reset_env"), - ErrorPattern(r"[Ll]ibrary path not found", "lib_path_missing", "environment", - "Library path missing", True, "set_ldpath"), - ErrorPattern(r"LD_LIBRARY_PATH", "ld_path_issue", "environment", - "Library path issue", True, "set_ldpath"), + ErrorPattern( + r"[Vv]ariable not set", + 
"var_not_set", + "environment", + "Environment variable not set", + True, + "set_variable", + ), + ErrorPattern( + r"[Pp][Aa][Tt][Hh] not set", + "path_not_set", + "environment", + "PATH not configured", + True, + "set_path", + ), + ErrorPattern( + r"[Ee]nvironment corrupt", + "env_corrupt", + "environment", + "Environment corrupted", + True, + "reset_env", + ), + ErrorPattern( + r"[Ll]ibrary path not found", + "lib_path_missing", + "environment", + "Library path missing", + True, + "set_ldpath", + ), + ErrorPattern( + r"LD_LIBRARY_PATH", "ld_path_issue", "environment", "Library path issue", True, "set_ldpath" + ), ] # Category 15: Service & System Errors # Category 16: Config File Errors (Nginx, Apache, etc.) CONFIG_ERRORS = [ # Nginx errors - ErrorPattern(r"nginx:.*\[emerg\]", "nginx_config_error", "config", - "Nginx configuration error", True, "fix_nginx_config"), - ErrorPattern(r"nginx.*syntax.*error", "nginx_syntax_error", "config", - "Nginx syntax error", True, "fix_nginx_config"), - ErrorPattern(r"nginx.*unexpected", "nginx_unexpected", "config", - "Nginx unexpected token", True, "fix_nginx_config"), - ErrorPattern(r"nginx.*unknown directive", "nginx_unknown_directive", "config", - "Nginx unknown directive", True, "fix_nginx_config"), - ErrorPattern(r"nginx.*test failed", "nginx_test_failed", "config", - "Nginx config test failed", True, "fix_nginx_config"), - ErrorPattern(r"nginx.*could not open", "nginx_file_error", "config", - "Nginx could not open file", True, "fix_nginx_permissions"), + ErrorPattern( + r"nginx:.*\[emerg\]", + "nginx_config_error", + "config", + "Nginx configuration error", + True, + "fix_nginx_config", + ), + ErrorPattern( + r"nginx.*syntax.*error", + "nginx_syntax_error", + "config", + "Nginx syntax error", + True, + "fix_nginx_config", + ), + ErrorPattern( + r"nginx.*unexpected", + "nginx_unexpected", + "config", + "Nginx unexpected token", + True, + "fix_nginx_config", + ), + ErrorPattern( + r"nginx.*unknown directive", + 
"nginx_unknown_directive", + "config", + "Nginx unknown directive", + True, + "fix_nginx_config", + ), + ErrorPattern( + r"nginx.*test failed", + "nginx_test_failed", + "config", + "Nginx config test failed", + True, + "fix_nginx_config", + ), + ErrorPattern( + r"nginx.*could not open", + "nginx_file_error", + "config", + "Nginx could not open file", + True, + "fix_nginx_permissions", + ), # Apache errors - ErrorPattern(r"apache.*syntax error", "apache_syntax_error", "config", - "Apache syntax error", True, "fix_apache_config"), - ErrorPattern(r"apache2?ctl.*configtest", "apache_config_error", "config", - "Apache config test failed", True, "fix_apache_config"), - ErrorPattern(r"[Ss]yntax error on line \d+", "config_line_error", "config", - "Config syntax error at line", True, "fix_config_line"), + ErrorPattern( + r"apache.*syntax error", + "apache_syntax_error", + "config", + "Apache syntax error", + True, + "fix_apache_config", + ), + ErrorPattern( + r"apache2?ctl.*configtest", + "apache_config_error", + "config", + "Apache config test failed", + True, + "fix_apache_config", + ), + ErrorPattern( + r"[Ss]yntax error on line \d+", + "config_line_error", + "config", + "Config syntax error at line", + True, + "fix_config_line", + ), # MySQL/MariaDB errors - ErrorPattern(r"mysql.*error.*config", "mysql_config_error", "config", - "MySQL configuration error", True, "fix_mysql_config"), + ErrorPattern( + r"mysql.*error.*config", + "mysql_config_error", + "config", + "MySQL configuration error", + True, + "fix_mysql_config", + ), # PostgreSQL errors - ErrorPattern(r"postgres.*error.*config", "postgres_config_error", "config", - "PostgreSQL configuration error", True, "fix_postgres_config"), + ErrorPattern( + r"postgres.*error.*config", + "postgres_config_error", + "config", + "PostgreSQL configuration error", + True, + "fix_postgres_config", + ), # Generic config errors - ErrorPattern(r"configuration.*syntax", "generic_config_syntax", "config", - "Configuration syntax 
error", True, "fix_config_syntax"), - ErrorPattern(r"invalid.*configuration", "invalid_config", "config", - "Invalid configuration", True, "fix_config_syntax"), - ErrorPattern(r"[Cc]onfig.*parse error", "config_parse_error", "config", - "Config parse error", True, "fix_config_syntax"), + ErrorPattern( + r"configuration.*syntax", + "generic_config_syntax", + "config", + "Configuration syntax error", + True, + "fix_config_syntax", + ), + ErrorPattern( + r"invalid.*configuration", + "invalid_config", + "config", + "Invalid configuration", + True, + "fix_config_syntax", + ), + ErrorPattern( + r"[Cc]onfig.*parse error", + "config_parse_error", + "config", + "Config parse error", + True, + "fix_config_syntax", + ), ] SERVICE_ERRORS = [ - ErrorPattern(r"[Ss]ervice failed to start", "service_failed", "service", - "Service failed to start", True, "check_service_logs"), - ErrorPattern(r"[Uu]nit.*failed", "unit_failed", "service", - "Systemd unit failed", True, "check_service_logs"), - ErrorPattern(r"[Jj]ob for.*\.service failed", "job_failed", "service", - "Service job failed", True, "check_service_logs"), - ErrorPattern(r"[Ff]ailed to start.*\.service", "start_failed", "service", - "Failed to start service", True, "check_service_logs"), - ErrorPattern(r"[Dd]ependency failed", "dependency_failed", "service", - "Service dependency failed", True, "start_dependency"), - ErrorPattern(r"[Ii]nactive.*dead", "service_inactive", "service", - "Service not running", True, "start_service"), - ErrorPattern(r"[Mm]asked", "service_masked", "service", - "Service is masked", True, "unmask_service"), - ErrorPattern(r"[Ee]nabled-runtime", "service_enabled_runtime", "service", - "Service enabled at runtime", False, "check_service"), - ErrorPattern(r"[Cc]ontrol process exited with error", "control_process_error", "service", - "Service control process failed", True, "check_service_logs"), - ErrorPattern(r"[Aa]ctivation.*timed out", "activation_timeout", "service", - "Service activation timed 
out", True, "check_service_logs"), + ErrorPattern( + r"[Ss]ervice failed to start", + "service_failed", + "service", + "Service failed to start", + True, + "check_service_logs", + ), + ErrorPattern( + r"[Uu]nit.*failed", + "unit_failed", + "service", + "Systemd unit failed", + True, + "check_service_logs", + ), + ErrorPattern( + r"[Jj]ob for.*\.service failed", + "job_failed", + "service", + "Service job failed", + True, + "check_service_logs", + ), + ErrorPattern( + r"[Ff]ailed to start.*\.service", + "start_failed", + "service", + "Failed to start service", + True, + "check_service_logs", + ), + ErrorPattern( + r"[Dd]ependency failed", + "dependency_failed", + "service", + "Service dependency failed", + True, + "start_dependency", + ), + ErrorPattern( + r"[Ii]nactive.*dead", + "service_inactive", + "service", + "Service not running", + True, + "start_service", + ), + ErrorPattern( + r"[Mm]asked", "service_masked", "service", "Service is masked", True, "unmask_service" + ), + ErrorPattern( + r"[Ee]nabled-runtime", + "service_enabled_runtime", + "service", + "Service enabled at runtime", + False, + "check_service", + ), + ErrorPattern( + r"[Cc]ontrol process exited with error", + "control_process_error", + "service", + "Service control process failed", + True, + "check_service_logs", + ), + ErrorPattern( + r"[Aa]ctivation.*timed out", + "activation_timeout", + "service", + "Service activation timed out", + True, + "check_service_logs", + ), ] # Combine all error patterns ALL_ERROR_PATTERNS = ( - DOCKER_ERRORS + # Check Docker errors first (common) - LOGIN_REQUIRED_ERRORS + # Check login errors (interactive) - CONFIG_ERRORS + # Check config errors (more specific) - COMMAND_SHELL_ERRORS + - FILE_DIRECTORY_ERRORS + - PERMISSION_ERRORS + - PROCESS_ERRORS + - MEMORY_ERRORS + - FILESYSTEM_ERRORS + - NETWORK_ERRORS + - PACKAGE_ERRORS + - USER_AUTH_ERRORS + - DEVICE_ERRORS + - BUILD_ERRORS + - ARCHIVE_ERRORS + - SCRIPT_ERRORS + - ENVIRONMENT_ERRORS + - SERVICE_ERRORS + 
DOCKER_ERRORS # Check Docker errors first (common) + + LOGIN_REQUIRED_ERRORS # Check login errors (interactive) + + CONFIG_ERRORS # Check config errors (more specific) + + COMMAND_SHELL_ERRORS + + FILE_DIRECTORY_ERRORS + + PERMISSION_ERRORS + + PROCESS_ERRORS + + MEMORY_ERRORS + + FILESYSTEM_ERRORS + + NETWORK_ERRORS + + PACKAGE_ERRORS + + USER_AUTH_ERRORS + + DEVICE_ERRORS + + BUILD_ERRORS + + ARCHIVE_ERRORS + + SCRIPT_ERRORS + + ENVIRONMENT_ERRORS + + SERVICE_ERRORS ) @@ -584,9 +1711,11 @@ class ErrorPattern: # Login/Credential Requirements Configuration # ============================================================================ + @dataclass class LoginRequirement: """Defines credentials required for a service login.""" + service: str display_name: str command_pattern: str # Regex to match commands that need this login @@ -740,40 +1869,86 @@ class LoginRequirement: UBUNTU_PACKAGE_MAP = { # Commands to packages - "nginx": "nginx", "apache2": "apache2", "httpd": "apache2", - "mysql": "mysql-server", "mysqld": "mysql-server", - "postgres": "postgresql", "psql": "postgresql-client", - "redis": "redis-server", "redis-server": "redis-server", - "mongo": "mongodb", "mongod": "mongodb", - "node": "nodejs", "npm": "npm", "yarn": "yarnpkg", - "python": "python3", "python3": "python3", "pip": "python3-pip", "pip3": "python3-pip", - "docker": "docker.io", "docker-compose": "docker-compose", - "git": "git", "curl": "curl", "wget": "wget", - "vim": "vim", "nano": "nano", "emacs": "emacs", - "gcc": "gcc", "g++": "g++", "make": "make", "cmake": "cmake", - "java": "default-jdk", "javac": "default-jdk", - "ruby": "ruby", "gem": "ruby", - "go": "golang-go", "cargo": "cargo", "rustc": "rustc", - "php": "php", "composer": "composer", - "ffmpeg": "ffmpeg", "imagemagick": "imagemagick", "convert": "imagemagick", - "htop": "htop", "tree": "tree", "jq": "jq", - "nc": "netcat-openbsd", "netcat": "netcat-openbsd", - "ss": "iproute2", "ip": "iproute2", - "dig": "dnsutils", "nslookup": 
"dnsutils", - "zip": "zip", "unzip": "unzip", - "tar": "tar", "gzip": "gzip", - "rsync": "rsync", "ssh": "openssh-client", "sshd": "openssh-server", - "screen": "screen", "tmux": "tmux", - "awk": "gawk", "sed": "sed", "grep": "grep", - "setfacl": "acl", "getfacl": "acl", - "lsof": "lsof", "strace": "strace", + "nginx": "nginx", + "apache2": "apache2", + "httpd": "apache2", + "mysql": "mysql-server", + "mysqld": "mysql-server", + "postgres": "postgresql", + "psql": "postgresql-client", + "redis": "redis-server", + "redis-server": "redis-server", + "mongo": "mongodb", + "mongod": "mongodb", + "node": "nodejs", + "npm": "npm", + "yarn": "yarnpkg", + "python": "python3", + "python3": "python3", + "pip": "python3-pip", + "pip3": "python3-pip", + "docker": "docker.io", + "docker-compose": "docker-compose", + "git": "git", + "curl": "curl", + "wget": "wget", + "vim": "vim", + "nano": "nano", + "emacs": "emacs", + "gcc": "gcc", + "g++": "g++", + "make": "make", + "cmake": "cmake", + "java": "default-jdk", + "javac": "default-jdk", + "ruby": "ruby", + "gem": "ruby", + "go": "golang-go", + "cargo": "cargo", + "rustc": "rustc", + "php": "php", + "composer": "composer", + "ffmpeg": "ffmpeg", + "imagemagick": "imagemagick", + "convert": "imagemagick", + "htop": "htop", + "tree": "tree", + "jq": "jq", + "nc": "netcat-openbsd", + "netcat": "netcat-openbsd", + "ss": "iproute2", + "ip": "iproute2", + "dig": "dnsutils", + "nslookup": "dnsutils", + "zip": "zip", + "unzip": "unzip", + "tar": "tar", + "gzip": "gzip", + "rsync": "rsync", + "ssh": "openssh-client", + "sshd": "openssh-server", + "screen": "screen", + "tmux": "tmux", + "awk": "gawk", + "sed": "sed", + "grep": "grep", + "setfacl": "acl", + "getfacl": "acl", + "lsof": "lsof", + "strace": "strace", # System monitoring tools - "sensors": "lm-sensors", "sensors-detect": "lm-sensors", - "htop": "htop", "iotop": "iotop", "iftop": "iftop", - "nmap": "nmap", "netstat": "net-tools", "ifconfig": "net-tools", - "smartctl": 
"smartmontools", "hdparm": "hdparm", + "sensors": "lm-sensors", + "sensors-detect": "lm-sensors", + "iotop": "iotop", + "iftop": "iftop", + "nmap": "nmap", + "netstat": "net-tools", + "ifconfig": "net-tools", + "smartctl": "smartmontools", + "hdparm": "hdparm", # Optional tools (may not be in all repos) - "snap": "snapd", "flatpak": "flatpak", + "snap": "snapd", + "flatpak": "flatpak", } UBUNTU_SERVICE_MAP = { @@ -794,12 +1969,13 @@ class LoginRequirement: # Error Diagnoser Class # ============================================================================ + class ErrorDiagnoser: """Comprehensive error diagnosis for all system error types.""" - + def __init__(self): self._compile_patterns() - + def _compile_patterns(self): """Pre-compile regex patterns for performance.""" self._compiled_patterns = [] @@ -809,7 +1985,7 @@ def _compile_patterns(self): self._compiled_patterns.append((compiled, ep)) except re.error: console.print(f"[yellow]Warning: Invalid pattern: {ep.pattern}[/yellow]") - + def extract_path_from_error(self, stderr: str, cmd: str) -> str | None: """Extract the problematic file path from an error message.""" patterns = [ @@ -824,49 +2000,62 @@ def extract_path_from_error(self, stderr: str, cmd: str) -> str | None: r"touch: cannot touch ['\"]?([/\w\.\-_]+)['\"]?", r"cp: cannot (?:create|stat|access) ['\"]?([/\w\.\-_]+)['\"]?", ] - + for pattern in patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: path = match.group(1) if path.startswith("/"): return path - + # Extract from command itself for part in cmd.split(): - if part.startswith("/") and any(c in part for c in ["/etc/", "/var/", "/usr/", "/home/", "/opt/", "/tmp/"]): + if part.startswith("/") and any( + c in part for c in ["/etc/", "/var/", "/usr/", "/home/", "/opt/", "/tmp/"] + ): return part - + return None - + def extract_service_from_error(self, stderr: str, cmd: str) -> str | None: """Extract service name from error message or command.""" cmd_parts = cmd.split() - + # From 
systemctl/service commands for i, part in enumerate(cmd_parts): if part in ["systemctl", "service"]: for j in range(i + 1, len(cmd_parts)): candidate = cmd_parts[j] - if candidate not in ["start", "stop", "restart", "reload", "status", - "enable", "disable", "is-active", "is-enabled", - "-q", "--quiet", "--no-pager"]: + if candidate not in [ + "start", + "stop", + "restart", + "reload", + "status", + "enable", + "disable", + "is-active", + "is-enabled", + "-q", + "--quiet", + "--no-pager", + ]: return candidate.replace(".service", "") - + # From error message patterns = [ r"(?:Unit|Service) ([a-zA-Z0-9\-_]+)(?:\.service)? (?:not found|failed|could not)", r"Failed to (?:start|stop|restart|enable|disable) ([a-zA-Z0-9\-_]+)", r"([a-zA-Z0-9\-_]+)\.service", ] - + for pattern in patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: return match.group(1).replace(".service", "") - + return None - + def extract_package_from_error(self, stderr: str, cmd: str) -> str | None: """Extract package name from error.""" patterns = [ @@ -875,14 +2064,14 @@ def extract_package_from_error(self, stderr: str, cmd: str) -> str | None: r"[Nn]o package '?([a-zA-Z0-9\-_\.]+)'? 
(?:found|available)", r"apt.*install.*?([a-zA-Z0-9\-_\.]+)", ] - + for pattern in patterns: match = re.search(pattern, stderr + " " + cmd, re.IGNORECASE) if match: return match.group(1) - + return None - + def extract_port_from_error(self, stderr: str) -> int | None: """Extract port number from error.""" patterns = [ @@ -890,32 +2079,32 @@ def extract_port_from_error(self, stderr: str) -> int | None: r"[Aa]ddress.*:(\d+)", r":(\d{2,5})\s", ] - + for pattern in patterns: match = re.search(pattern, stderr) if match: port = int(match.group(1)) if 1 <= port <= 65535: return port - + return None - + def _extract_container_name(self, stderr: str) -> str | None: """Extract Docker container name from error message.""" patterns = [ r'container name ["\'/]([a-zA-Z0-9_\-]+)["\'/]', r'["\'/]([a-zA-Z0-9_\-]+)["\'/] is already in use', r'container ["\']?([a-zA-Z0-9_\-]+)["\']?', - r'No such container:?\s*([a-zA-Z0-9_\-]+)', + r"No such container:?\s*([a-zA-Z0-9_\-]+)", ] - + for pattern in patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: return match.group(1) - + return None - + def _extract_image_name(self, stderr: str, cmd: str) -> str | None: """Extract Docker image name from error or command.""" # From command @@ -930,47 +2119,47 @@ def _extract_image_name(self, stderr: str, cmd: str) -> str | None: return candidate elif not candidate.startswith("-") and j == len(parts) - 1: return candidate - + # From error patterns = [ r'[Uu]nable to find image ["\']([^"\']+)["\']', r'repository ["\']?([^"\':\s]+(?::[^"\':\s]+)?)["\']? 
not found', - r'manifest for ([^\s]+) not found', + r"manifest for ([^\s]+) not found", ] - + for pattern in patterns: match = re.search(pattern, stderr) if match: return match.group(1) - + return None - + def _extract_port(self, stderr: str) -> str | None: """Extract port from Docker error.""" patterns = [ - r'[Pp]ort (\d+)', - r':(\d+)->', - r'address.*:(\d+)', - r'-p\s*(\d+):', + r"[Pp]ort (\d+)", + r":(\d+)->", + r"address.*:(\d+)", + r"-p\s*(\d+):", ] - + for pattern in patterns: match = re.search(pattern, stderr) if match: return match.group(1) - + return None - + def extract_config_file_and_line(self, stderr: str) -> tuple[str | None, int | None]: """Extract config file path and line number from error.""" patterns = [ - r'in\s+(/[^\s:]+):(\d+)', # "in /path:line" - r'at\s+(/[^\s:]+):(\d+)', # "at /path:line" - r'(/[^\s:]+):(\d+):', # "/path:line:" - r'line\s+(\d+)\s+of\s+(/[^\s:]+)', # "line X of /path" - r'(/[^\s:]+)\s+line\s+(\d+)', # "/path line X" + r"in\s+(/[^\s:]+):(\d+)", # "in /path:line" + r"at\s+(/[^\s:]+):(\d+)", # "at /path:line" + r"(/[^\s:]+):(\d+):", # "/path:line:" + r"line\s+(\d+)\s+of\s+(/[^\s:]+)", # "line X of /path" + r"(/[^\s:]+)\s+line\s+(\d+)", # "/path line X" ] - + for pattern in patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: @@ -979,9 +2168,9 @@ def extract_config_file_and_line(self, stderr: str) -> tuple[str | None, int | N return groups[0], int(groups[1]) elif len(groups) > 1 and groups[1].startswith("/"): return groups[1], int(groups[0]) - + return None, None - + def extract_command_from_error(self, stderr: str) -> str | None: """Extract the failing command name from error.""" patterns = [ @@ -990,18 +2179,18 @@ def extract_command_from_error(self, stderr: str) -> str | None: r"bash: ([a-zA-Z0-9\-_]+):", r"/usr/bin/env: '?([a-zA-Z0-9\-_]+)'?:", ] - + for pattern in patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: return match.group(1) - + return None - + def diagnose_error(self, cmd: 
str, stderr: str) -> dict[str, Any]: """ Comprehensive error diagnosis using pattern matching. - + Returns a detailed diagnosis dict with: - error_type: Specific error type - category: Error category (command_shell, network, etc.) @@ -1023,20 +2212,20 @@ def diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: "extracted_info": {}, "severity": "error", } - + stderr_lower = stderr.lower() - + # Extract common info diagnosis["extracted_path"] = self.extract_path_from_error(stderr, cmd) diagnosis["extracted_info"]["service"] = self.extract_service_from_error(stderr, cmd) diagnosis["extracted_info"]["package"] = self.extract_package_from_error(stderr, cmd) diagnosis["extracted_info"]["port"] = self.extract_port_from_error(stderr) - + config_file, line_num = self.extract_config_file_and_line(stderr) if config_file: diagnosis["extracted_info"]["config_file"] = config_file diagnosis["extracted_info"]["line_num"] = line_num - + # Match against compiled patterns for compiled, ep in self._compiled_patterns: if compiled.search(stderr): @@ -1046,12 +2235,12 @@ def diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: diagnosis["can_auto_fix"] = ep.can_auto_fix diagnosis["fix_strategy"] = ep.fix_strategy diagnosis["severity"] = ep.severity - + # Generate fix commands based on category and strategy self._generate_fix_commands(diagnosis, cmd, stderr) - + return diagnosis - + # Fallback: try generic patterns if "permission denied" in stderr_lower: diagnosis["error_type"] = "permission_denied" @@ -1061,7 +2250,7 @@ def diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: diagnosis["fix_strategy"] = "use_sudo" if not cmd.strip().startswith("sudo"): diagnosis["fix_commands"] = [f"sudo {cmd}"] - + elif "not found" in stderr_lower or "no such" in stderr_lower: diagnosis["error_type"] = "not_found" diagnosis["category"] = "file_directory" @@ -1069,47 +2258,44 @@ def diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: if 
diagnosis["extracted_path"]: diagnosis["can_auto_fix"] = True diagnosis["fix_strategy"] = "create_path" - + return diagnosis - + def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None: """Generate specific fix commands based on the error type and strategy.""" strategy = diagnosis.get("fix_strategy", "") extracted = diagnosis.get("extracted_info", {}) path = diagnosis.get("extracted_path") - + # Permission/Sudo strategies if strategy == "use_sudo": if not cmd.strip().startswith("sudo"): diagnosis["fix_commands"] = [f"sudo {cmd}"] - + # Path creation strategies elif strategy == "create_path": if path: parent = os.path.dirname(path) if parent: diagnosis["fix_commands"] = [f"sudo mkdir -p {parent}"] - + # Package installation elif strategy == "install_package": missing_cmd = self.extract_command_from_error(stderr) or cmd.split()[0] pkg = UBUNTU_PACKAGE_MAP.get(missing_cmd, missing_cmd) - diagnosis["fix_commands"] = [ - "sudo apt-get update", - f"sudo apt-get install -y {pkg}" - ] + diagnosis["fix_commands"] = ["sudo apt-get update", f"sudo apt-get install -y {pkg}"] diagnosis["extracted_info"]["missing_command"] = missing_cmd diagnosis["extracted_info"]["suggested_package"] = pkg - + # Service management elif strategy == "start_service" or strategy == "check_service": service = extracted.get("service") if service: diagnosis["fix_commands"] = [ f"sudo systemctl start {service}", - f"sudo systemctl status {service}" + f"sudo systemctl status {service}", ] - + elif strategy == "check_service_logs": service = extracted.get("service") if service: @@ -1142,22 +2328,22 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None f"sudo systemctl reset-failed {service} 2>/dev/null || true", f"sudo systemctl start {service}", ] - + elif strategy == "unmask_service": service = extracted.get("service") if service: diagnosis["fix_commands"] = [ f"sudo systemctl unmask {service}", - f"sudo systemctl start {service}" + f"sudo 
systemctl start {service}", ] - + # Config file fixes elif strategy in ["fix_nginx_config", "fix_nginx_permissions"]: config_file = extracted.get("config_file") line_num = extracted.get("line_num") if config_file: diagnosis["fix_commands"] = [ - f"sudo nginx -t 2>&1", + "sudo nginx -t 2>&1", f"# Check config at: {config_file}" + (f":{line_num}" if line_num else ""), ] else: @@ -1165,7 +2351,7 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None "sudo nginx -t 2>&1", "# Check /etc/nginx/nginx.conf and sites-enabled/*", ] - + elif strategy == "fix_apache_config": config_file = extracted.get("config_file") diagnosis["fix_commands"] = [ @@ -1174,7 +2360,7 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None ] if config_file: diagnosis["fix_commands"].append(f"# Check config at: {config_file}") - + elif strategy in ["fix_config_syntax", "fix_config_line"]: config_file = extracted.get("config_file") line_num = extracted.get("line_num") @@ -1188,138 +2374,154 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None f"sudo cat {config_file}", f"# Edit: sudo nano {config_file}", ] - + elif strategy == "fix_mysql_config": diagnosis["fix_commands"] = [ "sudo mysql --help --verbose 2>&1 | grep -A 1 'Default options'", "# Edit: sudo nano /etc/mysql/mysql.conf.d/mysqld.cnf", ] - + elif strategy == "fix_postgres_config": diagnosis["fix_commands"] = [ "sudo -u postgres psql -c 'SHOW config_file;'", "# Edit: sudo nano /etc/postgresql/*/main/postgresql.conf", ] - + # Package manager elif strategy == "clear_lock": diagnosis["fix_commands"] = [ "sudo rm -f /var/lib/dpkg/lock-frontend", "sudo rm -f /var/lib/dpkg/lock", "sudo rm -f /var/cache/apt/archives/lock", - "sudo dpkg --configure -a" + "sudo dpkg --configure -a", ] - + elif strategy == "update_repos": pkg = extracted.get("package") diagnosis["fix_commands"] = ["sudo apt-get update"] if pkg: diagnosis["fix_commands"].append(f"apt-cache search 
{pkg}") - + elif strategy == "fix_dependencies": diagnosis["fix_commands"] = [ "sudo apt-get install -f", "sudo dpkg --configure -a", "sudo apt-get update", - "sudo apt-get upgrade" + "sudo apt-get upgrade", ] - + elif strategy == "fix_broken": diagnosis["fix_commands"] = [ "sudo apt-get install -f", "sudo dpkg --configure -a", "sudo apt-get clean", - "sudo apt-get update" + "sudo apt-get update", ] - + elif strategy == "clean_apt": diagnosis["fix_commands"] = [ "sudo apt-get clean", "sudo rm -rf /var/lib/apt/lists/*", - "sudo apt-get update" + "sudo apt-get update", ] - + elif strategy == "fix_gpg": diagnosis["fix_commands"] = [ "sudo apt-key adv --refresh-keys --keyserver keyserver.ubuntu.com", - "sudo apt-get update" + "sudo apt-get update", ] - + # Docker strategies elif strategy == "remove_or_rename_container": container_name = self._extract_container_name(stderr) if container_name: diagnosis["fix_commands"] = [ f"docker rm -f {container_name}", - "# Or rename: docker rename {container_name} {container_name}_old" + "# Or rename: docker rename {container_name} {container_name}_old", ] - diagnosis["suggestion"] = f"Container '{container_name}' already exists. Removing it and retrying." + diagnosis["suggestion"] = ( + f"Container '{container_name}' already exists. Removing it and retrying." 
+ ) else: diagnosis["fix_commands"] = [ "docker ps -a", - "# Then: docker rm -f " + "# Then: docker rm -f ", ] - + elif strategy == "stop_or_use_existing": container_name = self._extract_container_name(stderr) diagnosis["fix_commands"] = [ f"docker stop {container_name}" if container_name else "docker stop ", - "# Or connect to existing: docker exec -it /bin/sh" + "# Or connect to existing: docker exec -it /bin/sh", ] - + elif strategy == "start_container": container_name = self._extract_container_name(stderr) diagnosis["fix_commands"] = [ f"docker start {container_name}" if container_name else "docker start " ] - + elif strategy == "pull_image": image_name = self._extract_image_name(stderr, cmd) diagnosis["fix_commands"] = [ f"docker pull {image_name}" if image_name else "docker pull " ] - + elif strategy == "free_port_or_use_different": port = self._extract_port(stderr) if port: diagnosis["fix_commands"] = [ f"sudo lsof -i :{port}", f"# Kill process using port: sudo kill $(sudo lsof -t -i:{port})", - f"# Or use different port: -p {int(port)+1}:{port}" + f"# Or use different port: -p {int(port)+1}:{port}", ] else: diagnosis["fix_commands"] = ["docker ps", "# Check which ports are in use"] - + elif strategy == "start_docker_daemon": diagnosis["fix_commands"] = [ "sudo systemctl start docker", - "sudo systemctl status docker" + "sudo systemctl status docker", ] - + elif strategy == "create_volume": volume_name = extracted.get("volume") diagnosis["fix_commands"] = [ - f"docker volume create {volume_name}" if volume_name else "docker volume create " + ( + f"docker volume create {volume_name}" + if volume_name + else "docker volume create " + ) ] - + elif strategy == "create_network": network_name = extracted.get("network") diagnosis["fix_commands"] = [ - f"docker network create {network_name}" if network_name else "docker network create " + ( + f"docker network create {network_name}" + if network_name + else "docker network create " + ) ] - + elif strategy == 
"check_container_name": diagnosis["fix_commands"] = [ "docker ps -a", - "# Check container names and use correct one" + "# Check container names and use correct one", ] - + # Timeout strategies elif strategy == "retry_with_longer_timeout": # Check if this is an interactive command that needs TTY - interactive_patterns = ["docker exec -it", "docker run -it", "-ti ", "ollama run", "ollama chat"] + interactive_patterns = [ + "docker exec -it", + "docker run -it", + "-ti ", + "ollama run", + "ollama chat", + ] is_interactive = any(p in cmd.lower() for p in interactive_patterns) - + if is_interactive: diagnosis["fix_commands"] = [ "# This is an INTERACTIVE command that requires a terminal (TTY)", @@ -1327,54 +2529,51 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None f"# {cmd}", ] diagnosis["description"] = "Interactive command cannot run in background" - diagnosis["suggestion"] = "This command needs interactive input. Please run it in a separate terminal." + diagnosis["suggestion"] = ( + "This command needs interactive input. Please run it in a separate terminal." + ) else: diagnosis["fix_commands"] = [ "# This command timed out - it may still be running or need more time", "# For docker pull: The image may be very large, try again with better network", "# Check if the operation completed in background", ] - diagnosis["suggestion"] = "The operation timed out. This often happens with large downloads. You can retry manually." + diagnosis["suggestion"] = ( + "The operation timed out. This often happens with large downloads. You can retry manually." 
+ ) diagnosis["can_auto_fix"] = False # Let user decide what to do - + # Network strategies elif strategy == "check_network": - diagnosis["fix_commands"] = [ - "ping -c 2 8.8.8.8", - "ip route", - "cat /etc/resolv.conf" - ] - + diagnosis["fix_commands"] = ["ping -c 2 8.8.8.8", "ip route", "cat /etc/resolv.conf"] + elif strategy == "check_dns": diagnosis["fix_commands"] = [ "cat /etc/resolv.conf", "systemd-resolve --status", - "sudo systemctl restart systemd-resolved" + "sudo systemctl restart systemd-resolved", ] - + elif strategy == "check_service": port = extracted.get("port") if port: diagnosis["fix_commands"] = [ f"sudo ss -tlnp sport = :{port}", - f"sudo lsof -i :{port}" + f"sudo lsof -i :{port}", ] - + elif strategy == "find_port_user": port = extracted.get("port") if port: diagnosis["fix_commands"] = [ f"sudo lsof -i :{port}", f"sudo ss -tlnp sport = :{port}", - f"# Kill process: sudo kill " + "# Kill process: sudo kill ", ] - + elif strategy == "check_firewall": - diagnosis["fix_commands"] = [ - "sudo ufw status", - "sudo iptables -L -n" - ] - + diagnosis["fix_commands"] = ["sudo ufw status", "sudo iptables -L -n"] + # Disk/Memory strategies elif strategy == "free_disk": diagnosis["fix_commands"] = [ @@ -1382,55 +2581,52 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None "sudo apt-get clean", "sudo apt-get autoremove -y", "sudo journalctl --vacuum-size=100M", - "du -sh /var/log/*" + "du -sh /var/log/*", ] - + elif strategy == "free_memory": diagnosis["fix_commands"] = [ "free -h", "sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches", - "top -b -n 1 | head -20" + "top -b -n 1 | head -20", ] - + elif strategy == "increase_ulimit": diagnosis["fix_commands"] = [ "ulimit -a", "# Add to /etc/security/limits.conf:", "# * soft nofile 65535", - "# * hard nofile 65535" + "# * hard nofile 65535", ] - + # Filesystem strategies elif strategy == "remount_rw": if path: mount_point = self._find_mount_point(path) if mount_point: 
diagnosis["fix_commands"] = [f"sudo mount -o remount,rw {mount_point}"] - + elif strategy == "create_mountpoint": if path: diagnosis["fix_commands"] = [f"sudo mkdir -p {path}"] - + elif strategy == "mount_fs": diagnosis["fix_commands"] = ["mount", "cat /etc/fstab"] - + # User strategies elif strategy == "create_user": # Extract username from error if possible match = re.search(r"user '?([a-zA-Z0-9_-]+)'?", stderr, re.IGNORECASE) if match: user = match.group(1) - diagnosis["fix_commands"] = [ - f"sudo useradd -m {user}", - f"sudo passwd {user}" - ] - + diagnosis["fix_commands"] = [f"sudo useradd -m {user}", f"sudo passwd {user}"] + elif strategy == "create_group": match = re.search(r"group '?([a-zA-Z0-9_-]+)'?", stderr, re.IGNORECASE) if match: group = match.group(1) diagnosis["fix_commands"] = [f"sudo groupadd {group}"] - + # Build strategies elif strategy == "install_lib": lib_match = re.search(r"library.*?([a-zA-Z0-9_-]+)", stderr, re.IGNORECASE) @@ -1438,41 +2634,38 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None lib = lib_match.group(1) diagnosis["fix_commands"] = [ f"apt-cache search {lib}", - f"# Install with: sudo apt-get install lib{lib}-dev" + f"# Install with: sudo apt-get install lib{lib}-dev", ] - + elif strategy == "install_dev": header_match = re.search(r"([a-zA-Z0-9_/]+\.h)", stderr) if header_match: header = header_match.group(1) diagnosis["fix_commands"] = [ f"apt-file search {header}", - "# Install the -dev package that provides this header" + "# Install the -dev package that provides this header", ] - + elif strategy == "fix_ldpath": diagnosis["fix_commands"] = [ "sudo ldconfig", "echo $LD_LIBRARY_PATH", - "cat /etc/ld.so.conf.d/*.conf" + "cat /etc/ld.so.conf.d/*.conf", ] - + # Wait/Retry strategies elif strategy == "wait_retry": - diagnosis["fix_commands"] = [ - "sleep 2", - f"# Then retry: {cmd}" - ] - + diagnosis["fix_commands"] = ["sleep 2", f"# Then retry: {cmd}"] + # Script strategies elif strategy == 
"fix_shebang": if path: diagnosis["fix_commands"] = [ f"head -1 {path}", "# Fix shebang line to point to correct interpreter", - "# e.g., #!/usr/bin/env python3" + "# e.g., #!/usr/bin/env python3", ] - + # Environment strategies elif strategy == "set_variable": var_match = re.search(r"([A-Z_]+).*not set", stderr, re.IGNORECASE) @@ -1480,55 +2673,55 @@ def _generate_fix_commands(self, diagnosis: dict, cmd: str, stderr: str) -> None var = var_match.group(1) diagnosis["fix_commands"] = [ f"export {var}=", - f"# Add to ~/.bashrc: export {var}=" + f"# Add to ~/.bashrc: export {var}=", ] - + elif strategy == "set_path": diagnosis["fix_commands"] = [ "echo $PATH", "export PATH=$PATH:/usr/local/bin", - "# Add to ~/.bashrc" + "# Add to ~/.bashrc", ] - + elif strategy == "set_ldpath": diagnosis["fix_commands"] = [ "echo $LD_LIBRARY_PATH", "export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH", - "sudo ldconfig" + "sudo ldconfig", ] - + # Backup/Overwrite strategy elif strategy == "backup_overwrite": if path: diagnosis["fix_commands"] = [ f"sudo mv {path} {path}.backup", - f"# Then retry: {cmd}" + f"# Then retry: {cmd}", ] - + # Symlink strategy elif strategy == "fix_symlink": if path: diagnosis["fix_commands"] = [ f"ls -la {path}", f"readlink -f {path}", - f"# Remove broken symlink: sudo rm {path}" + f"# Remove broken symlink: sudo rm {path}", ] - + # Directory not empty elif strategy == "rm_recursive": if path: diagnosis["fix_commands"] = [ f"ls -la {path}", - f"# Remove recursively (CAUTION): sudo rm -rf {path}" + f"# Remove recursively (CAUTION): sudo rm -rf {path}", ] - + # Copy instead of link elif strategy == "copy_instead": diagnosis["fix_commands"] = [ "# Use cp instead of ln/mv for cross-device operations", - f"# cp -a " + "# cp -a ", ] - + def _find_mount_point(self, path: str) -> str | None: """Find the mount point for a given path.""" try: @@ -1546,46 +2739,49 @@ def _find_mount_point(self, path: str) -> str | None: # Login Handler Class # 
============================================================================ + class LoginHandler: """Handles interactive login/credential prompts for various services.""" - + CREDENTIALS_FILE = os.path.expanduser("~/.cortex/credentials.json") - + def __init__(self): self.cached_credentials: dict[str, dict] = {} self._ensure_credentials_dir() self._load_saved_credentials() - + def _ensure_credentials_dir(self) -> None: """Ensure the credentials directory exists with proper permissions.""" cred_dir = os.path.dirname(self.CREDENTIALS_FILE) if not os.path.exists(cred_dir): os.makedirs(cred_dir, mode=0o700, exist_ok=True) - + def _encode_credential(self, value: str) -> str: """Encode a credential value (basic obfuscation, not encryption).""" import base64 + return base64.b64encode(value.encode()).decode() - + def _decode_credential(self, encoded: str) -> str: """Decode a credential value.""" import base64 + try: return base64.b64decode(encoded.encode()).decode() except Exception: return "" - + def _load_saved_credentials(self) -> None: """Load saved credentials from file.""" import json - + if not os.path.exists(self.CREDENTIALS_FILE): return - + try: - with open(self.CREDENTIALS_FILE, 'r') as f: + with open(self.CREDENTIALS_FILE) as f: saved = json.load(f) - + # Decode all saved credentials for service, creds in saved.items(): decoded = {} @@ -1595,132 +2791,132 @@ def _load_saved_credentials(self) -> None: else: decoded[field] = self._decode_credential(value) self.cached_credentials[service] = decoded - - except (json.JSONDecodeError, IOError) as e: + + except (OSError, json.JSONDecodeError) as e: console.print(f"[dim]Note: Could not load saved credentials: {e}[/dim]") - + def _save_credentials(self, service: str, credentials: dict[str, str]) -> None: """Save credentials to file.""" import json from datetime import datetime - + # Load existing credentials all_creds = {} if os.path.exists(self.CREDENTIALS_FILE): try: - with open(self.CREDENTIALS_FILE, 'r') as f: + 
with open(self.CREDENTIALS_FILE) as f: all_creds = json.load(f) - except (json.JSONDecodeError, IOError): + except (OSError, json.JSONDecodeError): pass - + # Encode new credentials encoded = {} - for field, value in credentials.items(): + for field_name, value in credentials.items(): if value: # Only save non-empty values - encoded[field] = self._encode_credential(value) - + encoded[field_name] = self._encode_credential(value) + # Add metadata encoded["_saved_at"] = datetime.now().isoformat() - + all_creds[service] = encoded - + # Save to file with restricted permissions try: - with open(self.CREDENTIALS_FILE, 'w') as f: + with open(self.CREDENTIALS_FILE, "w") as f: json.dump(all_creds, f, indent=2) os.chmod(self.CREDENTIALS_FILE, 0o600) # Read/write only for owner console.print(f"[green]✓ Credentials saved to {self.CREDENTIALS_FILE}[/green]") - except IOError as e: + except OSError as e: console.print(f"[yellow]Warning: Could not save credentials: {e}[/yellow]") - + def _delete_saved_credentials(self, service: str) -> None: """Delete saved credentials for a service.""" import json - + if not os.path.exists(self.CREDENTIALS_FILE): return - + try: - with open(self.CREDENTIALS_FILE, 'r') as f: + with open(self.CREDENTIALS_FILE) as f: all_creds = json.load(f) - + if service in all_creds: del all_creds[service] - - with open(self.CREDENTIALS_FILE, 'w') as f: + + with open(self.CREDENTIALS_FILE, "w") as f: json.dump(all_creds, f, indent=2) - + console.print(f"[dim]Removed saved credentials for {service}[/dim]") - except (json.JSONDecodeError, IOError): + except (OSError, json.JSONDecodeError): pass - + def _has_saved_credentials(self, service: str) -> bool: """Check if we have saved credentials for a service.""" return service in self.cached_credentials and bool(self.cached_credentials[service]) - + def _ask_use_saved(self, service: str, requirement: LoginRequirement) -> bool: """Ask user if they want to use saved credentials.""" saved = 
self.cached_credentials.get(service, {}) - + # Show what we have saved (without showing secrets) saved_fields = [] - for field in requirement.required_fields: - if field in saved and saved[field]: - if requirement.field_secret.get(field, False): - saved_fields.append(f"{field}=****") + for field_name in requirement.required_fields: + if field_name in saved and saved[field_name]: + if requirement.field_secret.get(field_name, False): + saved_fields.append(f"{field_name}=****") else: - value = saved[field] + value = saved[field_name] # Truncate long values if len(value) > 20: value = value[:17] + "..." - saved_fields.append(f"{field}={value}") - + saved_fields.append(f"{field_name}={value}") + if not saved_fields: return False - + console.print() console.print(f"[cyan]📁 Found saved credentials for {requirement.display_name}:[/cyan]") console.print(f"[dim] {', '.join(saved_fields)}[/dim]") - + if "_saved_at" in saved: console.print(f"[dim] Saved: {saved['_saved_at'][:19]}[/dim]") - + console.print() try: response = input("Use saved credentials? (y/n/delete): ").strip().lower() except (EOFError, KeyboardInterrupt): return False - + if response in ["d", "delete", "del", "remove"]: self._delete_saved_credentials(service) if service in self.cached_credentials: del self.cached_credentials[service] return False - + return response in ["y", "yes", ""] - + def _ask_save_credentials(self, service: str, credentials: dict[str, str]) -> None: """Ask user if they want to save credentials for next time.""" console.print() - console.print(f"[cyan]💾 Save these credentials for next time?[/cyan]") + console.print("[cyan]💾 Save these credentials for next time?[/cyan]") console.print(f"[dim] Credentials will be stored in {self.CREDENTIALS_FILE}[/dim]") - console.print(f"[dim] (encoded, readable only by you)[/dim]") - + console.print("[dim] (encoded, readable only by you)[/dim]") + try: response = input("Save credentials? 
(y/n): ").strip().lower() except (EOFError, KeyboardInterrupt): return - + if response in ["y", "yes"]: self._save_credentials(service, credentials) # Also update cache self.cached_credentials[service] = credentials.copy() - + def detect_login_requirement(self, cmd: str, stderr: str) -> LoginRequirement | None: """Detect which service needs login based on command and error.""" cmd_lower = cmd.lower() stderr_lower = stderr.lower() - + # Check for specific registries in docker commands if "docker" in cmd_lower: if "ghcr.io" in cmd_lower or "ghcr.io" in stderr_lower: @@ -1728,58 +2924,62 @@ def detect_login_requirement(self, cmd: str, stderr: str) -> LoginRequirement | if "gcr.io" in cmd_lower or "gcr.io" in stderr_lower: return LOGIN_REQUIREMENTS.get("gcloud") return LOGIN_REQUIREMENTS.get("docker") - + # Check other services for service, req in LOGIN_REQUIREMENTS.items(): if re.search(req.command_pattern, cmd, re.IGNORECASE): return req - + return None - + def check_env_credentials(self, requirement: LoginRequirement) -> dict[str, str]: """Check if credentials are available in environment variables.""" found = {} - for field, env_var in requirement.env_vars.items(): + for field_name, env_var in requirement.env_vars.items(): value = os.environ.get(env_var) if value: - found[field] = value + found[field_name] = value return found - + def prompt_for_credentials( - self, - requirement: LoginRequirement, - pre_filled: dict[str, str] | None = None + self, requirement: LoginRequirement, pre_filled: dict[str, str] | None = None ) -> dict[str, str] | None: """Prompt user for required credentials.""" import getpass - + console.print() - console.print(f"[bold cyan]🔐 {requirement.display_name} Authentication Required[/bold cyan]") + console.print( + f"[bold cyan]🔐 {requirement.display_name} Authentication Required[/bold cyan]" + ) console.print() - + if requirement.signup_url: console.print(f"[dim]Don't have an account? 
Sign up at: {requirement.signup_url}[/dim]") if requirement.docs_url: console.print(f"[dim]Documentation: {requirement.docs_url}[/dim]") console.print() - + # Check for existing env vars env_creds = self.check_env_credentials(requirement) if env_creds: - console.print(f"[green]Found credentials in environment: {', '.join(env_creds.keys())}[/green]") - + console.print( + f"[green]Found credentials in environment: {', '.join(env_creds.keys())}[/green]" + ) + credentials = pre_filled.copy() if pre_filled else {} credentials.update(env_creds) - + try: for field in requirement.required_fields: if field in credentials and credentials[field]: - console.print(f"[dim]{requirement.field_prompts[field]}: (using existing)[/dim]") + console.print( + f"[dim]{requirement.field_prompts[field]}: (using existing)[/dim]" + ) continue - + prompt_text = requirement.field_prompts.get(field, f"Enter {field}") is_secret = requirement.field_secret.get(field, False) - + # Handle special defaults default_value = "" if field == "registry": @@ -1788,12 +2988,12 @@ def prompt_for_credentials( default_value = "us-east-1" elif field == "kubeconfig": default_value = os.path.expanduser("~/.kube/config") - + if default_value: prompt_text = f"{prompt_text} [{default_value}]" - + console.print(f"[bold]{prompt_text}:[/bold] ", end="") - + if is_secret: value = getpass.getpass("") else: @@ -1802,62 +3002,62 @@ def prompt_for_credentials( except (EOFError, KeyboardInterrupt): console.print("\n[yellow]Authentication cancelled.[/yellow]") return None - + # Use default if empty if not value and default_value: value = default_value console.print(f"[dim]Using default: {default_value}[/dim]") - + if not value and field != "registry": # registry can be empty for Docker Hub console.print(f"[red]Error: {field} is required.[/red]") return None - + credentials[field] = value - + return credentials - + except (EOFError, KeyboardInterrupt): console.print("\n[yellow]Authentication cancelled.[/yellow]") return None - + 
def execute_login( - self, - requirement: LoginRequirement, - credentials: dict[str, str] + self, requirement: LoginRequirement, credentials: dict[str, str] ) -> tuple[bool, str, str]: """Execute the login command with provided credentials.""" - + # Build the login command if not requirement.login_command_template: return False, "", "No login command template defined" - + # Handle special cases if requirement.service == "docker" and credentials.get("registry") in ["", "docker.io"]: credentials["registry"] = "" # Docker Hub doesn't need registry in command - + # Format the command try: login_cmd = requirement.login_command_template.format(**credentials) except KeyError as e: return False, "", f"Missing credential: {e}" - + # For Docker, use stdin for password to avoid it showing in ps if requirement.service in ["docker", "ghcr"]: password = credentials.get("password") or credentials.get("token", "") username = credentials.get("username", "") registry = credentials.get("registry", "") - + if requirement.service == "ghcr": registry = "ghcr.io" - + # Build safe command if registry: cmd_parts = ["docker", "login", registry, "-u", username, "--password-stdin"] else: cmd_parts = ["docker", "login", "-u", username, "--password-stdin"] - - console.print(f"[dim]Executing: docker login {registry or 'docker.io'} -u {username}[/dim]") - + + console.print( + f"[dim]Executing: docker login {registry or 'docker.io'} -u {username}[/dim]" + ) + try: process = subprocess.Popen( cmd_parts, @@ -1873,9 +3073,9 @@ def execute_login( return False, "", "Login timed out" except Exception as e: return False, "", str(e) - + # For other services, execute directly - console.print(f"[dim]Executing login...[/dim]") + console.print("[dim]Executing login...[/dim]") try: result = subprocess.run( login_cmd, @@ -1889,22 +3089,22 @@ def execute_login( return False, "", "Login timed out" except Exception as e: return False, "", str(e) - + def handle_login(self, cmd: str, stderr: str) -> tuple[bool, 
str]: """ Main entry point: detect login requirement, prompt, and execute. - + Returns: (success, message) """ requirement = self.detect_login_requirement(cmd, stderr) - + if not requirement: return False, "Could not determine which service needs authentication" - + used_saved = False credentials = None - + # Check for saved credentials first if self._has_saved_credentials(requirement.service): if self._ask_use_saved(requirement.service, requirement): @@ -1913,19 +3113,23 @@ def handle_login(self, cmd: str, stderr: str) -> tuple[bool, str]: # Remove metadata fields credentials = {k: v for k, v in credentials.items() if not k.startswith("_")} used_saved = True - - console.print(f"[cyan]Using saved credentials...[/cyan]") + + console.print("[cyan]Using saved credentials...[/cyan]") success, stdout, login_stderr = self.execute_login(requirement, credentials) - + if success: - console.print(f"[green]✓ Successfully logged in to {requirement.display_name} using saved credentials[/green]") + console.print( + f"[green]✓ Successfully logged in to {requirement.display_name} using saved credentials[/green]" + ) return True, f"Logged in to {requirement.display_name} using saved credentials" else: - console.print(f"[yellow]Saved credentials didn't work: {login_stderr[:100] if login_stderr else 'Unknown error'}[/yellow]") - console.print(f"[dim]Let's enter new credentials...[/dim]") + console.print( + f"[yellow]Saved credentials didn't work: {login_stderr[:100] if login_stderr else 'Unknown error'}[/yellow]" + ) + console.print("[dim]Let's enter new credentials...[/dim]") credentials = None used_saved = False - + # Prompt for new credentials if we don't have valid ones if not credentials: # Pre-fill with any partial saved credentials (like username) @@ -1933,61 +3137,68 @@ def handle_login(self, cmd: str, stderr: str) -> tuple[bool, str]: if requirement.service in self.cached_credentials: saved = self.cached_credentials[requirement.service] for field in 
requirement.required_fields: - if field in saved and saved[field] and not requirement.field_secret.get(field, False): + if ( + field in saved + and saved[field] + and not requirement.field_secret.get(field, False) + ): pre_filled[field] = saved[field] - - credentials = self.prompt_for_credentials(requirement, pre_filled if pre_filled else None) - + + credentials = self.prompt_for_credentials( + requirement, pre_filled if pre_filled else None + ) + if not credentials: return False, "Authentication cancelled by user" - + # Execute login success, stdout, login_stderr = self.execute_login(requirement, credentials) - + if success: console.print(f"[green]✓ Successfully logged in to {requirement.display_name}[/green]") - + # Ask to save credentials if they weren't from saved file if not used_saved: self._ask_save_credentials(requirement.service, credentials) - + # Update session cache self.cached_credentials[requirement.service] = credentials.copy() - + return True, f"Successfully authenticated with {requirement.display_name}" else: error_msg = login_stderr or "Login failed" console.print(f"[red]✗ Login failed: {error_msg}[/red]") - + # Offer to retry console.print() try: retry = input("Would you like to try again? 
(y/n): ").strip().lower() except (EOFError, KeyboardInterrupt): retry = "n" - + if retry in ["y", "yes"]: # Clear cached credentials for this service since they failed if requirement.service in self.cached_credentials: del self.cached_credentials[requirement.service] return self.handle_login(cmd, stderr) # Recursive retry - + return False, f"Login failed: {error_msg}" # Auto-Fixer Class # ============================================================================ + class AutoFixer: """Auto-fixes errors based on diagnosis.""" - + def __init__(self, llm_callback: Callable[[str, dict], dict] | None = None): self.diagnoser = ErrorDiagnoser() self.llm_callback = llm_callback # Track all attempted fixes across multiple calls to avoid repeating self._attempted_fixes: dict[str, set[str]] = {} # cmd -> set of fix commands tried self._attempted_strategies: dict[str, set[str]] = {} # cmd -> set of strategies tried - + def _get_fix_key(self, cmd: str) -> str: """Generate a key for tracking fixes for a command.""" # Normalize the command (strip sudo, whitespace) @@ -1995,44 +3206,44 @@ def _get_fix_key(self, cmd: str) -> str: if normalized.startswith("sudo "): normalized = normalized[5:].strip() return normalized - + def _is_fix_attempted(self, original_cmd: str, fix_cmd: str) -> bool: """Check if a fix command has already been attempted for this command.""" key = self._get_fix_key(original_cmd) fix_normalized = fix_cmd.strip() - + if key not in self._attempted_fixes: return False - + return fix_normalized in self._attempted_fixes[key] - + def _mark_fix_attempted(self, original_cmd: str, fix_cmd: str) -> None: """Mark a fix command as attempted.""" key = self._get_fix_key(original_cmd) - + if key not in self._attempted_fixes: self._attempted_fixes[key] = set() - + self._attempted_fixes[key].add(fix_cmd.strip()) - + def _is_strategy_attempted(self, original_cmd: str, strategy: str, error_type: str) -> bool: """Check if a strategy has been attempted for this command/error 
combination.""" key = f"{self._get_fix_key(original_cmd)}:{error_type}" - + if key not in self._attempted_strategies: return False - + return strategy in self._attempted_strategies[key] - + def _mark_strategy_attempted(self, original_cmd: str, strategy: str, error_type: str) -> None: """Mark a strategy as attempted for this command/error combination.""" key = f"{self._get_fix_key(original_cmd)}:{error_type}" - + if key not in self._attempted_strategies: self._attempted_strategies[key] = set() - + self._attempted_strategies[key].add(strategy) - + def reset_attempts(self, cmd: str | None = None) -> None: """Reset attempted fixes tracking. If cmd is None, reset all.""" if cmd is None: @@ -2046,21 +3257,21 @@ def reset_attempts(self, cmd: str | None = None) -> None: to_delete = [k for k in self._attempted_strategies if k.startswith(key)] for k in to_delete: del self._attempted_strategies[k] - + def _get_llm_fix(self, cmd: str, stderr: str, diagnosis: dict) -> dict | None: """Use LLM to diagnose error and suggest fix commands. - + This is called when pattern matching fails to identify the error. """ if not self.llm_callback: return None - + context = { "error_command": cmd, "error_output": stderr[:1000], # Truncate for LLM context "current_diagnosis": diagnosis, } - + # Create a targeted prompt for error diagnosis prompt = f"""Analyze this Linux command error and provide fix commands. 
@@ -2081,66 +3292,69 @@ def _get_llm_fix(self, cmd: str, stderr: str, diagnosis: dict) -> dict | None: Example response: {{"fix_commands": ["docker rm -f ollama", "docker run ..."], "reasoning": "Container 'ollama' already exists, removing it first"}}""" - + try: response = self.llm_callback(prompt, context) - + if response and response.get("response_type") != "error": # Check if the response contains fix commands directly if response.get("fix_commands"): return { "fix_commands": response["fix_commands"], - "reasoning": response.get("reasoning", "AI-suggested fix") + "reasoning": response.get("reasoning", "AI-suggested fix"), } - + # Check if it's a do_commands response if response.get("do_commands"): return { "fix_commands": [cmd["command"] for cmd in response["do_commands"]], - "reasoning": response.get("reasoning", "AI-suggested fix") + "reasoning": response.get("reasoning", "AI-suggested fix"), } - + # Try to parse answer as fix suggestion if response.get("answer"): # Extract commands from natural language response answer = response["answer"] commands = [] - for line in answer.split('\n'): + for line in answer.split("\n"): line = line.strip() - if line.startswith('$') or line.startswith('sudo ') or line.startswith('docker '): - commands.append(line.lstrip('$ ')) + if ( + line.startswith("$") + or line.startswith("sudo ") + or line.startswith("docker ") + ): + commands.append(line.lstrip("$ ")) if commands: - return { - "fix_commands": commands, - "reasoning": "Extracted from AI response" - } - + return {"fix_commands": commands, "reasoning": "Extracted from AI response"} + return None - + except Exception as e: console.print(f"[dim] LLM fix generation failed: {e}[/dim]") return None - - def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + + def _execute_command( + self, cmd: str, needs_sudo: bool = False, timeout: int = 120 + ) -> tuple[bool, str, str]: """Execute a single command.""" import sys - + try: 
if needs_sudo and not cmd.strip().startswith("sudo"): cmd = f"sudo {cmd}" - + # Handle comments if cmd.strip().startswith("#"): return True, "", "" - + # For sudo commands, we need to handle the password prompt specially is_sudo = cmd.strip().startswith("sudo") - + if is_sudo: # Flush output before sudo to ensure clean state sys.stdout.flush() sys.stderr.flush() - + result = subprocess.run( cmd, shell=True, @@ -2148,19 +3362,19 @@ def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 12 text=True, timeout=timeout, ) - + if is_sudo: # After sudo, ensure console is in clean state # Print empty line to reset cursor position after potential password prompt - sys.stdout.write('\n') + sys.stdout.write("\n") sys.stdout.flush() - + return result.returncode == 0, result.stdout.strip(), result.stderr.strip() except subprocess.TimeoutExpired: return False, "", f"Command timed out after {timeout} seconds" except Exception as e: return False, "", str(e) - + def auto_fix_error( self, cmd: str, @@ -2170,9 +3384,9 @@ def auto_fix_error( ) -> tuple[bool, str, list[str]]: """ General-purpose auto-fix system with retry logic. - + Tracks attempted fixes to avoid repeating the same fixes. 
- + Returns: Tuple of (fixed, message, commands_executed) """ @@ -2182,31 +3396,33 @@ def auto_fix_error( attempt = 0 skipped_attempts = 0 max_skips = 3 # Max attempts to skip before giving up - + while attempt < max_attempts and skipped_attempts < max_skips: attempt += 1 error_type = current_diagnosis.get("error_type", "unknown") strategy = current_diagnosis.get("fix_strategy", "") category = current_diagnosis.get("category", "unknown") - + # Check if this strategy was already attempted for this error if self._is_strategy_attempted(cmd, strategy, error_type): - console.print(f"[dim] Skipping already-tried strategy: {strategy} for {error_type}[/dim]") + console.print( + f"[dim] Skipping already-tried strategy: {strategy} for {error_type}[/dim]" + ) skipped_attempts += 1 - + # Try to get a different diagnosis by re-analyzing if current_stderr: # Force a different approach by marking current strategy as exhausted current_diagnosis["fix_strategy"] = "" current_diagnosis["can_auto_fix"] = False continue - + # Mark this strategy as attempted self._mark_strategy_attempted(cmd, strategy, error_type) - + # Check fix commands that would be generated fix_commands = current_diagnosis.get("fix_commands", []) - + # Filter out already-attempted fix commands new_fix_commands = [] for fix_cmd in fix_commands: @@ -2216,144 +3432,171 @@ def auto_fix_error( console.print(f"[dim] Skipping already-executed: {fix_cmd[:50]}...[/dim]") else: new_fix_commands.append(fix_cmd) - + # If all fix commands were already tried, skip this attempt if fix_commands and not new_fix_commands: console.print(f"[dim] All fix commands already tried for {error_type}[/dim]") skipped_attempts += 1 continue - + # Update diagnosis with filtered commands current_diagnosis["fix_commands"] = new_fix_commands - + # Reset skip counter since we found something new to try skipped_attempts = 0 - + severity = current_diagnosis.get("severity", "error") - + # Visual grouping for auto-fix attempts from rich.panel import 
Panel from rich.text import Text - + fix_title = Text() fix_title.append("🔧 AUTO-FIX ", style="bold yellow") fix_title.append(f"Attempt {attempt}/{max_attempts}", style="dim") - + severity_color = "red" if severity == "critical" else "yellow" fix_content = Text() if severity == "critical": fix_content.append("⚠️ CRITICAL: ", style="bold red") fix_content.append(f"[{category}] ", style="dim") fix_content.append(error_type, style=f"bold {severity_color}") - + console.print() - console.print(Panel( - fix_content, - title=fix_title, - title_align="left", - border_style=severity_color, - padding=(0, 1), - )) - + console.print( + Panel( + fix_content, + title=fix_title, + title_align="left", + border_style=severity_color, + padding=(0, 1), + ) + ) + # Ensure output is flushed before executing fixes import sys + sys.stdout.flush() - + fixed, message, commands = self.apply_single_fix(cmd, current_stderr, current_diagnosis) - + # Mark all executed commands as attempted for exec_cmd in commands: self._mark_fix_attempted(cmd, exec_cmd) all_commands_executed.extend(commands) - + if fixed: # Check if it's just a "use sudo" suggestion if message == "Will retry with sudo": sudo_cmd = f"sudo {cmd}" if not cmd.startswith("sudo") else cmd - + # Check if we already tried sudo if self._is_fix_attempted(cmd, sudo_cmd): - console.print(f"[dim] Already tried sudo, skipping...[/dim]") + console.print("[dim] Already tried sudo, skipping...[/dim]") skipped_attempts += 1 continue - + self._mark_fix_attempted(cmd, sudo_cmd) success, stdout, new_stderr = self._execute_command(sudo_cmd) all_commands_executed.append(sudo_cmd) - + if success: - console.print(Panel( - "[bold green]✓ Fixed with sudo[/bold green]", - border_style="green", - padding=(0, 1), - expand=False, - )) - return True, f"Fixed with sudo after {attempt} attempt(s)", all_commands_executed + console.print( + Panel( + "[bold green]✓ Fixed with sudo[/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) + 
return ( + True, + f"Fixed with sudo after {attempt} attempt(s)", + all_commands_executed, + ) else: current_stderr = new_stderr current_diagnosis = self.diagnoser.diagnose_error(cmd, new_stderr) continue - + # Verify the original command now works - console.print(Panel( - f"[bold cyan]✓ Fix applied:[/bold cyan] {message}\n[dim]Verifying original command...[/dim]", - border_style="cyan", - padding=(0, 1), - expand=False, - )) - + console.print( + Panel( + f"[bold cyan]✓ Fix applied:[/bold cyan] {message}\n[dim]Verifying original command...[/dim]", + border_style="cyan", + padding=(0, 1), + expand=False, + ) + ) + verify_cmd = f"sudo {cmd}" if not cmd.startswith("sudo") else cmd success, stdout, new_stderr = self._execute_command(verify_cmd) all_commands_executed.append(verify_cmd) - + if success: - console.print(Panel( - "[bold green]✓ Verified![/bold green] Command now succeeds", - border_style="green", - padding=(0, 1), - expand=False, - )) - return True, f"Fixed after {attempt} attempt(s): {message}", all_commands_executed + console.print( + Panel( + "[bold green]✓ Verified![/bold green] Command now succeeds", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) + return ( + True, + f"Fixed after {attempt} attempt(s): {message}", + all_commands_executed, + ) else: new_diagnosis = self.diagnoser.diagnose_error(cmd, new_stderr) - + if new_diagnosis["error_type"] == error_type: - console.print(f" [dim yellow]Same error persists, trying different approach...[/dim yellow]") + console.print( + " [dim yellow]Same error persists, trying different approach...[/dim yellow]" + ) else: - console.print(f" [yellow]New error: {new_diagnosis['error_type']}[/yellow]") - + console.print( + f" [yellow]New error: {new_diagnosis['error_type']}[/yellow]" + ) + current_stderr = new_stderr current_diagnosis = new_diagnosis else: console.print(f" [dim red]Fix attempt failed: {message}[/dim red]") - console.print(f" [dim]Trying fallback...[/dim]") - + console.print(" 
[dim]Trying fallback...[/dim]") + # Try with sudo as fallback sudo_fallback = f"sudo {cmd}" - if not cmd.strip().startswith("sudo") and not self._is_fix_attempted(cmd, sudo_fallback): + if not cmd.strip().startswith("sudo") and not self._is_fix_attempted( + cmd, sudo_fallback + ): self._mark_fix_attempted(cmd, sudo_fallback) success, _, new_stderr = self._execute_command(sudo_fallback) all_commands_executed.append(sudo_fallback) - + if success: - return True, f"Fixed with sudo fallback", all_commands_executed - + return True, "Fixed with sudo fallback", all_commands_executed + current_stderr = new_stderr current_diagnosis = self.diagnoser.diagnose_error(cmd, new_stderr) else: if cmd.strip().startswith("sudo"): - console.print(f"[dim] Already running with sudo, no more fallbacks[/dim]") + console.print("[dim] Already running with sudo, no more fallbacks[/dim]") else: - console.print(f"[dim] Sudo fallback already tried[/dim]") + console.print("[dim] Sudo fallback already tried[/dim]") break - + # Final summary of what was attempted unique_attempts = len(self._attempted_fixes.get(self._get_fix_key(cmd), set())) if unique_attempts > 0: console.print(f"[dim] Total unique fixes attempted: {unique_attempts}[/dim]") - - return False, f"Could not fix after {attempt} attempts ({skipped_attempts} skipped as duplicates)", all_commands_executed - + + return ( + False, + f"Could not fix after {attempt} attempts ({skipped_attempts} skipped as duplicates)", + all_commands_executed, + ) + def apply_single_fix( self, cmd: str, @@ -2367,56 +3610,62 @@ def apply_single_fix( fix_commands = diagnosis.get("fix_commands", []) extracted = diagnosis.get("extracted_info", {}) path = diagnosis.get("extracted_path") - + commands_executed = [] - + # Strategy-based fixes - + # === Use Sudo === - if strategy == "use_sudo" or error_type in ["permission_denied", "operation_not_permitted", "access_denied"]: + if strategy == "use_sudo" or error_type in [ + "permission_denied", + 
"operation_not_permitted", + "access_denied", + ]: if not cmd.strip().startswith("sudo"): console.print("[dim] Adding sudo...[/dim]") return True, "Will retry with sudo", [] - + # === Create Path === if strategy == "create_path" or error_type == "not_found": missing_path = path or extracted.get("missing_path") - + if missing_path: parent_dir = os.path.dirname(missing_path) - + if parent_dir and not os.path.exists(parent_dir): console.print(f"[dim] Creating directory: {parent_dir}[/dim]") mkdir_cmd = f"sudo mkdir -p {parent_dir}" success, _, mkdir_err = self._execute_command(mkdir_cmd) commands_executed.append(mkdir_cmd) - + if success: return True, f"Created directory {parent_dir}", commands_executed else: return False, f"Failed to create directory: {mkdir_err}", commands_executed - + # === Install Package === if strategy == "install_package" or error_type == "command_not_found": - missing_cmd = extracted.get("missing_command") or self.diagnoser.extract_command_from_error(stderr) + missing_cmd = extracted.get( + "missing_command" + ) or self.diagnoser.extract_command_from_error(stderr) if not missing_cmd: missing_cmd = cmd.split()[0] if cmd.split() else "" - + suggested_pkg = UBUNTU_PACKAGE_MAP.get(missing_cmd, missing_cmd) - + if missing_cmd: console.print(f"[dim] Installing package: {suggested_pkg}[/dim]") - + # Update repos first update_cmd = "sudo apt-get update" self._execute_command(update_cmd) commands_executed.append(update_cmd) - + # Install package install_cmd = f"sudo apt-get install -y {suggested_pkg}" success, _, install_err = self._execute_command(install_cmd) commands_executed.append(install_cmd) - + if success: return True, f"Installed {suggested_pkg}", commands_executed else: @@ -2427,51 +3676,58 @@ def apply_single_fix( commands_executed.append(install_cmd2) if success: return True, f"Installed {missing_cmd}", commands_executed - + return False, f"Failed to install: {install_err[:100]}", commands_executed - + # === Clear Package Lock === - if 
strategy == "clear_lock" or error_type in ["dpkg_lock", "apt_lock", "could_not_get_lock"]: + if strategy == "clear_lock" or error_type in [ + "dpkg_lock", + "apt_lock", + "could_not_get_lock", + ]: console.print("[dim] Clearing package locks...[/dim]") - + lock_cmds = [ "sudo rm -f /var/lib/dpkg/lock-frontend", "sudo rm -f /var/lib/dpkg/lock", "sudo rm -f /var/cache/apt/archives/lock", "sudo dpkg --configure -a", ] - + for lock_cmd in lock_cmds: self._execute_command(lock_cmd) commands_executed.append(lock_cmd) - + return True, "Cleared package locks", commands_executed - + # === Fix Dependencies === if strategy in ["fix_dependencies", "fix_broken"]: console.print("[dim] Fixing package dependencies...[/dim]") - + fix_cmds = [ "sudo apt-get install -f -y", "sudo dpkg --configure -a", ] - + for fix_cmd in fix_cmds: success, _, _ = self._execute_command(fix_cmd) commands_executed.append(fix_cmd) - + return True, "Attempted dependency fix", commands_executed - + # === Start Service === - if strategy in ["start_service", "check_service"] or error_type in ["service_inactive", "service_not_running"]: + if strategy in ["start_service", "check_service"] or error_type in [ + "service_inactive", + "service_not_running", + ]: service = extracted.get("service") - + if service: console.print(f"[dim] Starting service: {service}[/dim]") start_cmd = f"sudo systemctl start {service}" success, _, start_err = self._execute_command(start_cmd) commands_executed.append(start_cmd) - + if success: return True, f"Started service {service}", commands_executed else: @@ -2482,71 +3738,85 @@ def apply_single_fix( if success: return True, f"Enabled and started {service}", commands_executed return False, f"Failed to start {service}: {start_err[:100]}", commands_executed - + # === Unmask Service === if strategy == "unmask_service" or error_type == "service_masked": service = extracted.get("service") - + if service: console.print(f"[dim] Unmasking service: {service}[/dim]") unmask_cmd = f"sudo 
systemctl unmask {service}" success, _, _ = self._execute_command(unmask_cmd) commands_executed.append(unmask_cmd) - + if success: start_cmd = f"sudo systemctl start {service}" self._execute_command(start_cmd) commands_executed.append(start_cmd) return True, f"Unmasked and started {service}", commands_executed - + # === Free Disk Space === if strategy == "free_disk" or error_type == "no_space": console.print("[dim] Cleaning up disk space...[/dim]") - + cleanup_cmds = [ "sudo apt-get clean", "sudo apt-get autoremove -y", "sudo journalctl --vacuum-size=100M", ] - + for cleanup_cmd in cleanup_cmds: self._execute_command(cleanup_cmd) commands_executed.append(cleanup_cmd) - + return True, "Freed disk space", commands_executed - + # === Free Memory === - if strategy == "free_memory" or error_type in ["oom", "cannot_allocate", "memory_exhausted"]: + if strategy == "free_memory" or error_type in [ + "oom", + "cannot_allocate", + "memory_exhausted", + ]: console.print("[dim] Freeing memory...[/dim]") - + mem_cmds = [ "sudo sync", "echo 3 | sudo tee /proc/sys/vm/drop_caches", ] - + for mem_cmd in mem_cmds: self._execute_command(mem_cmd) commands_executed.append(mem_cmd) - + return True, "Freed memory caches", commands_executed - + # === Fix Config Syntax (all config error types) === config_error_types = [ - "config_syntax_error", "nginx_config_error", "nginx_syntax_error", - "nginx_unexpected", "nginx_unknown_directive", "nginx_test_failed", - "apache_syntax_error", "apache_config_error", "config_line_error", - "mysql_config_error", "postgres_config_error", "generic_config_syntax", - "invalid_config", "config_parse_error", "syntax_error" + "config_syntax_error", + "nginx_config_error", + "nginx_syntax_error", + "nginx_unexpected", + "nginx_unknown_directive", + "nginx_test_failed", + "apache_syntax_error", + "apache_config_error", + "config_line_error", + "mysql_config_error", + "postgres_config_error", + "generic_config_syntax", + "invalid_config", + "config_parse_error", + 
"syntax_error", ] - + if error_type in config_error_types or category == "config": config_file = extracted.get("config_file") line_num = extracted.get("line_num") - + # Try to extract config file/line from error if not already done if not config_file: config_file, line_num = self.diagnoser.extract_config_file_and_line(stderr) - + if config_file and line_num: console.print(f"[dim] Config error at {config_file}:{line_num}[/dim]") fixed, msg = self.fix_config_syntax(config_file, line_num, stderr, cmd) @@ -2559,7 +3829,7 @@ def apply_single_fix( if v_success: return True, f"{msg} - nginx config now valid", commands_executed else: - console.print(f"[yellow] Config still has errors[/yellow]") + console.print("[yellow] Config still has errors[/yellow]") # Re-diagnose for next iteration return False, f"{msg} but still has errors", commands_executed return True, msg, commands_executed @@ -2580,17 +3850,21 @@ def apply_single_fix( if fixed: return True, msg, commands_executed return False, "Could not identify config file/line to fix", commands_executed - + # === Network Fixes === if category == "network": - if strategy == "check_dns" or error_type in ["dns_temp_fail", "dns_unknown", "dns_failed"]: + if strategy == "check_dns" or error_type in [ + "dns_temp_fail", + "dns_unknown", + "dns_failed", + ]: console.print("[dim] Restarting DNS resolver...[/dim]") dns_cmd = "sudo systemctl restart systemd-resolved" success, _, _ = self._execute_command(dns_cmd) commands_executed.append(dns_cmd) if success: return True, "Restarted DNS resolver", commands_executed - + if strategy == "find_port_user" or error_type == "address_in_use": port = extracted.get("port") if port: @@ -2600,8 +3874,12 @@ def apply_single_fix( commands_executed.append(lsof_cmd) if stdout: console.print(f"[dim] Process using port: {stdout[:100]}[/dim]") - return False, f"Port {port} is in use - kill the process first", commands_executed - + return ( + False, + f"Port {port} is in use - kill the process first", + 
commands_executed, + ) + # === Remount Read-Write === if strategy == "remount_rw" or error_type == "readonly_fs": if path: @@ -2614,13 +3892,13 @@ def apply_single_fix( mount_point = check_path break check_path = os.path.dirname(check_path) - + remount_cmd = f"sudo mount -o remount,rw {mount_point}" success, _, remount_err = self._execute_command(remount_cmd) commands_executed.append(remount_cmd) if success: return True, f"Remounted {mount_point} read-write", commands_executed - + # === Fix Symlink Loop === if strategy == "fix_symlink" or error_type == "symlink_loop": if path: @@ -2632,19 +3910,24 @@ def apply_single_fix( commands_executed.append(rm_cmd) if success: return True, f"Removed broken symlink {path}", commands_executed - + # === Wait and Retry === - if strategy == "wait_retry" or error_type in ["resource_unavailable", "text_file_busy", "device_busy"]: + if strategy == "wait_retry" or error_type in [ + "resource_unavailable", + "text_file_busy", + "device_busy", + ]: import time + console.print("[dim] Waiting for resource...[/dim]") time.sleep(2) return True, "Waited 2 seconds", commands_executed - + # === Use xargs for long argument lists === if strategy == "use_xargs" or error_type == "arg_list_too_long": console.print("[dim] Argument list too long - need to use xargs or loop[/dim]") return False, "Use xargs or a loop to process files in batches", commands_executed - + # === Execute provided fix commands === if fix_commands: console.print("[dim] Executing fix commands...[/dim]") @@ -2655,10 +3938,10 @@ def apply_single_fix( commands_executed.append(fix_cmd) if not success and err: console.print(f"[dim] Warning: {fix_cmd} failed: {err[:50]}[/dim]") - + if commands_executed: return True, f"Executed {len(commands_executed)} fix commands", commands_executed - + # === Try LLM-based fix if available === if self.llm_callback and error_type == "unknown": console.print("[dim] Using AI to diagnose error...[/dim]") @@ -2666,35 +3949,37 @@ def apply_single_fix( if 
llm_fix: fix_commands = llm_fix.get("fix_commands", []) reasoning = llm_fix.get("reasoning", "AI-suggested fix") - + if fix_commands: console.print(f"[cyan] 🤖 AI diagnosis: {reasoning}[/cyan]") for fix_cmd in fix_commands: if self._is_fix_attempted(cmd, fix_cmd): console.print(f"[dim] Skipping (already tried): {fix_cmd}[/dim]") continue - + console.print(f"[dim] Executing: {fix_cmd}[/dim]") self._mark_fix_attempted(cmd, fix_cmd) - + needs_sudo = fix_cmd.strip().startswith("sudo") or "docker" in fix_cmd - success, stdout, stderr = self._execute_command(fix_cmd, needs_sudo=needs_sudo) + success, stdout, stderr = self._execute_command( + fix_cmd, needs_sudo=needs_sudo + ) commands_executed.append(fix_cmd) - + if success: console.print(f"[green] ✓ Fixed: {fix_cmd}[/green]") return True, reasoning, commands_executed - + if commands_executed: - return True, f"Executed AI-suggested fixes", commands_executed - + return True, "Executed AI-suggested fixes", commands_executed + # === Fallback: try with sudo === if not cmd.strip().startswith("sudo"): console.print("[dim] Fallback: will try with sudo...[/dim]") return True, "Will retry with sudo", [] - + return False, f"No fix strategy for {error_type}", commands_executed - + def fix_config_syntax( self, config_file: str, @@ -2704,21 +3989,21 @@ def fix_config_syntax( ) -> tuple[bool, str]: """Fix configuration file syntax errors.""" console.print(f"[dim] Analyzing config: {config_file}:{line_num}[/dim]") - + # Read the config file success, config_content, read_err = self._execute_command(f"sudo cat {config_file}") if not success or not config_content: return False, f"Could not read {config_file}: {read_err}" - - lines = config_content.split('\n') + + lines = config_content.split("\n") if line_num > len(lines) or line_num < 1: return False, f"Invalid line number {line_num}" - + problem_line = lines[line_num - 1] console.print(f"[dim] Line {line_num}: {problem_line.strip()[:60]}...[/dim]") - + stderr_lower = stderr.lower() - + # 
Duplicate entry if "duplicate" in stderr_lower: console.print("[cyan] Commenting out duplicate entry...[/cyan]") @@ -2726,18 +4011,18 @@ def fix_config_syntax( success, _, _ = self._execute_command(fix_cmd) if success: return True, f"Commented out duplicate at line {line_num}" - + # Missing semicolon (for nginx, etc.) if "unexpected" in stderr_lower or "expecting" in stderr_lower: stripped = problem_line.strip() - if stripped and not stripped.endswith((';', '{', '}', ':', ',', '#', ')')): + if stripped and not stripped.endswith((";", "{", "}", ":", ",", "#", ")")): console.print("[cyan] Adding missing semicolon...[/cyan]") - escaped_line = stripped.replace('/', '\\/').replace('&', '\\&') + escaped_line = stripped.replace("/", "\\/").replace("&", "\\&") fix_cmd = f"sudo sed -i '{line_num}s/.*/ {escaped_line};/' {config_file}" success, _, _ = self._execute_command(fix_cmd) if success: return True, f"Added semicolon at line {line_num}" - + # Unknown directive if "unknown" in stderr_lower and ("directive" in stderr_lower or "option" in stderr_lower): console.print("[cyan] Commenting out unknown directive...[/cyan]") @@ -2745,7 +4030,7 @@ def fix_config_syntax( success, _, _ = self._execute_command(fix_cmd) if success: return True, f"Commented out unknown directive at line {line_num}" - + # Invalid value/argument if "invalid" in stderr_lower: console.print("[cyan] Commenting out line with invalid value...[/cyan]") @@ -2753,7 +4038,7 @@ def fix_config_syntax( success, _, _ = self._execute_command(fix_cmd) if success: return True, f"Commented out invalid line at line {line_num}" - + # Unterminated string if "unterminated" in stderr_lower or ("string" in stderr_lower and "quote" in stderr_lower): if problem_line.count('"') % 2 == 1: @@ -2764,18 +4049,18 @@ def fix_config_syntax( return True, f"Added missing quote at line {line_num}" elif problem_line.count("'") % 2 == 1: console.print("[cyan] Adding missing single quote...[/cyan]") - fix_cmd = f"sudo sed -i 
\"{line_num}s/$/'/\" {config_file}" + fix_cmd = f'sudo sed -i "{line_num}s/$/\'/" {config_file}' success, _, _ = self._execute_command(fix_cmd) if success: return True, f"Added missing quote at line {line_num}" - + # Fallback: comment out problematic line console.print("[cyan] Fallback: commenting out problematic line...[/cyan]") fix_cmd = f"sudo sed -i '{line_num}s/^/# ERROR: /' {config_file}" success, _, _ = self._execute_command(fix_cmd) if success: return True, f"Commented out problematic line {line_num}" - + return False, "Could not identify a fix for this config error" @@ -2783,6 +4068,7 @@ def fix_config_syntax( # Utility Functions # ============================================================================ + def get_error_category(error_type: str) -> str: """Get the category for an error type.""" for pattern in ALL_ERROR_PATTERNS: diff --git a/cortex/do_runner/diagnosis_v2.py b/cortex/do_runner/diagnosis_v2.py index 9cb0bdb40..7607a06fd 100644 --- a/cortex/do_runner/diagnosis_v2.py +++ b/cortex/do_runner/diagnosis_v2.py @@ -17,9 +17,10 @@ import re import subprocess import time +from collections.abc import Callable from dataclasses import dataclass, field from enum import Enum -from typing import Any, Callable +from typing import Any from rich.console import Console from rich.panel import Panel @@ -33,9 +34,10 @@ # ERROR CATEGORIES # ============================================================================= + class ErrorCategory(str, Enum): """Broad categories of errors that can occur during command execution.""" - + # File & Directory Errors (LOCAL) FILE_NOT_FOUND = "file_not_found" FILE_EXISTS = "file_exists" @@ -43,59 +45,59 @@ class ErrorCategory(str, Enum): PERMISSION_DENIED_LOCAL = "permission_denied_local" # Local file/dir permission READ_ONLY_FILESYSTEM = "read_only_filesystem" DISK_FULL = "disk_full" - + # URL/Link Permission Errors (REMOTE) PERMISSION_DENIED_URL = "permission_denied_url" # URL/API permission ACCESS_DENIED_REGISTRY = 
"access_denied_registry" # Container registry ACCESS_DENIED_REPO = "access_denied_repo" # Git/package repo ACCESS_DENIED_API = "access_denied_api" # API endpoint - + # Authentication & Login Errors LOGIN_REQUIRED = "login_required" AUTH_FAILED = "auth_failed" TOKEN_EXPIRED = "token_expired" INVALID_CREDENTIALS = "invalid_credentials" - + # Legacy - for backward compatibility PERMISSION_DENIED = "permission_denied" # Will be resolved to LOCAL or URL - + # Package & Resource Errors PACKAGE_NOT_FOUND = "package_not_found" IMAGE_NOT_FOUND = "image_not_found" RESOURCE_NOT_FOUND = "resource_not_found" DEPENDENCY_MISSING = "dependency_missing" VERSION_CONFLICT = "version_conflict" - + # Command Errors COMMAND_NOT_FOUND = "command_not_found" SYNTAX_ERROR = "syntax_error" INVALID_ARGUMENT = "invalid_argument" MISSING_ARGUMENT = "missing_argument" DEPRECATED_SYNTAX = "deprecated_syntax" - + # Service & Process Errors SERVICE_NOT_RUNNING = "service_not_running" SERVICE_FAILED = "service_failed" PORT_IN_USE = "port_in_use" PROCESS_KILLED = "process_killed" TIMEOUT = "timeout" - + # Network Errors NETWORK_UNREACHABLE = "network_unreachable" CONNECTION_REFUSED = "connection_refused" DNS_FAILED = "dns_failed" SSL_ERROR = "ssl_error" - + # Configuration Errors CONFIG_SYNTAX_ERROR = "config_syntax_error" CONFIG_INVALID_VALUE = "config_invalid_value" CONFIG_MISSING_KEY = "config_missing_key" - + # Resource Errors OUT_OF_MEMORY = "out_of_memory" CPU_LIMIT = "cpu_limit" QUOTA_EXCEEDED = "quota_exceeded" - + # Unknown UNKNOWN = "unknown" @@ -127,14 +129,12 @@ class ErrorCategory(str, Enum): (r"Operation not permitted.*(/[^\s:]+)", "path"), (r"EACCES.*(/[^\s]+)", "path"), ], - # URL/Link permission denied (registries, APIs, repos) ErrorCategory.PERMISSION_DENIED_URL: [ (r"403 Forbidden.*https?://([^\s/]+)", "host"), (r"401 Unauthorized.*https?://([^\s/]+)", "host"), (r"Access denied.*https?://([^\s/]+)", "host"), ], - ErrorCategory.ACCESS_DENIED_REGISTRY: [ (r"denied: requested access 
to the resource is denied", "registry"), (r"pull access denied", "registry"), # Higher priority pattern @@ -144,7 +144,6 @@ class ErrorCategory(str, Enum): (r"UNAUTHORIZED.*registry", "registry"), (r"unauthorized to access repository", "registry"), ], - ErrorCategory.ACCESS_DENIED_REPO: [ (r"Repository not found.*https?://([^\s]+)", "repo"), (r"fatal: could not read from remote repository", "repo"), @@ -152,7 +151,6 @@ class ErrorCategory(str, Enum): (r"Host key verification failed", "host"), (r"remote: Permission to ([^\s]+) denied", "repo"), ], - ErrorCategory.ACCESS_DENIED_API: [ (r"API.*access denied", "api"), (r"AccessDenied.*Access denied", "api"), # AWS-style error @@ -161,7 +159,6 @@ class ErrorCategory(str, Enum): (r"An error occurred \(AccessDenied\)", "api"), # AWS CLI error (r"not authorized to perform", "api"), ], - # Legacy pattern for generic permission denied ErrorCategory.PERMISSION_DENIED: [ (r"Permission denied", "resource"), @@ -176,7 +173,6 @@ class ErrorCategory(str, Enum): (r"No space left on device", "device"), (r"Disk quota exceeded", "quota"), ], - # Authentication & Login ErrorCategory.LOGIN_REQUIRED: [ (r"Login required", "service"), @@ -202,7 +198,6 @@ class ErrorCategory(str, Enum): (r"bad credentials", "type"), (r"incorrect password", "auth"), ], - # Package & Resource ErrorCategory.PACKAGE_NOT_FOUND: [ (r"Unable to locate package ([^\s]+)", "package"), @@ -237,7 +232,6 @@ class ErrorCategory(str, Enum): (r"incompatible version", "version"), (r"requires.*but ([^\s]+) is installed", "conflict"), ], - # Command Errors ErrorCategory.COMMAND_NOT_FOUND: [ (r"command not found", "command"), @@ -269,7 +263,6 @@ class ErrorCategory(str, Enum): (r"obsolete", "feature"), (r"use.*instead", "replacement"), ], - # Service & Process ErrorCategory.SERVICE_NOT_RUNNING: [ (r"is not running", "service"), @@ -302,7 +295,6 @@ class ErrorCategory(str, Enum): (r"timeout", "operation"), (r"deadline exceeded", "operation"), ], - # Network 
ErrorCategory.NETWORK_UNREACHABLE: [ (r"Network is unreachable", "network"), @@ -324,7 +316,6 @@ class ErrorCategory(str, Enum): (r"certificate.*error", "certificate"), (r"CERT_", "certificate"), ], - # Configuration ErrorCategory.CONFIG_SYNTAX_ERROR: [ (r"configuration.*syntax.*error", "config"), @@ -343,7 +334,6 @@ class ErrorCategory(str, Enum): (r"required.*not set", "key"), (r"undefined variable", "variable"), ], - # Resource ErrorCategory.OUT_OF_MEMORY: [ (r"Out of memory", "memory"), @@ -363,9 +353,11 @@ class ErrorCategory(str, Enum): # DATA STRUCTURES # ============================================================================= + @dataclass class DiagnosisResult: """Result of error diagnosis (Step 1).""" + category: ErrorCategory error_message: str extracted_info: dict[str, str] = field(default_factory=dict) @@ -376,24 +368,26 @@ class DiagnosisResult: @dataclass class FixCommand: """A single fix command with variable placeholders.""" + command_template: str # Command with {variable} placeholders purpose: str variables: list[str] = field(default_factory=list) # Variable names found requires_sudo: bool = False - + def __post_init__(self): # Extract variables from template - self.variables = re.findall(r'\{(\w+)\}', self.command_template) + self.variables = re.findall(r"\{(\w+)\}", self.command_template) -@dataclass +@dataclass class FixPlan: """Plan for fixing an error (Step 2 output).""" + category: ErrorCategory commands: list[FixCommand] reasoning: str all_variables: set[str] = field(default_factory=set) - + def __post_init__(self): # Collect all unique variables for cmd in self.commands: @@ -403,6 +397,7 @@ def __post_init__(self): @dataclass class VariableResolution: """Resolution for a variable (Step 3).""" + name: str value: str source: str # "query", "llm", "system_info", "default" @@ -411,6 +406,7 @@ class VariableResolution: @dataclass class ExecutionResult: """Result of executing a fix command (Step 4).""" + command: str success: bool stdout: 
str @@ -421,6 +417,7 @@ class ExecutionResult: @dataclass class ErrorStackEntry: """Entry in the error stack for tracking.""" + original_command: str intent: str error: str @@ -434,10 +431,11 @@ class ErrorStackEntry: # DIAGNOSIS ENGINE # ============================================================================= + class DiagnosisEngine: """ Main diagnosis engine implementing the structured error resolution flow. - + Flow: 1. Categorize error type 2. LLM generates fix commands with variables @@ -446,10 +444,10 @@ class DiagnosisEngine: 5. If error, push to stack and repeat 6. Test original command """ - + MAX_FIX_ATTEMPTS = 5 MAX_STACK_DEPTH = 10 - + # Known URL/remote service patterns in commands URL_COMMAND_PATTERNS = [ r"docker\s+(pull|push|login)", @@ -465,7 +463,7 @@ class DiagnosisEngine: r"az\s+", # Azure CLI r"gh\s+", # GitHub CLI ] - + # Known registries and their authentication services KNOWN_SERVICES = { "ghcr.io": "ghcr", @@ -479,7 +477,7 @@ class DiagnosisEngine: "amazonaws.com": "aws", "gcr.io": "gcloud", } - + def __init__( self, api_key: str | None = None, @@ -487,53 +485,58 @@ def __init__( model: str | None = None, debug: bool = False, ): - self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + self.api_key = ( + api_key or os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + ) self.provider = provider.lower() self.model = model or self._default_model() self.debug = debug - + # Error stack for tracking command errors self.error_stack: list[ErrorStackEntry] = [] - + # Resolution cache to avoid re-resolving same variables self.variable_cache: dict[str, str] = {} - + # Execution history for logging self.execution_history: list[dict[str, Any]] = [] - + # Initialize LoginHandler for credential management self._login_handler = None try: from cortex.do_runner.diagnosis import LoginHandler + self._login_handler = LoginHandler() except ImportError: pass - + self._initialize_client() - + 
def _default_model(self) -> str: if self.provider == "openai": return "gpt-4o" elif self.provider == "claude": return "claude-sonnet-4-20250514" return "gpt-4o" - + def _initialize_client(self): """Initialize the LLM client.""" if not self.api_key: console.print("[yellow]⚠ No API key found - LLM features disabled[/yellow]") self.client = None return - + if self.provider == "openai": try: from openai import OpenAI + self.client = OpenAI(api_key=self.api_key) except ImportError: self.client = None elif self.provider == "claude": try: from anthropic import Anthropic + self.client = Anthropic(api_key=self.api_key) except ImportError: self.client = None @@ -543,20 +546,21 @@ def _initialize_client(self): # ========================================================================= # PERMISSION TYPE DETECTION # ========================================================================= - - def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[bool, str | None, str | None]: + + def _is_url_based_permission_error( + self, command: str, stderr: str + ) -> tuple[bool, str | None, str | None]: """ Determine if permission denied is for a local file/dir or a URL/link. 
- + Returns: Tuple of (is_url_based, service_name, url_or_host) """ # Check if command involves known remote operations is_remote_command = any( - re.search(pattern, command, re.IGNORECASE) - for pattern in self.URL_COMMAND_PATTERNS + re.search(pattern, command, re.IGNORECASE) for pattern in self.URL_COMMAND_PATTERNS ) - + # Check stderr for URL patterns url_patterns = [ r"https?://([^\s/]+)", @@ -564,14 +568,14 @@ def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[boo r"registry[.\s]", r"(ghcr\.io|docker\.io|gcr\.io|quay\.io)", ] - + found_host = None for pattern in url_patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: found_host = match.group(1) if match.groups() else match.group(0) break - + # Also check command for URLs/hosts if not found_host: for pattern in url_patterns: @@ -579,7 +583,7 @@ def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[boo if match: found_host = match.group(1) if match.groups() else match.group(0) break - + # Determine service service = None if found_host: @@ -587,7 +591,7 @@ def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[boo if host_pattern in found_host.lower(): service = svc break - + # Detect service from command if not found from host if not service: if "git " in command.lower(): @@ -602,7 +606,7 @@ def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[boo service = "docker" elif "npm " in command.lower(): service = "npm" - + # Git-specific patterns git_remote_patterns = [ "remote:" in stderr.lower(), @@ -611,37 +615,44 @@ def _is_url_based_permission_error(self, command: str, stderr: str) -> tuple[boo "could not read from remote repository" in stderr.lower(), "fatal: authentication failed" in stderr.lower(), ] - + # AWS-specific patterns aws_patterns = [ "accessdenied" in stderr.lower().replace(" ", ""), "an error occurred" in stderr.lower() and "denied" in stderr.lower(), "not authorized" in stderr.lower(), 
] - + # If it's a remote command with a host or URL-based error patterns - is_url_based = bool(is_remote_command and found_host) or any([ - "401" in stderr, - "403" in stderr, - "unauthorized" in stderr.lower(), - "authentication required" in stderr.lower(), - "login required" in stderr.lower(), - "access denied" in stderr.lower() and found_host, - "pull access denied" in stderr.lower(), - "denied: requested access" in stderr.lower(), - ]) or any(git_remote_patterns) or any(aws_patterns) - + is_url_based = ( + bool(is_remote_command and found_host) + or any( + [ + "401" in stderr, + "403" in stderr, + "unauthorized" in stderr.lower(), + "authentication required" in stderr.lower(), + "login required" in stderr.lower(), + "access denied" in stderr.lower() and found_host, + "pull access denied" in stderr.lower(), + "denied: requested access" in stderr.lower(), + ] + ) + or any(git_remote_patterns) + or any(aws_patterns) + ) + if is_url_based: - console.print(f"[cyan] 🌐 Detected URL-based permission error[/cyan]") + console.print("[cyan] 🌐 Detected URL-based permission error[/cyan]") console.print(f"[dim] Host: {found_host or 'unknown'}[/dim]") console.print(f"[dim] Service: {service or 'unknown'}[/dim]") - + return is_url_based, service, found_host - + def _is_local_file_permission_error(self, command: str, stderr: str) -> tuple[bool, str | None]: """ Check if permission error is for a local file/directory. 
- + Returns: Tuple of (is_local_file, file_path) """ @@ -655,41 +666,41 @@ def _is_local_file_permission_error(self, command: str, stderr: str) -> tuple[bo r"failed to open '([^']+)'", r"open\(\) \"([^\"]+)\" failed", ] - + for pattern in local_patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: path = match.group(1) # Verify it's a local path (starts with / or ./) if path.startswith("/") or path.startswith("./"): - console.print(f"[cyan] 📁 Detected local file permission error[/cyan]") + console.print("[cyan] 📁 Detected local file permission error[/cyan]") console.print(f"[dim] Path: {path}[/dim]") return True, path - + # Check command for local paths being accessed path_match = re.search(r"(/[^\s]+)", command) if path_match and "permission denied" in stderr.lower(): path = path_match.group(1) - console.print(f"[cyan] 📁 Detected local file permission error (from command)[/cyan]") + console.print("[cyan] 📁 Detected local file permission error (from command)[/cyan]") console.print(f"[dim] Path: {path}[/dim]") return True, path - + return False, None - + def _resolve_permission_error_type( - self, - command: str, + self, + command: str, stderr: str, current_category: ErrorCategory, ) -> tuple[ErrorCategory, dict[str, str]]: """ Resolve generic PERMISSION_DENIED to specific LOCAL or URL category. 
- + Returns: Tuple of (refined_category, additional_info) """ additional_info = {} - + # Only process if it's a generic permission error permission_categories = [ ErrorCategory.PERMISSION_DENIED, @@ -700,16 +711,16 @@ def _resolve_permission_error_type( ErrorCategory.ACCESS_DENIED_API, ErrorCategory.AUTH_FAILED, ] - + if current_category not in permission_categories: return current_category, additional_info - + # Check URL-based first (more specific) is_url, service, host = self._is_url_based_permission_error(command, stderr) if is_url: additional_info["service"] = service or "unknown" additional_info["host"] = host or "unknown" - + # Determine more specific category if "registry" in stderr.lower() or service in ["docker", "ghcr", "gcloud"]: return ErrorCategory.ACCESS_DENIED_REGISTRY, additional_info @@ -718,60 +729,64 @@ def _resolve_permission_error_type( elif "api" in stderr.lower() or service in ["aws", "gcloud", "azure"]: # AWS, GCloud, Azure are API-based services return ErrorCategory.ACCESS_DENIED_API, additional_info - elif "aws " in command.lower() or "az " in command.lower() or "gcloud " in command.lower(): + elif ( + "aws " in command.lower() + or "az " in command.lower() + or "gcloud " in command.lower() + ): # Cloud CLI commands are API-based return ErrorCategory.ACCESS_DENIED_API, additional_info else: return ErrorCategory.PERMISSION_DENIED_URL, additional_info - + # Check local file is_local, path = self._is_local_file_permission_error(command, stderr) if is_local: additional_info["path"] = path or "" return ErrorCategory.PERMISSION_DENIED_LOCAL, additional_info - + # Default to local for generic permission denied return ErrorCategory.PERMISSION_DENIED_LOCAL, additional_info # ========================================================================= # STEP 1: Categorize Error # ========================================================================= - + def categorize_error(self, command: str, stderr: str, stdout: str = "") -> DiagnosisResult: """ 
Step 1: Categorize the error type. - + Examines stderr (and stdout) to determine the broad category of error. For permission errors, distinguishes between local file/dir and URL/link. """ self._log_step(1, "Categorizing error type") - + combined_output = f"{stderr}\n{stdout}".lower() - + best_match: tuple[ErrorCategory, dict[str, str], float] | None = None - + for category, patterns in ERROR_PATTERNS.items(): for pattern, info_key in patterns: match = re.search(pattern, stderr, re.IGNORECASE) if match: extracted_info = {info_key: match.group(1) if match.groups() else ""} - + # Calculate confidence based on pattern specificity confidence = len(pattern) / 50.0 # Longer patterns = more specific confidence = min(confidence, 1.0) - + if best_match is None or confidence > best_match[2]: best_match = (category, extracted_info, confidence) - + if best_match: category, extracted_info, confidence = best_match - + # Refine permission errors to LOCAL or URL refined_category, additional_info = self._resolve_permission_error_type( command, stderr, category ) extracted_info.update(additional_info) - + result = DiagnosisResult( category=refined_category, error_message=stderr[:500], @@ -786,34 +801,29 @@ def categorize_error(self, command: str, stderr: str, stdout: str = "") -> Diagn confidence=0.0, raw_stderr=stderr, ) - + self._print_diagnosis(result, command) return result # ========================================================================= # STEP 2: Generate Fix Plan via LLM # ========================================================================= - - def generate_fix_plan( - self, - command: str, - intent: str, - diagnosis: DiagnosisResult - ) -> FixPlan: + + def generate_fix_plan(self, command: str, intent: str, diagnosis: DiagnosisResult) -> FixPlan: """ Step 2: LLM generates fix commands with variable placeholders. 
- + Context given: command, intent, error, category Output: List of commands with {variable} placeholders """ self._log_step(2, "Generating fix plan via LLM") - + if not self.client: # Fallback to rule-based fix generation return self._generate_fallback_fix_plan(command, intent, diagnosis) - + system_prompt = self._get_fix_generation_prompt() - + user_prompt = f"""Generate fix commands for this error: **Command:** `{command}` @@ -838,35 +848,37 @@ def generate_fix_plan( try: response = self._call_llm(system_prompt, user_prompt) - + # Parse response - json_match = re.search(r'\{[\s\S]*\}', response) + json_match = re.search(r"\{[\s\S]*\}", response) if json_match: data = json.loads(json_match.group()) - + commands = [] for cmd_data in data.get("commands", []): - commands.append(FixCommand( - command_template=cmd_data.get("command", ""), - purpose=cmd_data.get("purpose", ""), - requires_sudo=cmd_data.get("requires_sudo", False), - )) - + commands.append( + FixCommand( + command_template=cmd_data.get("command", ""), + purpose=cmd_data.get("purpose", ""), + requires_sudo=cmd_data.get("requires_sudo", False), + ) + ) + plan = FixPlan( category=diagnosis.category, commands=commands, reasoning=data.get("reasoning", ""), ) - + self._print_fix_plan(plan) return plan - + except Exception as e: console.print(f"[yellow]⚠ LLM fix generation failed: {e}[/yellow]") - + # Fallback return self._generate_fallback_fix_plan(command, intent, diagnosis) - + def _get_fix_generation_prompt(self) -> str: return """You are a Linux system error diagnosis expert. Generate shell commands to fix errors. 
@@ -879,7 +891,7 @@ def _get_fix_generation_prompt(self) -> str: VARIABLE NAMING: - {file_path} - path to a file -- {dir_path} - path to a directory +- {dir_path} - path to a directory - {package} - package name to install - {service} - systemd service name - {user} - username @@ -909,28 +921,27 @@ def _get_fix_generation_prompt(self) -> str: }""" def _generate_fallback_fix_plan( - self, - command: str, - intent: str, - diagnosis: DiagnosisResult + self, command: str, intent: str, diagnosis: DiagnosisResult ) -> FixPlan: """Generate a fix plan using rules when LLM is unavailable.""" commands: list[FixCommand] = [] reasoning = f"Rule-based fix for {diagnosis.category.value}" - + category = diagnosis.category info = diagnosis.extracted_info - + # LOCAL permission denied - use sudo if category == ErrorCategory.PERMISSION_DENIED_LOCAL: path = info.get("path", "") - reasoning = f"Local file/directory permission denied - using elevated privileges" - commands.append(FixCommand( - command_template=f"sudo {command}", - purpose=f"Retry with elevated privileges for local path{' ' + path if path else ''}", - requires_sudo=True, - )) - + reasoning = "Local file/directory permission denied - using elevated privileges" + commands.append( + FixCommand( + command_template=f"sudo {command}", + purpose=f"Retry with elevated privileges for local path{' ' + path if path else ''}", + requires_sudo=True, + ) + ) + # URL-based permission - handle login elif category in [ ErrorCategory.PERMISSION_DENIED_URL, @@ -941,178 +952,210 @@ def _generate_fallback_fix_plan( service = info.get("service", "unknown") host = info.get("host", "unknown") reasoning = f"URL/remote access denied - requires authentication to {service or host}" - + # Generate login command based on service if service == "docker" or service == "ghcr" or "registry" in category.value: registry = host if host != "unknown" else "{registry}" - commands.extend([ - FixCommand( - command_template=f"docker login {registry}", - 
purpose=f"Login to container registry {registry}", - ), - FixCommand( - command_template=command, - purpose="Retry original command after login", - ), - ]) + commands.extend( + [ + FixCommand( + command_template=f"docker login {registry}", + purpose=f"Login to container registry {registry}", + ), + FixCommand( + command_template=command, + purpose="Retry original command after login", + ), + ] + ) elif service == "git_https" or "repo" in category.value: - commands.extend([ - FixCommand( - command_template="git config --global credential.helper store", - purpose="Enable credential storage for git", - ), - FixCommand( - command_template=command, - purpose="Retry original command (will prompt for credentials)", - ), - ]) + commands.extend( + [ + FixCommand( + command_template="git config --global credential.helper store", + purpose="Enable credential storage for git", + ), + FixCommand( + command_template=command, + purpose="Retry original command (will prompt for credentials)", + ), + ] + ) elif service == "npm": - commands.extend([ - FixCommand( - command_template="npm login", - purpose="Login to npm registry", - ), - FixCommand( - command_template=command, - purpose="Retry original command after login", - ), - ]) + commands.extend( + [ + FixCommand( + command_template="npm login", + purpose="Login to npm registry", + ), + FixCommand( + command_template=command, + purpose="Retry original command after login", + ), + ] + ) elif service == "aws": - commands.extend([ + commands.extend( + [ + FixCommand( + command_template="aws configure", + purpose="Configure AWS credentials", + ), + FixCommand( + command_template=command, + purpose="Retry original command after configuration", + ), + ] + ) + else: + # Generic login placeholder + commands.append( FixCommand( - command_template="aws configure", - purpose="Configure AWS credentials", - ), + command_template="{login_command}", + purpose=f"Login to {service or host}", + ) + ) + commands.append( FixCommand( 
command_template=command, - purpose="Retry original command after configuration", - ), - ]) - else: - # Generic login placeholder - commands.append(FixCommand( - command_template="{login_command}", - purpose=f"Login to {service or host}", - )) - commands.append(FixCommand( - command_template=command, - purpose="Retry original command after login", - )) - + purpose="Retry original command after login", + ) + ) + # Legacy generic permission denied - try to determine type elif category == ErrorCategory.PERMISSION_DENIED: - commands.append(FixCommand( - command_template=f"sudo {command}", - purpose="Retry with elevated privileges", - requires_sudo=True, - )) - + commands.append( + FixCommand( + command_template=f"sudo {command}", + purpose="Retry with elevated privileges", + requires_sudo=True, + ) + ) + elif category == ErrorCategory.FILE_NOT_FOUND: file_path = info.get("file", "{file_path}") - commands.append(FixCommand( - command_template=f"touch {file_path}", - purpose=f"Create missing file", - )) - + commands.append( + FixCommand( + command_template=f"touch {file_path}", + purpose="Create missing file", + ) + ) + elif category == ErrorCategory.DIRECTORY_NOT_FOUND: dir_path = info.get("directory", info.get("parent_directory", "{dir_path}")) - commands.append(FixCommand( - command_template=f"mkdir -p {dir_path}", - purpose="Create missing directory", - )) - + commands.append( + FixCommand( + command_template=f"mkdir -p {dir_path}", + purpose="Create missing directory", + ) + ) + elif category == ErrorCategory.COMMAND_NOT_FOUND: # Try to guess package from command cmd_name = command.split()[0] if command else "{package}" - commands.append(FixCommand( - command_template=f"sudo apt install -y {{package}}", - purpose=f"Install package providing the command", - requires_sudo=True, - )) - + commands.append( + FixCommand( + command_template="sudo apt install -y {package}", + purpose="Install package providing the command", + requires_sudo=True, + ) + ) + elif category == 
ErrorCategory.SERVICE_NOT_RUNNING: service = info.get("service", "{service}") - commands.append(FixCommand( - command_template=f"sudo systemctl start {service}", - purpose="Start the service", - requires_sudo=True, - )) - + commands.append( + FixCommand( + command_template=f"sudo systemctl start {service}", + purpose="Start the service", + requires_sudo=True, + ) + ) + elif category == ErrorCategory.LOGIN_REQUIRED: service = info.get("service", "{service}") - commands.append(FixCommand( - command_template="{login_command}", - purpose=f"Login to {service}", - )) - + commands.append( + FixCommand( + command_template="{login_command}", + purpose=f"Login to {service}", + ) + ) + elif category == ErrorCategory.PACKAGE_NOT_FOUND: package = info.get("package", "{package}") - commands.extend([ - FixCommand( - command_template="sudo apt update", - purpose="Update package lists", - requires_sudo=True, - ), - FixCommand( - command_template=f"sudo apt install -y {package}", - purpose=f"Install the package", - requires_sudo=True, - ), - ]) - + commands.extend( + [ + FixCommand( + command_template="sudo apt update", + purpose="Update package lists", + requires_sudo=True, + ), + FixCommand( + command_template=f"sudo apt install -y {package}", + purpose="Install the package", + requires_sudo=True, + ), + ] + ) + elif category == ErrorCategory.PORT_IN_USE: port = info.get("port", "{port}") - commands.extend([ - FixCommand( - command_template=f"sudo lsof -i :{port}", - purpose="Find process using the port", - requires_sudo=True, - ), - FixCommand( - command_template="sudo kill -9 {pid}", - purpose="Kill the process using the port", - requires_sudo=True, - ), - ]) - + commands.extend( + [ + FixCommand( + command_template=f"sudo lsof -i :{port}", + purpose="Find process using the port", + requires_sudo=True, + ), + FixCommand( + command_template="sudo kill -9 {pid}", + purpose="Kill the process using the port", + requires_sudo=True, + ), + ] + ) + elif category == 
ErrorCategory.CONFIG_SYNTAX_ERROR: config_file = info.get("config", info.get("nginx_config", "{config_file}")) - commands.extend([ - FixCommand( - command_template=f"cat -n {config_file}", - purpose="Show config file with line numbers", - ), - FixCommand( - command_template=f"sudo nano {config_file}", - purpose="Edit config file to fix syntax", - requires_sudo=True, - ), - ]) - + commands.extend( + [ + FixCommand( + command_template=f"cat -n {config_file}", + purpose="Show config file with line numbers", + ), + FixCommand( + command_template=f"sudo nano {config_file}", + purpose="Edit config file to fix syntax", + requires_sudo=True, + ), + ] + ) + else: # Generic retry with sudo - commands.append(FixCommand( - command_template=f"sudo {command}", - purpose="Retry with elevated privileges", - requires_sudo=True, - )) - + commands.append( + FixCommand( + command_template=f"sudo {command}", + purpose="Retry with elevated privileges", + requires_sudo=True, + ) + ) + plan = FixPlan( category=diagnosis.category, commands=commands, reasoning=reasoning, ) - + self._print_fix_plan(plan) return plan # ========================================================================= # STEP 3: Resolve Variables # ========================================================================= - + def resolve_variables( - self, - fix_plan: FixPlan, + self, + fix_plan: FixPlan, original_query: str, command: str, diagnosis: DiagnosisResult, @@ -1124,66 +1167,67 @@ def resolve_variables( 3. 
system_info_command_generator """ self._log_step(3, "Resolving variables") - + if not fix_plan.all_variables: console.print("[dim] No variables to resolve[/dim]") return {} - + console.print(f"[cyan] Variables to resolve: {', '.join(fix_plan.all_variables)}[/cyan]") - + resolved: dict[str, str] = {} - + for var_name in fix_plan.all_variables: # Check cache first if var_name in self.variable_cache: resolved[var_name] = self.variable_cache[var_name] console.print(f"[dim] {var_name}: {resolved[var_name]} (cached)[/dim]") continue - + # Try extraction from diagnosis info value = self._try_extract_from_diagnosis(var_name, diagnosis) if value: resolved[var_name] = value console.print(f"[green] ✓ {var_name}: {value} (from error)[/green]") continue - + # Try extraction from query value = self._try_extract_from_query(var_name, original_query) if value: resolved[var_name] = value console.print(f"[green] ✓ {var_name}: {value} (from query)[/green]") continue - + # Try system_info_command_generator value = self._try_system_info(var_name, command, diagnosis) if value: resolved[var_name] = value console.print(f"[green] ✓ {var_name}: {value} (from system)[/green]") continue - + # Fall back to LLM value = self._try_llm_resolution(var_name, original_query, command, diagnosis) if value: resolved[var_name] = value console.print(f"[green] ✓ {var_name}: {value} (from LLM)[/green]") continue - + # Prompt user as last resort console.print(f"[yellow] ⚠ Could not resolve {var_name}[/yellow]") try: from rich.prompt import Prompt + value = Prompt.ask(f" Enter value for {var_name}") if value: resolved[var_name] = value console.print(f"[green] ✓ {var_name}: {value} (from user)[/green]") except Exception: pass - + # Update cache self.variable_cache.update(resolved) - + return resolved - + def _try_extract_from_diagnosis(self, var_name: str, diagnosis: DiagnosisResult) -> str | None: """Try to extract variable from diagnosis extracted_info.""" # Map variable names to diagnosis info keys @@ 
-1197,14 +1241,14 @@ def _try_extract_from_diagnosis(self, var_name: str, diagnosis: DiagnosisResult) "user": ["user"], "image": ["image", "repository"], } - + keys_to_check = mappings.get(var_name, [var_name]) for key in keys_to_check: if key in diagnosis.extracted_info and diagnosis.extracted_info[key]: return diagnosis.extracted_info[key] - + return None - + def _try_extract_from_query(self, var_name: str, query: str) -> str | None: """Try to extract variable from the original query.""" # Pattern-based extraction from query @@ -1216,27 +1260,29 @@ def _try_extract_from_query(self, var_name: str, query: str) -> str | None: "port": [r"port\s+(\d+)", r":(\d{2,5})"], "image": [r"image\s+([^\s]+)", r"docker.*\s+([^\s]+:[^\s]*)"], } - + if var_name in patterns: for pattern in patterns[var_name]: match = re.search(pattern, query, re.IGNORECASE) if match: return match.group(1) - + return None - - def _try_system_info(self, var_name: str, command: str, diagnosis: DiagnosisResult) -> str | None: + + def _try_system_info( + self, var_name: str, command: str, diagnosis: DiagnosisResult + ) -> str | None: """Use system_info_command_generator to get variable value.""" try: from cortex.system_info_generator import SystemInfoGenerator - + # System info commands for different variable types system_queries = { "user": "whoami", "home_dir": "echo $HOME", "current_dir": "pwd", } - + if var_name in system_queries: result = subprocess.run( system_queries[var_name], @@ -1247,7 +1293,7 @@ def _try_system_info(self, var_name: str, command: str, diagnosis: DiagnosisResu ) if result.returncode == 0 and result.stdout.strip(): return result.stdout.strip() - + # For package commands, try to find the package if var_name == "package": cmd_name = command.split()[0] if command else "" @@ -1268,7 +1314,7 @@ def _try_system_info(self, var_name: str, command: str, diagnosis: DiagnosisResu } if cmd_name in package_map: return package_map[cmd_name] - + # Try apt-file search if available result = 
subprocess.run( f"apt-file search --regexp 'bin/{cmd_name}$' 2>/dev/null | head -1 | cut -d: -f1", @@ -1279,31 +1325,31 @@ def _try_system_info(self, var_name: str, command: str, diagnosis: DiagnosisResu ) if result.returncode == 0 and result.stdout.strip(): return result.stdout.strip() - + # For service names, try systemctl if var_name == "service": # Extract service name from command if present - service_match = re.search(r'systemctl\s+\w+\s+(\S+)', command) + service_match = re.search(r"systemctl\s+\w+\s+(\S+)", command) if service_match: return service_match.group(1) - + except Exception as e: if self.debug: console.print(f"[dim] System info failed for {var_name}: {e}[/dim]") - + return None - + def _try_llm_resolution( - self, - var_name: str, - query: str, + self, + var_name: str, + query: str, command: str, diagnosis: DiagnosisResult, ) -> str | None: """Use LLM to resolve variable value.""" if not self.client: return None - + prompt = f"""Extract the value for variable '{var_name}' from this context: Query: {query} @@ -1315,18 +1361,18 @@ def _try_llm_resolution( try: response = self._call_llm("You extract specific values from context.", prompt) - value = response.strip().strip('"\'') + value = response.strip().strip("\"'") if value and value.upper() != "UNKNOWN": return value except Exception: pass - + return None # ========================================================================= # URL AUTHENTICATION HANDLING # ========================================================================= - + def handle_url_authentication( self, command: str, @@ -1334,38 +1380,38 @@ def handle_url_authentication( ) -> tuple[bool, str]: """ Handle URL-based permission errors by prompting for login. - + Uses LoginHandler to: 1. Detect the service/website 2. Prompt for credentials 3. Store credentials for future use 4. 
Execute login command - + Returns: Tuple of (success, message) """ console.print("\n[bold cyan]🔐 URL Authentication Required[/bold cyan]") - + if not self._login_handler: console.print("[yellow]⚠ LoginHandler not available[/yellow]") return False, "LoginHandler not available" - + service = diagnosis.extracted_info.get("service", "unknown") host = diagnosis.extracted_info.get("host", "") - + console.print(f"[dim] Service: {service}[/dim]") console.print(f"[dim] Host: {host}[/dim]") - + try: # Use LoginHandler to manage authentication login_req = self._login_handler.detect_login_requirement(command, diagnosis.raw_stderr) - + if login_req: console.print(f"\n[cyan]📝 Login to {login_req.display_name}[/cyan]") - + # Handle login (will prompt, execute, and optionally save credentials) success, message = self._login_handler.handle_login(command, diagnosis.raw_stderr) - + if success: console.print(f"[green]✓ {message}[/green]") return True, message @@ -1376,34 +1422,34 @@ def handle_url_authentication( # No matching login requirement, try generic approach console.print("[yellow] Unknown service, trying generic login...[/yellow]") return self._handle_generic_login(command, diagnosis) - + except Exception as e: console.print(f"[red]✗ Authentication error: {e}[/red]") return False, str(e) - + def _handle_generic_login( self, command: str, diagnosis: DiagnosisResult, ) -> tuple[bool, str]: """Handle login for unknown services with interactive prompts.""" - from rich.prompt import Prompt, Confirm - + from rich.prompt import Confirm, Prompt + host = diagnosis.extracted_info.get("host", "unknown service") - + console.print(f"\n[cyan]Login required for: {host}[/cyan]") - + try: # Prompt for credentials username = Prompt.ask("Username") if not username: return False, "Username is required" - + password = Prompt.ask("Password", password=True) - + # Determine login command based on command context login_cmd = None - + if "docker" in command.lower(): registry = 
diagnosis.extracted_info.get("host", "") login_cmd = f"docker login {registry}" if registry else "docker login" @@ -1415,10 +1461,10 @@ def _handle_generic_login( login_cmd = "npm login" elif "pip" in command.lower() or "pypi" in host.lower(): login_cmd = f"pip config set global.index-url https://{username}:{{password}}@pypi.org/simple/" - + if login_cmd: console.print(f"[dim] Running: {login_cmd}[/dim]") - + # Execute login with password via stdin if needed if "{password}" in login_cmd: login_cmd = login_cmd.replace("{password}", password) @@ -1432,22 +1478,27 @@ def _handle_generic_login( capture_output=True, text=True, ) - + if result.returncode == 0: # Offer to save credentials - if self._login_handler and Confirm.ask("Save credentials for future use?", default=True): - self._login_handler._save_credentials(host, { - "username": username, - "password": password, - }) + if self._login_handler and Confirm.ask( + "Save credentials for future use?", default=True + ): + self._login_handler._save_credentials( + host, + { + "username": username, + "password": password, + }, + ) console.print("[green]✓ Credentials saved[/green]") - + return True, f"Logged in to {host}" else: return False, f"Login failed: {result.stderr[:200]}" - + return False, "Could not determine login command" - + except KeyboardInterrupt: return False, "Login cancelled" except Exception as e: @@ -1456,41 +1507,43 @@ def _handle_generic_login( # ========================================================================= # STEP 4: Execute Fix Commands # ========================================================================= - + def execute_fix_commands( - self, - fix_plan: FixPlan, - resolved_variables: dict[str, str] + self, fix_plan: FixPlan, resolved_variables: dict[str, str] ) -> list[ExecutionResult]: """ Step 4: Execute fix commands with resolved variables. 
""" self._log_step(4, "Executing fix commands") - + results: list[ExecutionResult] = [] - + for i, fix_cmd in enumerate(fix_plan.commands, 1): # Substitute variables command = fix_cmd.command_template for var_name, value in resolved_variables.items(): command = command.replace(f"{{{var_name}}}", value) - + # Check for unresolved variables - unresolved = re.findall(r'\{(\w+)\}', command) + unresolved = re.findall(r"\{(\w+)\}", command) if unresolved: - console.print(f"[yellow] ⚠ Skipping command with unresolved variables: {unresolved}[/yellow]") - results.append(ExecutionResult( - command=command, - success=False, - stdout="", - stderr=f"Unresolved variables: {unresolved}", - execution_time=0, - )) + console.print( + f"[yellow] ⚠ Skipping command with unresolved variables: {unresolved}[/yellow]" + ) + results.append( + ExecutionResult( + command=command, + success=False, + stdout="", + stderr=f"Unresolved variables: {unresolved}", + execution_time=0, + ) + ) continue - + console.print(f"\n[cyan] [{i}/{len(fix_plan.commands)}] {command}[/cyan]") console.print(f"[dim] └─ {fix_cmd.purpose}[/dim]") - + # Execute start_time = time.time() try: @@ -1502,7 +1555,7 @@ def execute_fix_commands( timeout=120, ) execution_time = time.time() - start_time - + exec_result = ExecutionResult( command=command, success=result.returncode == 0, @@ -1510,64 +1563,70 @@ def execute_fix_commands( stderr=result.stderr.strip(), execution_time=execution_time, ) - + if exec_result.success: console.print(f"[green] ✓ Success ({execution_time:.2f}s)[/green]") if exec_result.stdout and self.debug: console.print(f"[dim] Output: {exec_result.stdout[:200]}[/dim]") else: console.print(f"[red] ✗ Failed: {exec_result.stderr[:200]}[/red]") - + results.append(exec_result) - + # Log to history - self.execution_history.append({ - "command": command, - "success": exec_result.success, - "stderr": exec_result.stderr[:500], - "timestamp": time.time(), - }) - + self.execution_history.append( + { + "command": 
command, + "success": exec_result.success, + "stderr": exec_result.stderr[:500], + "timestamp": time.time(), + } + ) + except subprocess.TimeoutExpired: - console.print(f"[red] ✗ Timeout after 120s[/red]") - results.append(ExecutionResult( - command=command, - success=False, - stdout="", - stderr="Command timed out", - execution_time=120, - )) + console.print("[red] ✗ Timeout after 120s[/red]") + results.append( + ExecutionResult( + command=command, + success=False, + stdout="", + stderr="Command timed out", + execution_time=120, + ) + ) except Exception as e: console.print(f"[red] ✗ Error: {e}[/red]") - results.append(ExecutionResult( - command=command, - success=False, - stdout="", - stderr=str(e), - execution_time=time.time() - start_time, - )) - + results.append( + ExecutionResult( + command=command, + success=False, + stdout="", + stderr=str(e), + execution_time=time.time() - start_time, + ) + ) + return results # ========================================================================= # STEP 5 & 6: Error Stack Management and Retry Logic # ========================================================================= - + def push_error(self, entry: ErrorStackEntry) -> None: """Push an error onto the stack.""" if len(self.error_stack) >= self.MAX_STACK_DEPTH: console.print(f"[red]⚠ Error stack depth limit ({self.MAX_STACK_DEPTH}) reached[/red]") return - + self.error_stack.append(entry) self._print_error_stack() - + def pop_error(self) -> ErrorStackEntry | None: """Pop an error from the stack.""" if self.error_stack: return self.error_stack.pop() return None - + def diagnose_and_fix( self, command: str, @@ -1578,18 +1637,20 @@ def diagnose_and_fix( ) -> tuple[bool, str]: """ Main diagnosis and fix flow. 
- + Returns: Tuple of (success, message) """ - console.print(Panel( - f"[bold]Starting Diagnosis[/bold]\n" - f"Command: [cyan]{command}[/cyan]\n" - f"Intent: {intent}", - title="🔧 Cortex Diagnosis Engine", - border_style="blue", - )) - + console.print( + Panel( + f"[bold]Starting Diagnosis[/bold]\n" + f"Command: [cyan]{command}[/cyan]\n" + f"Intent: {intent}", + title="🔧 Cortex Diagnosis Engine", + border_style="blue", + ) + ) + # Push initial error to stack initial_entry = ErrorStackEntry( original_command=command, @@ -1598,23 +1659,27 @@ def diagnose_and_fix( category=ErrorCategory.UNKNOWN, # Will be set in Step 1 ) self.push_error(initial_entry) - + # Process error stack while self.error_stack: entry = self.error_stack[-1] # Peek at top - + if entry.fix_attempts >= self.MAX_FIX_ATTEMPTS: - console.print(f"[red]✗ Max fix attempts ({self.MAX_FIX_ATTEMPTS}) reached for command[/red]") + console.print( + f"[red]✗ Max fix attempts ({self.MAX_FIX_ATTEMPTS}) reached for command[/red]" + ) self.pop_error() continue - + entry.fix_attempts += 1 - console.print(f"\n[bold]Fix Attempt {entry.fix_attempts}/{self.MAX_FIX_ATTEMPTS}[/bold]") - + console.print( + f"\n[bold]Fix Attempt {entry.fix_attempts}/{self.MAX_FIX_ATTEMPTS}[/bold]" + ) + # Step 1: Categorize error diagnosis = self.categorize_error(entry.original_command, entry.error) entry.category = diagnosis.category - + # SPECIAL HANDLING: URL-based permission errors need authentication url_auth_categories = [ ErrorCategory.PERMISSION_DENIED_URL, @@ -1623,19 +1688,20 @@ def diagnose_and_fix( ErrorCategory.ACCESS_DENIED_API, ErrorCategory.LOGIN_REQUIRED, ] - + if diagnosis.category in url_auth_categories: - console.print(f"[cyan]🌐 URL-based access error detected - handling authentication[/cyan]") - + console.print( + "[cyan]🌐 URL-based access error detected - handling authentication[/cyan]" + ) + auth_success, auth_message = self.handle_url_authentication( - entry.original_command, - diagnosis + entry.original_command, 
diagnosis ) - + if auth_success: # Re-test the original command after login - console.print(f"\n[cyan]📋 Testing original command after login...[/cyan]") - + console.print("\n[cyan]📋 Testing original command after login...[/cyan]") + test_result = subprocess.run( entry.original_command, shell=True, @@ -1643,9 +1709,9 @@ def diagnose_and_fix( text=True, timeout=120, ) - + if test_result.returncode == 0: - console.print(f"[green]✓ Command succeeded after authentication![/green]") + console.print("[green]✓ Command succeeded after authentication![/green]") self.pop_error() if not self.error_stack: return True, f"Fixed via authentication: {auth_message}" @@ -1653,42 +1719,40 @@ def diagnose_and_fix( else: # Different error after login entry.error = test_result.stderr.strip() - console.print(f"[yellow]⚠ New error after login, continuing diagnosis...[/yellow]") + console.print( + "[yellow]⚠ New error after login, continuing diagnosis...[/yellow]" + ) continue else: console.print(f"[yellow]⚠ Authentication failed: {auth_message}[/yellow]") # Continue with normal fix flow - + # Step 2: Generate fix plan - fix_plan = self.generate_fix_plan( - entry.original_command, - entry.intent, - diagnosis - ) + fix_plan = self.generate_fix_plan(entry.original_command, entry.intent, diagnosis) entry.fix_plan = fix_plan - + # Step 3: Resolve variables resolved_vars = self.resolve_variables( - fix_plan, + fix_plan, original_query, entry.original_command, diagnosis, ) - + # Check if all variables resolved unresolved = fix_plan.all_variables - set(resolved_vars.keys()) if unresolved: console.print(f"[yellow]⚠ Could not resolve all variables: {unresolved}[/yellow]") # Continue anyway with what we have - + # Step 4: Execute fix commands results = self.execute_fix_commands(fix_plan, resolved_vars) - + # Check for errors in fix commands (Step 5) fix_errors = [r for r in results if not r.success] if fix_errors: console.print(f"\n[yellow]⚠ {len(fix_errors)} fix command(s) failed[/yellow]") - + # 
Push the first error back to stack for diagnosis first_error = fix_errors[0] if first_error.stderr and "Unresolved variables" not in first_error.stderr: @@ -1700,10 +1764,10 @@ def diagnose_and_fix( ) self.push_error(new_entry) continue - + # Step 6: Test original command console.print(f"\n[cyan]📋 Testing original command: {entry.original_command}[/cyan]") - + test_result = subprocess.run( entry.original_command, shell=True, @@ -1711,30 +1775,30 @@ def diagnose_and_fix( text=True, timeout=120, ) - + if test_result.returncode == 0: - console.print(f"[green]✓ Original command now succeeds![/green]") + console.print("[green]✓ Original command now succeeds![/green]") self.pop_error() - + # Check if stack is empty if not self.error_stack: return True, "All errors resolved successfully" else: new_error = test_result.stderr.strip() - console.print(f"[yellow]⚠ Original command still fails[/yellow]") - + console.print("[yellow]⚠ Original command still fails[/yellow]") + if new_error != entry.error: - console.print(f"[cyan] New error detected, updating...[/cyan]") + console.print("[cyan] New error detected, updating...[/cyan]") entry.error = new_error # Loop will continue with same entry - + # Stack empty but we didn't explicitly succeed return False, "Could not resolve all errors" # ========================================================================= # HELPERS # ========================================================================= - + def _call_llm(self, system_prompt: str, user_prompt: str) -> str: """Call the LLM and return response text.""" if self.provider == "claude": @@ -1757,52 +1821,59 @@ def _call_llm(self, system_prompt: str, user_prompt: str) -> str: return response.choices[0].message.content else: raise ValueError(f"Unsupported provider: {self.provider}") - + def _log_step(self, step_num: int, description: str) -> None: """Log a diagnosis step.""" console.print(f"\n[bold blue]Step {step_num}:[/bold blue] {description}") - + def _print_diagnosis(self, 
diagnosis: DiagnosisResult, command: str) -> None: """Print diagnosis result.""" table = Table(title="Error Diagnosis", show_header=False, border_style="dim") table.add_column("Field", style="bold") table.add_column("Value") - + table.add_row("Category", f"[cyan]{diagnosis.category.value}[/cyan]") table.add_row("Confidence", f"{diagnosis.confidence:.0%}") - + if diagnosis.extracted_info: info_str = ", ".join(f"{k}={v}" for k, v in diagnosis.extracted_info.items() if v) table.add_row("Extracted", info_str) - - table.add_row("Error", diagnosis.error_message[:100] + "..." if len(diagnosis.error_message) > 100 else diagnosis.error_message) - + + table.add_row( + "Error", + ( + diagnosis.error_message[:100] + "..." + if len(diagnosis.error_message) > 100 + else diagnosis.error_message + ), + ) + console.print(table) - + def _print_fix_plan(self, plan: FixPlan) -> None: """Print fix plan.""" console.print(f"\n[bold]Fix Plan:[/bold] {plan.reasoning}") - + for i, cmd in enumerate(plan.commands, 1): sudo_tag = "[sudo]" if cmd.requires_sudo else "" vars_tag = f"[vars: {', '.join(cmd.variables)}]" if cmd.variables else "" console.print(f" {i}. 
[cyan]{cmd.command_template}[/cyan] {sudo_tag} {vars_tag}") console.print(f" [dim]{cmd.purpose}[/dim]") - + def _print_error_stack(self) -> None: """Print current error stack.""" if not self.error_stack: console.print("[dim] Error stack: empty[/dim]") return - + tree = Tree("[bold]Error Stack[/bold]") for i, entry in enumerate(reversed(self.error_stack)): branch = tree.add(f"[{'yellow' if i == 0 else 'dim'}]{entry.original_command[:50]}[/]") branch.add(f"[dim]Category: {entry.category.value}[/dim]") branch.add(f"[dim]Attempts: {entry.fix_attempts}[/dim]") - + console.print(tree) - + def get_execution_summary(self) -> dict[str, Any]: """Get summary of all executions.""" return { @@ -1818,6 +1889,7 @@ def get_execution_summary(self) -> dict[str, Any]: # FACTORY FUNCTION # ============================================================================= + def get_diagnosis_engine( provider: str = "claude", debug: bool = False, @@ -1833,25 +1905,27 @@ def get_diagnosis_engine( if __name__ == "__main__": import sys - + console.print("[bold]Diagnosis Engine Test[/bold]\n") - + engine = get_diagnosis_engine(debug=True) - + # Test error categorization test_cases = [ ("cat /nonexistent/file", "cat: /nonexistent/file: No such file or directory"), ("docker pull ghcr.io/test/image", "Error: Non-null Username Required"), ("apt install fakepackage", "E: Unable to locate package fakepackage"), - ("nginx -t", "nginx: [emerg] unknown directive \"invalid\" in /etc/nginx/nginx.conf:10"), - ("systemctl start myservice", "Failed to start myservice.service: Unit myservice.service not found."), + ("nginx -t", 'nginx: [emerg] unknown directive "invalid" in /etc/nginx/nginx.conf:10'), + ( + "systemctl start myservice", + "Failed to start myservice.service: Unit myservice.service not found.", + ), ] - + for cmd, error in test_cases: console.print(f"\n[bold]Test:[/bold] {cmd}") console.print(f"[dim]Error: {error}[/dim]") - + diagnosis = engine.categorize_error(cmd, error) 
console.print(f"[green]Category: {diagnosis.category.value}[/green]") console.print("") - diff --git a/cortex/do_runner/executor.py b/cortex/do_runner/executor.py index dce6b0c7f..15769fcda 100644 --- a/cortex/do_runner/executor.py +++ b/cortex/do_runner/executor.py @@ -3,7 +3,8 @@ import os import subprocess import time -from typing import Any, Callable +from collections.abc import Callable +from typing import Any from rich.console import Console from rich.prompt import Confirm @@ -24,7 +25,7 @@ class TaskTreeExecutor: """ Executes a task tree with auto-repair capabilities. - + This handles: - Executing commands in order - Spawning repair sub-tasks when commands fail @@ -32,7 +33,7 @@ class TaskTreeExecutor: - Monitoring terminals during manual intervention - Providing detailed reasoning for failures """ - + def __init__( self, user_manager: type, @@ -46,10 +47,10 @@ def __init__( self._granted_privileges: list[str] = [] self._permission_sets_requested: int = 0 self._terminal_monitor: TerminalMonitor | None = None - + self._in_manual_mode = False self._manual_commands_executed: list[dict] = [] - + def build_tree_from_commands( self, commands: list[dict[str, str]], @@ -61,7 +62,7 @@ def build_tree_from_commands( purpose=cmd.get("purpose", ""), ) return self.tree - + def execute_tree( self, confirm_callback: Callable[[list[TaskNode]], bool] | None = None, @@ -69,7 +70,7 @@ def execute_tree( ) -> tuple[bool, str]: """ Execute the task tree with auto-repair. 
- + Returns: Tuple of (success, summary) """ @@ -77,14 +78,14 @@ def execute_tree( total_failed = 0 total_repaired = 0 repair_details = [] - + for root_task in self.tree.root_tasks: success, repaired = self._execute_task_with_repair( root_task, confirm_callback, notify_callback, ) - + if success: total_success += 1 if repaired: @@ -92,22 +93,24 @@ def execute_tree( else: total_failed += 1 if root_task.failure_reason: - repair_details.append(f"- {root_task.command[:40]}...: {root_task.failure_reason}") - + repair_details.append( + f"- {root_task.command[:40]}...: {root_task.failure_reason}" + ) + summary_parts = [ f"Completed: {total_success}", f"Failed: {total_failed}", ] if total_repaired > 0: summary_parts.append(f"Auto-repaired: {total_repaired}") - + summary = f"Tasks: {' | '.join(summary_parts)}" - + if repair_details: summary += "\n\nFailure reasons:\n" + "\n".join(repair_details) - + return total_failed == 0, summary - + def _execute_task_with_repair( self, task: TaskNode, @@ -116,33 +119,35 @@ def _execute_task_with_repair( ) -> tuple[bool, bool]: """Execute a task and attempt repair if it fails.""" was_repaired = False - + task.status = CommandStatus.RUNNING success, output, error, duration = self._execute_command(task.command) - + task.output = output task.error = error task.duration_seconds = duration - + if success: task.status = CommandStatus.SUCCESS console.print(f"[green]✓[/green] {task.purpose}") return True, False - + task.status = CommandStatus.NEEDS_REPAIR diagnosis = self._diagnose_error(task.command, error, output) task.failure_reason = diagnosis.get("description", "Unknown error") - + console.print(f"[yellow]⚠[/yellow] {task.purpose} - {diagnosis['error_type']}") console.print(f"[dim] └─ {diagnosis['description']}[/dim]") - + if diagnosis.get("can_auto_fix") and task.repair_attempts < task.max_repair_attempts: task.repair_attempts += 1 fix_commands = diagnosis.get("fix_commands", []) - + if fix_commands: - console.print(f"[cyan]🔧 Attempting 
auto-repair ({task.repair_attempts}/{task.max_repair_attempts})...[/cyan]") - + console.print( + f"[cyan]🔧 Attempting auto-repair ({task.repair_attempts}/{task.max_repair_attempts})...[/cyan]" + ) + new_paths = self._identify_paths_needing_privileges(fix_commands) if new_paths and confirm_callback: repair_tasks = [] @@ -154,10 +159,12 @@ def _execute_task_with_repair( reasoning=diagnosis.get("reasoning", ""), ) repair_tasks.append(repair_task) - + self._permission_sets_requested += 1 - console.print(f"\n[yellow]🔐 Permission request #{self._permission_sets_requested} for repair commands:[/yellow]") - + console.print( + f"\n[yellow]🔐 Permission request #{self._permission_sets_requested} for repair commands:[/yellow]" + ) + if confirm_callback(repair_tasks): all_repairs_success = True for repair_task in repair_tasks: @@ -166,18 +173,22 @@ def _execute_task_with_repair( ) if not repair_success: all_repairs_success = False - + if all_repairs_success: - console.print(f"[cyan]↻ Retrying original command...[/cyan]") + console.print("[cyan]↻ Retrying original command...[/cyan]") success, output, error, duration = self._execute_command(task.command) task.output = output task.error = error task.duration_seconds += duration - + if success: task.status = CommandStatus.SUCCESS - task.reasoning = f"Auto-repaired after {task.repair_attempts} attempt(s)" - console.print(f"[green]✓[/green] {task.purpose} [dim](repaired)[/dim]") + task.reasoning = ( + f"Auto-repaired after {task.repair_attempts} attempt(s)" + ) + console.print( + f"[green]✓[/green] {task.purpose} [dim](repaired)[/dim]" + ) return True, True else: all_repairs_success = True @@ -193,28 +204,32 @@ def _execute_task_with_repair( ) if not repair_success: all_repairs_success = False - + if all_repairs_success: - console.print(f"[cyan]↻ Retrying original command...[/cyan]") + console.print("[cyan]↻ Retrying original command...[/cyan]") success, output, error, duration = self._execute_command(task.command) task.output = 
output task.error = error task.duration_seconds += duration - + if success: task.status = CommandStatus.SUCCESS - task.reasoning = f"Auto-repaired after {task.repair_attempts} attempt(s)" + task.reasoning = ( + f"Auto-repaired after {task.repair_attempts} attempt(s)" + ) console.print(f"[green]✓[/green] {task.purpose} [dim](repaired)[/dim]") return True, True - + task.status = CommandStatus.FAILED task.reasoning = self._generate_failure_reasoning(task, diagnosis) - + if diagnosis.get("manual_suggestion") and notify_callback: - console.print(f"\n[yellow]📋 Manual intervention suggested:[/yellow]") + console.print("\n[yellow]📋 Manual intervention suggested:[/yellow]") console.print(f"[dim]{diagnosis['manual_suggestion']}[/dim]") - - if Confirm.ask("Would you like to run this manually while Cortex monitors?", default=False): + + if Confirm.ask( + "Would you like to run this manually while Cortex monitors?", default=False + ): success = self._supervise_manual_intervention( task, diagnosis.get("manual_suggestion", ""), @@ -224,22 +239,22 @@ def _execute_task_with_repair( task.status = CommandStatus.SUCCESS task.reasoning = "Completed via manual intervention with Cortex monitoring" return True, True - + console.print(f"\n[red]✗ Failed:[/red] {task.purpose}") console.print(f"[dim] Reason: {task.reasoning}[/dim]") - + return False, was_repaired - + def _execute_command(self, command: str) -> tuple[bool, str, str, float]: """Execute a command.""" start_time = time.time() - + try: needs_sudo = self._needs_sudo(command) - + if needs_sudo and not command.strip().startswith("sudo"): command = f"sudo {command}" - + result = subprocess.run( command, shell=True, @@ -247,45 +262,71 @@ def _execute_command(self, command: str) -> tuple[bool, str, str, float]: text=True, timeout=300, ) - + duration = time.time() - start_time success = result.returncode == 0 - + return success, result.stdout, result.stderr, duration - + except subprocess.TimeoutExpired: return False, "", "Command timed 
out after 300 seconds", time.time() - start_time except Exception as e: return False, "", str(e), time.time() - start_time - + def _needs_sudo(self, command: str) -> bool: """Determine if a command needs sudo.""" sudo_keywords = [ - "systemctl", "service", "apt", "apt-get", "dpkg", - "useradd", "usermod", "userdel", "groupadd", - "chmod", "chown", "mount", "umount", "fdisk", - "iptables", "ufw", "firewall-cmd", + "systemctl", + "service", + "apt", + "apt-get", + "dpkg", + "useradd", + "usermod", + "userdel", + "groupadd", + "chmod", + "chown", + "mount", + "umount", + "fdisk", + "iptables", + "ufw", + "firewall-cmd", ] - + system_paths = ["/etc/", "/var/", "/usr/", "/opt/", "/sys/", "/proc/"] - + cmd_parts = command.strip().split() if not cmd_parts: return False - + base_cmd = cmd_parts[0] - + if base_cmd in sudo_keywords: return True - + for part in cmd_parts: for path in system_paths: if path in part: - if any(op in command for op in [">", ">>", "cp ", "mv ", "rm ", "mkdir ", "touch ", "sed ", "tee "]): + if any( + op in command + for op in [ + ">", + ">>", + "cp ", + "mv ", + "rm ", + "mkdir ", + "touch ", + "sed ", + "tee ", + ] + ): return True - + return False - + def _diagnose_error( self, command: str, @@ -295,9 +336,10 @@ def _diagnose_error( """Diagnose why a command failed and suggest repairs.""" error_lower = stderr.lower() combined = (stderr + stdout).lower() - + if "permission denied" in error_lower: import re + path_match = None path_patterns = [ r"cannot (?:create|open|access|stat|remove|modify) (?:regular file |directory )?['\"]?([^'\":\n]+)['\"]?", @@ -309,21 +351,24 @@ def _diagnose_error( if match: path_match = match.group(1).strip() break - + return { "error_type": "Permission Denied", "description": f"Insufficient permissions to access: {path_match or 'unknown path'}", "can_auto_fix": True, - "fix_commands": [f"sudo {command}"] if not command.strip().startswith("sudo") else [], + "fix_commands": ( + [f"sudo {command}"] if not 
command.strip().startswith("sudo") else [] + ), "manual_suggestion": f"Run with sudo: sudo {command}", "reasoning": f"The command tried to access '{path_match or 'a protected resource'}' without sufficient privileges.", } - + if "no such file or directory" in error_lower: import re + path_match = re.search(r"['\"]?([^'\"\n]+)['\"]?: [Nn]o such file", stderr) missing_path = path_match.group(1) if path_match else None - + if missing_path: parent_dir = os.path.dirname(missing_path) if parent_dir: @@ -335,7 +380,7 @@ def _diagnose_error( "manual_suggestion": f"Create the directory: sudo mkdir -p {parent_dir}", "reasoning": f"The target path '{missing_path}' doesn't exist.", } - + return { "error_type": "File Not Found", "description": "A required file or directory does not exist", @@ -344,48 +389,53 @@ def _diagnose_error( "manual_suggestion": "Check the file path and ensure it exists", "reasoning": "The command references a non-existent path.", } - + if "command not found" in error_lower or "not found" in error_lower: import re + cmd_match = re.search(r"(\w+): (?:command )?not found", stderr) missing_cmd = cmd_match.group(1) if cmd_match else None - + return { "error_type": "Command Not Found", "description": f"Command not installed: {missing_cmd or 'unknown'}", "can_auto_fix": bool(missing_cmd), "fix_commands": [f"sudo apt install -y {missing_cmd}"] if missing_cmd else [], - "manual_suggestion": f"Install: sudo apt install {missing_cmd}" if missing_cmd else "Install the required command", + "manual_suggestion": ( + f"Install: sudo apt install {missing_cmd}" + if missing_cmd + else "Install the required command" + ), "reasoning": f"The command '{missing_cmd or 'required'}' is not installed.", } - + return { "error_type": "Unknown Error", "description": stderr[:200] if stderr else "Command failed with no error output", "can_auto_fix": False, "fix_commands": [], "manual_suggestion": f"Review the error and try: {command}", - "reasoning": f"The command failed with an 
unexpected error.", + "reasoning": "The command failed with an unexpected error.", } - + def _generate_failure_reasoning(self, task: TaskNode, diagnosis: dict) -> str: """Generate detailed reasoning for why a task failed.""" parts = [ f"Error type: {diagnosis.get('error_type', 'Unknown')}", f"Description: {diagnosis.get('description', 'No details available')}", ] - + if task.repair_attempts > 0: parts.append(f"Repair attempts: {task.repair_attempts} (all failed)") - + if diagnosis.get("reasoning"): parts.append(f"Analysis: {diagnosis['reasoning']}") - + if diagnosis.get("manual_suggestion"): parts.append(f"Suggestion: {diagnosis['manual_suggestion']}") - + return " | ".join(parts) - + def _identify_paths_needing_privileges(self, commands: list[str]) -> list[str]: """Identify paths in commands that need privilege grants.""" paths = [] @@ -395,7 +445,7 @@ def _identify_paths_needing_privileges(self, commands: list[str]) -> list[str]: if part.startswith("/") and self.paths_manager.is_protected(part): paths.append(part) return paths - + def _supervise_manual_intervention( self, task: TaskNode, @@ -404,28 +454,28 @@ def _supervise_manual_intervention( ) -> bool: """Supervise manual command execution with terminal monitoring.""" self._in_manual_mode = True - + console.print("\n[bold cyan]═══ Manual Intervention Mode ═══[/bold cyan]") - console.print(f"\n[yellow]Run this command in another terminal:[/yellow]") + console.print("\n[yellow]Run this command in another terminal:[/yellow]") console.print(f"[bold]{instruction}[/bold]") - + self._terminal_monitor = TerminalMonitor( notification_callback=lambda title, msg: notify_callback(title, msg) ) self._terminal_monitor.start() - + console.print("\n[dim]Cortex is now monitoring your terminal for issues...[/dim]") - + try: while True: choice = Confirm.ask( "\nHave you completed the manual step?", default=True, ) - + if choice: success = Confirm.ask("Was it successful?", default=True) - + if success: console.print("[green]✓ 
Manual step completed successfully[/green]") return True @@ -434,30 +484,30 @@ def _supervise_manual_intervention( console.print("1. Permission denied") console.print("2. File not found") console.print("3. Other error") - + try: error_choice = int(input("Enter choice (1-3): ")) except ValueError: error_choice = 3 - + if error_choice == 1: console.print(f"[yellow]Try: sudo {instruction}[/yellow]") elif error_choice == 2: console.print("[yellow]Check the file path exists[/yellow]") else: console.print("[yellow]Describe the error and try again[/yellow]") - + continue_trying = Confirm.ask("Continue trying?", default=True) if not continue_trying: return False else: console.print("[dim]Take your time. Cortex is still monitoring...[/dim]") - + finally: self._in_manual_mode = False if self._terminal_monitor: self._terminal_monitor.stop() - + def get_tree_summary(self) -> dict: """Get a summary of the task tree execution.""" return { @@ -465,4 +515,3 @@ def get_tree_summary(self) -> dict: "permission_requests": self._permission_sets_requested, "manual_commands": self._manual_commands_executed, } - diff --git a/cortex/do_runner/handler.py b/cortex/do_runner/handler.py index fbf954b23..17531d8bf 100644 --- a/cortex/do_runner/handler.py +++ b/cortex/do_runner/handler.py @@ -7,8 +7,9 @@ import subprocess import sys import time +from collections.abc import Callable from pathlib import Path -from typing import Any, Callable +from typing import Any from rich.console import Console from rich.panel import Panel @@ -38,7 +39,7 @@ class DoHandler: """Main handler for the --do functionality.""" - + def __init__(self, llm_callback: Callable[[str], dict] | None = None): self.db = DoRunDatabase() self.paths_manager = ProtectedPathsManager() @@ -46,19 +47,19 @@ def __init__(self, llm_callback: Callable[[str], dict] | None = None): self.current_run: DoRun | None = None self._granted_privileges: list[str] = [] self.llm_callback = llm_callback - + self._task_tree: TaskTree | None = None 
self._permission_requests_count = 0 - + self._terminal_monitor: TerminalMonitor | None = None - + # Manual intervention tracking self._expected_manual_commands: list[str] = [] self._completed_manual_commands: list[str] = [] - + # Session tracking self.current_session_id: str | None = None - + # Initialize helper classes self._diagnoser = ErrorDiagnoser() self._auto_fixer = AutoFixer(llm_callback=llm_callback) @@ -66,23 +67,27 @@ def __init__(self, llm_callback: Callable[[str], dict] | None = None): self._conflict_detector = ConflictDetector() self._verification_runner = VerificationRunner() self._file_analyzer = FileUsefulnessAnalyzer() - + # Execution state tracking for interruption handling self._current_process: subprocess.Popen | None = None self._current_command: str | None = None self._executed_commands: list[dict] = [] self._interrupted = False - self._interrupted_command: str | None = None # Track which command was interrupted for retry - self._remaining_commands: list[tuple[str, str, list[str]]] = [] # Commands that weren't executed + self._interrupted_command: str | None = ( + None # Track which command was interrupted for retry + ) + self._remaining_commands: list[tuple[str, str, list[str]]] = ( + [] + ) # Commands that weren't executed self._original_sigtstp = None self._original_sigint = None - + def cleanup(self) -> None: """Clean up any running threads or resources.""" if self._terminal_monitor: self._terminal_monitor.stop() self._terminal_monitor = None - + def _is_json_like(self, text: str) -> bool: """Check if text looks like raw JSON that shouldn't be displayed.""" if not text: @@ -90,31 +95,31 @@ def _is_json_like(self, text: str) -> bool: text = text.strip() # Check for obvious JSON patterns json_indicators = [ - text.startswith(('{', '[', ']', '}')), + text.startswith(("{", "[", "]", "}")), '"response_type"' in text, '"do_commands"' in text, '"command":' in text, '"requires_sudo"' in text, '{"' in text and '":' in text, - text.count('"') > 6 
and ':' in text, # Multiple quoted keys + text.count('"') > 6 and ":" in text, # Multiple quoted keys ] return any(json_indicators) - + def _setup_signal_handlers(self): """Set up signal handlers for Ctrl+Z and Ctrl+C.""" self._original_sigtstp = signal.signal(signal.SIGTSTP, self._handle_interrupt) self._original_sigint = signal.signal(signal.SIGINT, self._handle_interrupt) - + def _restore_signal_handlers(self): """Restore original signal handlers.""" if self._original_sigtstp is not None: signal.signal(signal.SIGTSTP, self._original_sigtstp) if self._original_sigint is not None: signal.signal(signal.SIGINT, self._original_sigint) - + def _handle_interrupt(self, signum, frame): """Handle Ctrl+Z (SIGTSTP) or Ctrl+C (SIGINT) to stop current command only. - + This does NOT exit the session - it only stops the currently executing command. The session continues so the user can decide what to do next. """ @@ -122,10 +127,10 @@ def _handle_interrupt(self, signum, frame): # Store the interrupted command for potential retry self._interrupted_command = self._current_command signal_name = "Ctrl+Z" if signum == signal.SIGTSTP else "Ctrl+C" - + console.print() console.print(f"[yellow]⚠ {signal_name} detected - Stopping current command...[/yellow]") - + # Kill current subprocess if running if self._current_process and self._current_process.poll() is None: try: @@ -138,27 +143,31 @@ def _handle_interrupt(self, signum, frame): console.print(f"[yellow] Stopped: {self._current_command}[/yellow]") except Exception as e: console.print(f"[dim] Error stopping process: {e}[/dim]") - + # Note: We do NOT raise KeyboardInterrupt here # The session continues - only the current command is stopped - + def _track_command_start(self, command: str, process: subprocess.Popen | None = None): """Track when a command starts executing.""" self._current_command = command self._current_process = process - - def _track_command_complete(self, command: str, success: bool, output: str = "", error: str = 
""): + + def _track_command_complete( + self, command: str, success: bool, output: str = "", error: str = "" + ): """Track when a command completes.""" - self._executed_commands.append({ - "command": command, - "success": success, - "output": output[:500] if output else "", - "error": error[:200] if error else "", - "timestamp": datetime.datetime.now().isoformat(), - }) + self._executed_commands.append( + { + "command": command, + "success": success, + "output": output[:500] if output else "", + "error": error[:200] if error else "", + "timestamp": datetime.datetime.now().isoformat(), + } + ) self._current_command = None self._current_process = None - + def _reset_execution_state(self): """Reset execution tracking state for a new run.""" self._current_process = None @@ -167,81 +176,88 @@ def _reset_execution_state(self): self._interrupted = False self._interrupted_command = None self._remaining_commands = [] - + def __del__(self): """Destructor to ensure cleanup.""" self.cleanup() - + def _show_expandable_output(self, output: str, command: str) -> None: """Show output with expand/collapse capability.""" from rich.panel import Panel - from rich.text import Text from rich.prompt import Prompt - - lines = output.split('\n') + from rich.text import Text + + lines = output.split("\n") total_lines = len(lines) - + # Always show first 3 lines as preview preview_count = 3 - + if total_lines <= preview_count + 2: # Small output - just show it all - console.print(Panel( - output, - title=f"[dim]Output[/dim]", - title_align="left", - border_style="dim", - padding=(0, 1), - )) + console.print( + Panel( + output, + title="[dim]Output[/dim]", + title_align="left", + border_style="dim", + padding=(0, 1), + ) + ) return - + # Show collapsed preview - preview = '\n'.join(lines[:preview_count]) + preview = "\n".join(lines[:preview_count]) remaining = total_lines - preview_count - + content = Text() content.append(preview) content.append(f"\n\n[dim]─── {remaining} more lines hidden 
───[/dim]", style="dim") - - console.print(Panel( - content, - title=f"[dim]Output ({total_lines} lines)[/dim]", - subtitle="[dim italic]Press Enter to continue, 'e' to expand[/dim italic]", - subtitle_align="right", - title_align="left", - border_style="dim", - padding=(0, 1), - )) - + + console.print( + Panel( + content, + title=f"[dim]Output ({total_lines} lines)[/dim]", + subtitle="[dim italic]Press Enter to continue, 'e' to expand[/dim italic]", + subtitle_align="right", + title_align="left", + border_style="dim", + padding=(0, 1), + ) + ) + # Quick check if user wants to expand try: response = input().strip().lower() - if response == 'e': + if response == "e": # Show full output - console.print(Panel( - output, - title=f"[dim]Full Output ({total_lines} lines)[/dim]", - title_align="left", - border_style="green", - padding=(0, 1), - )) + console.print( + Panel( + output, + title=f"[dim]Full Output ({total_lines} lines)[/dim]", + title_align="left", + border_style="green", + padding=(0, 1), + ) + ) except (EOFError, KeyboardInterrupt): pass - + # Initialize notification manager try: from cortex.notification_manager import NotificationManager + self.notifier = NotificationManager() except ImportError: self.notifier = None - + def _send_notification(self, title: str, message: str, level: str = "normal"): """Send a desktop notification.""" if self.notifier: self.notifier.send(title, message, level=level) else: console.print(f"[bold yellow]🔔 {title}:[/bold yellow] {message}") - + def setup_cortex_user(self) -> bool: """Ensure the cortex user exists.""" if not self.user_manager.user_exists(): @@ -253,14 +269,13 @@ def setup_cortex_user(self) -> bool: console.print(f"[red]✗ {message}[/red]") return success return True - + def analyze_commands_for_protected_paths( - self, - commands: list[tuple[str, str]] + self, commands: list[tuple[str, str]] ) -> list[tuple[str, str, list[str]]]: """Analyze commands and identify protected paths they access.""" results = [] - + for 
command, purpose in commands: protected = [] parts = command.split() @@ -269,24 +284,23 @@ def analyze_commands_for_protected_paths( path = os.path.expanduser(part) if self.paths_manager.is_protected(path): protected.append(path) - + results.append((command, purpose, protected)) - + return results - + def request_user_confirmation( self, commands: list[tuple[str, str, list[str]]], ) -> bool: """Show commands to user and request confirmation with improved visual UI.""" + from rich import box + from rich.columns import Columns from rich.panel import Panel - from rich.table import Table from rich.text import Text - from rich.columns import Columns - from rich import box - + console.print() - + # Create a table for commands cmd_table = Table( show_header=True, @@ -299,108 +313,169 @@ def request_user_confirmation( cmd_table.add_column("#", style="bold cyan", width=3, justify="right") cmd_table.add_column("Command", style="bold white") cmd_table.add_column("Purpose", style="dim italic") - + all_protected = [] for i, (cmd, purpose, protected) in enumerate(commands, 1): # Truncate long commands for display cmd_display = cmd if len(cmd) <= 60 else cmd[:57] + "..." purpose_display = purpose if len(purpose) <= 50 else purpose[:47] + "..." 
- + # Add protected path indicator if protected: cmd_display = f"{cmd_display} [yellow]⚠[/yellow]" all_protected.extend(protected) - + cmd_table.add_row(str(i), cmd_display, purpose_display) - + # Create header header_text = Text() header_text.append("🔐 ", style="bold") header_text.append("Permission Required", style="bold white") - header_text.append(f" ({len(commands)} command{'s' if len(commands) > 1 else ''})", style="dim") - - console.print(Panel( - cmd_table, - title=header_text, - title_align="left", - border_style="blue", - padding=(1, 1), - )) - + header_text.append( + f" ({len(commands)} command{'s' if len(commands) > 1 else ''})", style="dim" + ) + + console.print( + Panel( + cmd_table, + title=header_text, + title_align="left", + border_style="blue", + padding=(1, 1), + ) + ) + # Show protected paths if any if all_protected: protected_set = set(all_protected) protected_text = Text() protected_text.append("⚠ Protected paths: ", style="bold yellow") protected_text.append(", ".join(protected_set), style="dim yellow") - console.print(Panel( - protected_text, - border_style="yellow", - padding=(0, 1), - expand=False, - )) - + console.print( + Panel( + protected_text, + border_style="yellow", + padding=(0, 1), + expand=False, + ) + ) + console.print() return Confirm.ask("[bold]Proceed?[/bold]", default=False) - + def _needs_sudo(self, cmd: str, protected_paths: list[str]) -> bool: """Determine if a command needs sudo to execute.""" sudo_commands = [ - "systemctl", "service", "apt", "apt-get", "dpkg", - "mount", "umount", "fdisk", "mkfs", "chown", "chmod", - "useradd", "userdel", "usermod", "groupadd", "groupdel", + "systemctl", + "service", + "apt", + "apt-get", + "dpkg", + "mount", + "umount", + "fdisk", + "mkfs", + "chown", + "chmod", + "useradd", + "userdel", + "usermod", + "groupadd", + "groupdel", ] - + cmd_parts = cmd.split() if not cmd_parts: return False - + base_cmd = cmd_parts[0] - + if base_cmd in sudo_commands: return True - + if protected_paths: 
return True - + if any(p in cmd for p in ["/etc/", "/var/lib/", "/usr/", "/opt/", "/root/"]): return True - + return False - + # Commands that benefit from streaming output (long-running with progress) STREAMING_COMMANDS = [ - "docker pull", "docker push", "docker build", - "apt install", "apt-get install", "apt update", "apt-get update", "apt upgrade", "apt-get upgrade", - "pip install", "pip3 install", "pip download", "pip3 download", - "npm install", "npm ci", "yarn install", "yarn add", - "cargo build", "cargo install", - "go build", "go install", "go get", - "gem install", "bundle install", - "wget", "curl -o", "curl -O", - "git clone", "git pull", "git fetch", - "make", "cmake", "ninja", - "rsync", "scp", + "docker pull", + "docker push", + "docker build", + "apt install", + "apt-get install", + "apt update", + "apt-get update", + "apt upgrade", + "apt-get upgrade", + "pip install", + "pip3 install", + "pip download", + "pip3 download", + "npm install", + "npm ci", + "yarn install", + "yarn add", + "cargo build", + "cargo install", + "go build", + "go install", + "go get", + "gem install", + "bundle install", + "wget", + "curl -o", + "curl -O", + "git clone", + "git pull", + "git fetch", + "make", + "cmake", + "ninja", + "rsync", + "scp", ] - + # Interactive commands that need a TTY - cannot be run in background/automated INTERACTIVE_COMMANDS = [ - "docker exec -it", "docker exec -ti", "docker run -it", "docker run -ti", + "docker exec -it", + "docker exec -ti", + "docker run -it", + "docker run -ti", "docker attach", - "ollama run", "ollama chat", - "ssh ", - "bash -i", "sh -i", "zsh -i", - "vi ", "vim ", "nano ", "emacs ", - "python -i", "python3 -i", "ipython", "node -i", - "mysql -u", "psql -U", "mongo ", "redis-cli", - "htop", "top -i", "less ", "more ", + "ollama run", + "ollama chat", + "ssh ", + "bash -i", + "sh -i", + "zsh -i", + "vi ", + "vim ", + "nano ", + "emacs ", + "python -i", + "python3 -i", + "ipython", + "node -i", + "mysql -u", + "psql 
-U", + "mongo ", + "redis-cli", + "htop", + "top -i", + "less ", + "more ", ] - + def _should_stream_output(self, cmd: str) -> bool: """Check if command should use streaming output.""" cmd_lower = cmd.lower() return any(streaming_cmd in cmd_lower for streaming_cmd in self.STREAMING_COMMANDS) - + def _is_interactive_command(self, cmd: str) -> bool: """Check if command requires interactive TTY and cannot be automated.""" cmd_lower = cmd.lower() @@ -408,31 +483,35 @@ def _is_interactive_command(self, cmd: str) -> bool: if any(interactive in cmd_lower for interactive in self.INTERACTIVE_COMMANDS): return True # Check for -it or -ti flags in docker commands - if "docker" in cmd_lower and (" -it " in cmd_lower or " -ti " in cmd_lower or - cmd_lower.endswith(" -it") or cmd_lower.endswith(" -ti")): + if "docker" in cmd_lower and ( + " -it " in cmd_lower + or " -ti " in cmd_lower + or cmd_lower.endswith(" -it") + or cmd_lower.endswith(" -ti") + ): return True return False - + # Timeout settings by command type (in seconds) COMMAND_TIMEOUTS = { - "docker pull": 1800, # 30 minutes for large images - "docker push": 1800, # 30 minutes for large images - "docker build": 3600, # 1 hour for complex builds - "apt install": 900, # 15 minutes + "docker pull": 1800, # 30 minutes for large images + "docker push": 1800, # 30 minutes for large images + "docker build": 3600, # 1 hour for complex builds + "apt install": 900, # 15 minutes "apt-get install": 900, - "apt update": 300, # 5 minutes + "apt update": 300, # 5 minutes "apt-get update": 300, - "apt upgrade": 1800, # 30 minutes + "apt upgrade": 1800, # 30 minutes "apt-get upgrade": 1800, - "pip install": 600, # 10 minutes + "pip install": 600, # 10 minutes "pip3 install": 600, - "npm install": 900, # 15 minutes + "npm install": 900, # 15 minutes "yarn install": 900, - "git clone": 600, # 10 minutes - "make": 1800, # 30 minutes + "git clone": 600, # 10 minutes + "make": 1800, # 30 minutes "cargo build": 1800, } - + def 
_get_command_timeout(self, cmd: str) -> int: """Get appropriate timeout for a command.""" cmd_lower = cmd.lower() @@ -440,7 +519,7 @@ def _get_command_timeout(self, cmd: str) -> int: if cmd_pattern in cmd_lower: return timeout return 600 # Default 10 minutes for streaming commands - + def _execute_with_streaming( self, cmd: str, @@ -450,18 +529,20 @@ def _execute_with_streaming( """Execute a command with real-time output streaming.""" import select import sys - + # Auto-detect timeout if not specified if timeout is None: timeout = self._get_command_timeout(cmd) - + # Show timeout info for long operations if timeout > 300: - console.print(f"[dim] ⏱️ Timeout: {timeout // 60} minutes (large operation)[/dim]") - + console.print( + f"[dim] ⏱️ Timeout: {timeout // 60} minutes (large operation)[/dim]" + ) + stdout_lines = [] stderr_lines = [] - + try: if needs_sudo: process = subprocess.Popen( @@ -480,17 +561,22 @@ def _execute_with_streaming( text=True, bufsize=1, ) - + # Use select for non-blocking reads on both stdout and stderr import time + start_time = time.time() - + while True: # Check timeout if time.time() - start_time > timeout: process.kill() - return False, "\n".join(stdout_lines), f"Command timed out after {timeout} seconds" - + return ( + False, + "\n".join(stdout_lines), + f"Command timed out after {timeout} seconds", + ) + # Check if process has finished if process.poll() is not None: # Read any remaining output @@ -504,11 +590,11 @@ def _execute_with_streaming( stderr_lines.append(line) self._print_progress_line(line, is_stderr=True) break - + # Try to read from stdout/stderr without blocking try: readable, _, _ = select.select([process.stdout, process.stderr], [], [], 0.1) - + for stream in readable: line = stream.readline() if line: @@ -522,31 +608,53 @@ def _execute_with_streaming( except (ValueError, OSError): # Stream closed break - + return ( process.returncode == 0, "\n".join(stdout_lines).strip(), "\n".join(stderr_lines).strip(), ) - + except 
Exception as e: return False, "\n".join(stdout_lines), str(e) - + def _print_progress_line(self, line: str, is_stderr: bool = False) -> None: """Print a progress line with appropriate formatting.""" if not line.strip(): return - + line = line.strip() - + # Docker pull progress patterns - if any(p in line for p in ["Pulling from", "Digest:", "Status:", "Pull complete", "Downloading", "Extracting"]): + if any( + p in line + for p in [ + "Pulling from", + "Digest:", + "Status:", + "Pull complete", + "Downloading", + "Extracting", + ] + ): console.print(f"[dim] 📦 {line}[/dim]") # Docker build progress elif line.startswith("Step ") or line.startswith("---> "): console.print(f"[dim] 🔨 {line}[/dim]") # apt progress patterns - elif any(p in line for p in ["Get:", "Hit:", "Fetched", "Reading", "Building", "Setting up", "Processing", "Unpacking"]): + elif any( + p in line + for p in [ + "Get:", + "Hit:", + "Fetched", + "Reading", + "Building", + "Setting up", + "Processing", + "Unpacking", + ] + ): console.print(f"[dim] 📦 {line}[/dim]") # pip progress patterns elif any(p in line for p in ["Collecting", "Downloading", "Installing", "Successfully"]): @@ -555,43 +663,44 @@ def _print_progress_line(self, line: str, is_stderr: bool = False) -> None: elif any(p in line for p in ["npm", "added", "packages", "audited"]): console.print(f"[dim] 📦 {line}[/dim]") # git progress patterns - elif any(p in line for p in ["Cloning", "remote:", "Receiving", "Resolving", "Checking out"]): + elif any( + p in line for p in ["Cloning", "remote:", "Receiving", "Resolving", "Checking out"] + ): console.print(f"[dim] 📦 {line}[/dim]") # wget/curl progress elif "%" in line and any(c.isdigit() for c in line): # Progress percentage - update in place console.print(f"[dim] ⬇️ {line[:80]}[/dim]", end="\r") # Error lines - elif is_stderr and any(p in line.lower() for p in ["error", "fail", "denied", "cannot", "unable"]): + elif is_stderr and any( + p in line.lower() for p in ["error", "fail", "denied", 
"cannot", "unable"] + ): console.print(f"[yellow] ⚠ {line}[/yellow]") # Truncate very long lines elif len(line) > 100: console.print(f"[dim] {line[:100]}...[/dim]") - + def _execute_single_command( - self, - cmd: str, - needs_sudo: bool, - timeout: int = 120 + self, cmd: str, needs_sudo: bool, timeout: int = 120 ) -> tuple[bool, str, str]: """Execute a single command with proper privilege handling and interruption support.""" # Check for interactive commands that need a TTY if self._is_interactive_command(cmd): return self._handle_interactive_command(cmd, needs_sudo) - + # Use streaming for long-running commands if self._should_stream_output(cmd): return self._execute_with_streaming(cmd, needs_sudo, timeout=300) - + # Track command start self._track_command_start(cmd) - + try: # Flush output before sudo to handle password prompts cleanly if needs_sudo: sys.stdout.flush() sys.stderr.flush() - + # Use Popen for interruptibility if needs_sudo: process = subprocess.Popen( @@ -608,73 +717,86 @@ def _execute_single_command( stderr=subprocess.PIPE, text=True, ) - + # Store process for interruption handling self._current_process = process - + try: stdout, stderr = process.communicate(timeout=timeout) - + # Check if interrupted during execution if self._interrupted: - self._track_command_complete(cmd, False, stdout or "", "Command interrupted by user") + self._track_command_complete( + cmd, False, stdout or "", "Command interrupted by user" + ) return False, stdout.strip() if stdout else "", "Command interrupted by user" - + success = process.returncode == 0 - + # Track completion self._track_command_complete(cmd, success, stdout, stderr) - + # After sudo, reset console state if needs_sudo: - sys.stdout.write('') # Force flush + sys.stdout.write("") # Force flush sys.stdout.flush() - + return (success, stdout.strip(), stderr.strip()) - + except subprocess.TimeoutExpired: process.kill() stdout, stderr = process.communicate() - self._track_command_complete(cmd, False, stdout, 
f"Command timed out after {timeout} seconds") - return False, stdout.strip() if stdout else "", f"Command timed out after {timeout} seconds" + self._track_command_complete( + cmd, False, stdout, f"Command timed out after {timeout} seconds" + ) + return ( + False, + stdout.strip() if stdout else "", + f"Command timed out after {timeout} seconds", + ) except Exception as e: self._track_command_complete(cmd, False, "", str(e)) return False, "", str(e) - - def _handle_interactive_command( - self, - cmd: str, - needs_sudo: bool - ) -> tuple[bool, str, str]: + + def _handle_interactive_command(self, cmd: str, needs_sudo: bool) -> tuple[bool, str, str]: """Handle interactive commands that need a TTY. - + These commands cannot be run in the background - they need user interaction. We'll either: 1. Try to open in a new terminal window 2. Or inform the user to run it manually """ console.print() - console.print(f"[yellow]⚡ Interactive command detected[/yellow]") - console.print(f"[dim] This command requires a terminal for interaction.[/dim]") + console.print("[yellow]⚡ Interactive command detected[/yellow]") + console.print("[dim] This command requires a terminal for interaction.[/dim]") console.print() - + full_cmd = f"sudo {cmd}" if needs_sudo else cmd - + # Try to detect if we can open a new terminal terminal_cmds = [ - ("gnome-terminal", f'gnome-terminal -- bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'), - ("konsole", f'konsole -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'), + ( + "gnome-terminal", + f'gnome-terminal -- bash -c "{full_cmd}; echo; echo Press Enter to close...; read"', + ), + ( + "konsole", + f'konsole -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"', + ), ("xterm", f'xterm -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'), - ("x-terminal-emulator", f'x-terminal-emulator -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"'), + ( + "x-terminal-emulator", + 
f'x-terminal-emulator -e bash -c "{full_cmd}; echo; echo Press Enter to close...; read"', + ), ] - + # Check which terminal is available for term_name, term_cmd in terminal_cmds: if shutil.which(term_name): console.print(f"[cyan]🖥️ Opening in new terminal window ({term_name})...[/cyan]") console.print(f"[dim] Command: {full_cmd}[/dim]") console.print() - + try: # Start the terminal in background subprocess.Popen( @@ -687,19 +809,21 @@ def _handle_interactive_command( except Exception as e: console.print(f"[yellow] ⚠ Could not open terminal: {e}[/yellow]") break - + # Fallback: ask user to run manually - console.print(f"[bold cyan]📋 Please run this command manually in another terminal:[/bold cyan]") + console.print( + "[bold cyan]📋 Please run this command manually in another terminal:[/bold cyan]" + ) console.print() console.print(f" [green]{full_cmd}[/green]") console.print() - console.print(f"[dim] This command needs interactive input (TTY).[/dim]") - console.print(f"[dim] Cortex cannot capture its output automatically.[/dim]") + console.print("[dim] This command needs interactive input (TTY).[/dim]") + console.print("[dim] Cortex cannot capture its output automatically.[/dim]") console.print() - + # Return special status indicating manual run needed return True, "INTERACTIVE_COMMAND_MANUAL", f"Interactive command - run manually: {full_cmd}" - + def execute_commands_as_cortex( self, commands: list[tuple[str, str, list[str]]], @@ -715,124 +839,140 @@ def execute_commands_as_cortex( session_id=self.current_session_id or "", ) self.current_run = run - + console.print() console.print("[bold cyan]🚀 Executing commands with conflict detection...[/bold cyan]") console.print() - + # Phase 1: Conflict Detection console.print("[dim]Checking for conflicts...[/dim]") - + cleanup_commands = [] for cmd, purpose, protected in commands: conflict = self._conflict_detector.check_for_conflicts(cmd, purpose) if conflict["has_conflict"]: - console.print(f"[yellow] ⚠ 
{conflict['conflict_type']}: {conflict['suggestion']}[/yellow]") + console.print( + f"[yellow] ⚠ {conflict['conflict_type']}: {conflict['suggestion']}[/yellow]" + ) if conflict["cleanup_commands"]: cleanup_commands.extend(conflict["cleanup_commands"]) - + if cleanup_commands: console.print("[dim]Running cleanup commands...[/dim]") for cleanup_cmd in cleanup_commands: self._execute_single_command(cleanup_cmd, needs_sudo=True) - + console.print() - + all_protected = set() for _, _, protected in commands: all_protected.update(protected) - + if all_protected: console.print(f"[dim]📁 Protected paths involved: {', '.join(all_protected)}[/dim]") console.print() - + # Phase 2: Execute Commands from rich.panel import Panel from rich.text import Text - + for i, (cmd, purpose, protected) in enumerate(commands, 1): # Create a visually distinct panel for each command cmd_header = Text() cmd_header.append(f"[{i}/{len(commands)}] ", style="bold white on blue") cmd_header.append(f" {cmd}", style="bold cyan") - + console.print() - console.print(Panel( - f"[bold cyan]{cmd}[/bold cyan]\n[dim]└─ {purpose}[/dim]", - title=f"[bold white] Command {i}/{len(commands)} [/bold white]", - title_align="left", - border_style="blue", - padding=(0, 1), - )) - - file_check = self._file_analyzer.check_file_exists_and_usefulness(cmd, purpose, user_query) - + console.print( + Panel( + f"[bold cyan]{cmd}[/bold cyan]\n[dim]└─ {purpose}[/dim]", + title=f"[bold white] Command {i}/{len(commands)} [/bold white]", + title_align="left", + border_style="blue", + padding=(0, 1), + ) + ) + + file_check = self._file_analyzer.check_file_exists_and_usefulness( + cmd, purpose, user_query + ) + if file_check["recommendations"]: self._file_analyzer.apply_file_recommendations(file_check["recommendations"]) - + cmd_log = CommandLog( command=cmd, purpose=purpose, timestamp=datetime.datetime.now().isoformat(), status=CommandStatus.RUNNING, ) - + start_time = time.time() needs_sudo = self._needs_sudo(cmd, protected) - + 
success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) - + if not success: diagnosis = self._diagnoser.diagnose_error(cmd, stderr) - + # Create error panel for visual grouping error_info = ( f"[bold red]⚠ {diagnosis['description']}[/bold red]\n" f"[dim]Type: {diagnosis['error_type']} | Category: {diagnosis.get('category', 'unknown')}[/dim]" ) - console.print(Panel( - error_info, - title="[bold red] ❌ Error Detected [/bold red]", - title_align="left", - border_style="red", - padding=(0, 1), - )) - + console.print( + Panel( + error_info, + title="[bold red] ❌ Error Detected [/bold red]", + title_align="left", + border_style="red", + padding=(0, 1), + ) + ) + # Check if this is a login/credential required error if diagnosis.get("category") == "login_required": - console.print(Panel( - "[bold cyan]🔐 Authentication required for this command[/bold cyan]", - border_style="cyan", - padding=(0, 1), - expand=False, - )) - - login_success, login_msg = self._login_handler.handle_login(cmd, stderr) - - if login_success: - console.print(Panel( - f"[bold green]✓ {login_msg}[/bold green]\n[dim]Retrying command...[/dim]", - border_style="green", + console.print( + Panel( + "[bold cyan]🔐 Authentication required for this command[/bold cyan]", + border_style="cyan", padding=(0, 1), expand=False, - )) - - # Retry the command after successful login - success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) - - if success: - console.print(Panel( - "[bold green]✓ Command succeeded after authentication![/bold green]", + ) + ) + + login_success, login_msg = self._login_handler.handle_login(cmd, stderr) + + if login_success: + console.print( + Panel( + f"[bold green]✓ {login_msg}[/bold green]\n[dim]Retrying command...[/dim]", border_style="green", padding=(0, 1), expand=False, - )) + ) + ) + + # Retry the command after successful login + success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) + + if success: + console.print( + Panel( + "[bold 
green]✓ Command succeeded after authentication![/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) else: - console.print(Panel( - f"[bold yellow]Command still failed after login[/bold yellow]\n[dim]{stderr[:100]}[/dim]", - border_style="yellow", - padding=(0, 1), - )) + console.print( + Panel( + f"[bold yellow]Command still failed after login[/bold yellow]\n[dim]{stderr[:100]}[/dim]", + border_style="yellow", + padding=(0, 1), + ) + ) else: console.print(f"[yellow]{login_msg}[/yellow]") else: @@ -844,142 +984,167 @@ def execute_commands_as_cortex( for key, value in diagnosis["extracted_info"].items(): if value: extra_info.append(f"[dim]{key}:[/dim] {value}") - + if extra_info: - console.print(Panel( - "\n".join(extra_info), - title="[dim] Error Details [/dim]", - title_align="left", - border_style="dim", - padding=(0, 1), - expand=False, - )) - + console.print( + Panel( + "\n".join(extra_info), + title="[dim] Error Details [/dim]", + title_align="left", + border_style="dim", + padding=(0, 1), + expand=False, + ) + ) + fixed, fix_message, fix_commands = self._auto_fixer.auto_fix_error( cmd, stderr, diagnosis, max_attempts=3 ) - + if fixed: success = True - console.print(Panel( - f"[bold green]✓ Auto-fixed:[/bold green] {fix_message}", - title="[bold green] Fix Successful [/bold green]", - title_align="left", - border_style="green", - padding=(0, 1), - expand=False, - )) + console.print( + Panel( + f"[bold green]✓ Auto-fixed:[/bold green] {fix_message}", + title="[bold green] Fix Successful [/bold green]", + title_align="left", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) _, stdout, stderr = self._execute_single_command(cmd, needs_sudo=True) else: fix_info = [] if fix_commands: - fix_info.append(f"[dim]Attempted:[/dim] {len(fix_commands)} fix command(s)") + fix_info.append( + f"[dim]Attempted:[/dim] {len(fix_commands)} fix command(s)" + ) fix_info.append(f"[bold yellow]Result:[/bold yellow] {fix_message}") - 
console.print(Panel( - "\n".join(fix_info), - title="[bold yellow] Fix Incomplete [/bold yellow]", - title_align="left", - border_style="yellow", - padding=(0, 1), - )) - + console.print( + Panel( + "\n".join(fix_info), + title="[bold yellow] Fix Incomplete [/bold yellow]", + title_align="left", + border_style="yellow", + padding=(0, 1), + ) + ) + cmd_log.duration_seconds = time.time() - start_time cmd_log.output = stdout cmd_log.error = stderr cmd_log.status = CommandStatus.SUCCESS if success else CommandStatus.FAILED - + run.commands.append(cmd_log) run.files_accessed.extend(protected) - + if success: - console.print(Panel( - f"[bold green]✓ Success[/bold green] [dim]({cmd_log.duration_seconds:.2f}s)[/dim]", - border_style="green", - padding=(0, 1), - expand=False, - )) + console.print( + Panel( + f"[bold green]✓ Success[/bold green] [dim]({cmd_log.duration_seconds:.2f}s)[/dim]", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) if stdout: self._show_expandable_output(stdout, cmd) else: - console.print(Panel( - f"[bold red]✗ Failed[/bold red]\n[dim]{stderr[:200]}[/dim]", - border_style="red", - padding=(0, 1), - )) - + console.print( + Panel( + f"[bold red]✗ Failed[/bold red]\n[dim]{stderr[:200]}[/dim]", + border_style="red", + padding=(0, 1), + ) + ) + final_diagnosis = self._diagnoser.diagnose_error(cmd, stderr) if final_diagnosis["fix_commands"] and not final_diagnosis["can_auto_fix"]: # Create a manual intervention panel - manual_content = [f"[bold yellow]Issue:[/bold yellow] {final_diagnosis['description']}", ""] + manual_content = [ + f"[bold yellow]Issue:[/bold yellow] {final_diagnosis['description']}", + "", + ] manual_content.append("[bold]Suggested commands:[/bold]") for fix_cmd in final_diagnosis["fix_commands"]: if not fix_cmd.startswith("#"): manual_content.append(f" [cyan]$ {fix_cmd}[/cyan]") else: manual_content.append(f" [dim]{fix_cmd}[/dim]") - - console.print(Panel( - "\n".join(manual_content), - title="[bold yellow] 💡 Manual 
Intervention Required [/bold yellow]", - title_align="left", - border_style="yellow", - padding=(0, 1), - )) - + + console.print( + Panel( + "\n".join(manual_content), + title="[bold yellow] 💡 Manual Intervention Required [/bold yellow]", + title_align="left", + border_style="yellow", + padding=(0, 1), + ) + ) + console.print() - + self._granted_privileges = [] - + # Phase 3: Verification Tests console.print() - console.print(Panel( - "[bold]Running verification tests...[/bold]", - title="[bold cyan] 🧪 Verification Phase [/bold cyan]", - title_align="left", - border_style="cyan", - padding=(0, 1), - expand=False, - )) - all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query) - - # Phase 4: Auto-repair if tests failed - if not all_tests_passed: - console.print() - console.print(Panel( - "[bold yellow]Attempting to repair test failures...[/bold yellow]", - title="[bold yellow] 🔧 Auto-Repair Phase [/bold yellow]", + console.print( + Panel( + "[bold]Running verification tests...[/bold]", + title="[bold cyan] 🧪 Verification Phase [/bold cyan]", title_align="left", - border_style="yellow", + border_style="cyan", padding=(0, 1), expand=False, - )) - + ) + ) + all_tests_passed, test_results = self._verification_runner.run_verification_tests( + run.commands, user_query + ) + + # Phase 4: Auto-repair if tests failed + if not all_tests_passed: + console.print() + console.print( + Panel( + "[bold yellow]Attempting to repair test failures...[/bold yellow]", + title="[bold yellow] 🔧 Auto-Repair Phase [/bold yellow]", + title_align="left", + border_style="yellow", + padding=(0, 1), + expand=False, + ) + ) + repair_success = self._handle_test_failures(test_results, run) - + if repair_success: console.print("[dim]Re-running verification tests...[/dim]") - all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query) - + all_tests_passed, test_results = 
self._verification_runner.run_verification_tests( + run.commands, user_query + ) + run.completed_at = datetime.datetime.now().isoformat() run.summary = self._generate_summary(run) - + if test_results: passed = sum(1 for t in test_results if t["passed"]) run.summary += f" | Tests: {passed}/{len(test_results)} passed" - + self.db.save_run(run) - + # Generate LLM summary/answer llm_answer = self._generate_llm_answer(run, user_query) - + # Print condensed execution summary with answer self._print_execution_summary(run, answer=llm_answer) - + console.print() console.print(f"[dim]Run ID: {run.run_id}[/dim]") - + return run - + def _handle_resource_conflict( self, idx: int, @@ -989,11 +1154,11 @@ def _handle_resource_conflict( cleanup_commands: list, ) -> bool: """Handle any resource conflict with user options. - + This is a GENERAL handler for all resource types: - Docker containers - Services - - Files/directories + - Files/directories - Packages - Ports - Users/groups @@ -1007,7 +1172,7 @@ def _handle_resource_conflict( suggestion = conflict.get("suggestion", "") is_active = conflict.get("is_active", True) alternatives = conflict.get("alternative_actions", []) - + # Resource type icons icons = { "container": "🐳", @@ -1027,143 +1192,166 @@ def _handle_resource_conflict( "cron_job": "⏰", } icon = icons.get(resource_type, "📌") - + # Display the conflict with visual grouping from rich.panel import Panel - - status_text = "[bold cyan]Active[/bold cyan]" if is_active else "[dim yellow]Inactive[/dim yellow]" + + status_text = ( + "[bold cyan]Active[/bold cyan]" if is_active else "[dim yellow]Inactive[/dim yellow]" + ) conflict_content = ( f"{icon} [bold]{resource_type.replace('_', ' ').title()}:[/bold] '{resource_name}'\n" f"[dim]Status:[/dim] {status_text}\n" f"[dim]{suggestion}[/dim]" ) - + console.print() - console.print(Panel( - conflict_content, - title="[bold yellow] ⚠️ Resource Conflict [/bold yellow]", - title_align="left", - border_style="yellow", - padding=(0, 1), - 
)) - + console.print( + Panel( + conflict_content, + title="[bold yellow] ⚠️ Resource Conflict [/bold yellow]", + title_align="left", + border_style="yellow", + padding=(0, 1), + ) + ) + # If there are alternatives, show them if alternatives: options_content = ["[bold]What would you like to do?[/bold]", ""] for j, alt in enumerate(alternatives, 1): options_content.append(f" {j}. {alt['description']}") - - console.print(Panel( - "\n".join(options_content), - border_style="dim", - padding=(0, 1), - )) - + + console.print( + Panel( + "\n".join(options_content), + border_style="dim", + padding=(0, 1), + ) + ) + from rich.prompt import Prompt + choice = Prompt.ask( " Choose an option", choices=[str(k) for k in range(1, len(alternatives) + 1)], - default="1" + default="1", ) - + selected = alternatives[int(choice) - 1] action = selected["action"] action_commands = selected.get("commands", []) - + # Handle different actions if action in ["use_existing", "use_different"]: - console.print(f"[green] ✓ Using existing {resource_type} '{resource_name}'[/green]") + console.print( + f"[green] ✓ Using existing {resource_type} '{resource_name}'[/green]" + ) commands_to_skip.add(idx) return True - + elif action == "start_existing": console.print(f"[cyan] Starting existing {resource_type}...[/cyan]") for start_cmd in action_commands: needs_sudo = start_cmd.startswith("sudo") - success, _, stderr = self._execute_single_command(start_cmd, needs_sudo=needs_sudo) + success, _, stderr = self._execute_single_command( + start_cmd, needs_sudo=needs_sudo + ) if success: console.print(f"[green] ✓ {start_cmd}[/green]") else: console.print(f"[red] ✗ {start_cmd}: {stderr[:50]}[/red]") commands_to_skip.add(idx) return True - + elif action in ["restart", "upgrade", "reinstall"]: console.print(f"[cyan] {action.title()}ing {resource_type}...[/cyan]") for action_cmd in action_commands: needs_sudo = action_cmd.startswith("sudo") - success, _, stderr = self._execute_single_command(action_cmd, 
needs_sudo=needs_sudo) + success, _, stderr = self._execute_single_command( + action_cmd, needs_sudo=needs_sudo + ) if success: console.print(f"[green] ✓ {action_cmd}[/green]") else: console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]") commands_to_skip.add(idx) return True - + elif action in ["recreate", "backup", "replace", "stop_existing"]: console.print(f"[cyan] Preparing to {action.replace('_', ' ')}...[/cyan]") for action_cmd in action_commands: needs_sudo = action_cmd.startswith("sudo") - success, _, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo) + success, _, stderr = self._execute_single_command( + action_cmd, needs_sudo=needs_sudo + ) if success: console.print(f"[green] ✓ {action_cmd}[/green]") else: console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]") # Don't skip - let the original command run after cleanup return True - + elif action == "modify": console.print(f"[cyan] Will modify existing {resource_type}[/cyan]") # Don't skip - let the original command run to modify return True - + elif action == "install_first": # Install a missing tool/dependency first - console.print(f"[cyan] Installing required dependency '{resource_name}'...[/cyan]") + console.print( + f"[cyan] Installing required dependency '{resource_name}'...[/cyan]" + ) all_success = True for action_cmd in action_commands: needs_sudo = action_cmd.startswith("sudo") - success, stdout, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo) + success, stdout, stderr = self._execute_single_command( + action_cmd, needs_sudo=needs_sudo + ) if success: console.print(f"[green] ✓ {action_cmd}[/green]") else: console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]") all_success = False - + if all_success: - console.print(f"[green] ✓ '{resource_name}' installed. Continuing with original command...[/green]") + console.print( + f"[green] ✓ '{resource_name}' installed. 
Continuing with original command...[/green]" + ) # Don't skip - run the original command now that the tool is installed return True else: console.print(f"[red] ✗ Failed to install '{resource_name}'[/red]") commands_to_skip.add(idx) return True - + elif action == "use_apt": # User chose to use apt instead of snap - console.print(f"[cyan] Skipping snap command - use apt instead[/cyan]") + console.print("[cyan] Skipping snap command - use apt instead[/cyan]") commands_to_skip.add(idx) return True - + elif action == "refresh": # Refresh snap package - console.print(f"[cyan] Refreshing snap package...[/cyan]") + console.print("[cyan] Refreshing snap package...[/cyan]") for action_cmd in action_commands: needs_sudo = action_cmd.startswith("sudo") - success, _, stderr = self._execute_single_command(action_cmd, needs_sudo=needs_sudo) + success, _, stderr = self._execute_single_command( + action_cmd, needs_sudo=needs_sudo + ) if success: console.print(f"[green] ✓ {action_cmd}[/green]") else: console.print(f"[red] ✗ {action_cmd}: {stderr[:50]}[/red]") commands_to_skip.add(idx) return True - + # No alternatives - use default behavior (add to cleanup if available) if conflict.get("cleanup_commands"): cleanup_commands.extend(conflict["cleanup_commands"]) - + return False - + def _handle_test_failures( self, test_results: list[dict[str, Any]], @@ -1171,41 +1359,46 @@ def _handle_test_failures( ) -> bool: """Handle failed verification tests by attempting auto-repair.""" failed_tests = [t for t in test_results if not t["passed"]] - + if not failed_tests: return True - + console.print() console.print("[bold yellow]🔧 Attempting to fix test failures...[/bold yellow]") - + all_fixed = True - + for test in failed_tests: test_name = test["test"] output = test["output"] - + console.print(f"[dim] Fixing: {test_name}[/dim]") - + if "nginx -t" in test_name: diagnosis = self._diagnoser.diagnose_error("nginx -t", output) - fixed, msg, _ = self._auto_fixer.auto_fix_error("nginx -t", output, 
diagnosis, max_attempts=3) + fixed, msg, _ = self._auto_fixer.auto_fix_error( + "nginx -t", output, diagnosis, max_attempts=3 + ) if fixed: console.print(f"[green] ✓ Fixed: {msg}[/green]") else: console.print(f"[red] ✗ Could not fix: {msg}[/red]") all_fixed = False - + elif "apache2ctl" in test_name: diagnosis = self._diagnoser.diagnose_error("apache2ctl configtest", output) - fixed, msg, _ = self._auto_fixer.auto_fix_error("apache2ctl configtest", output, diagnosis, max_attempts=3) + fixed, msg, _ = self._auto_fixer.auto_fix_error( + "apache2ctl configtest", output, diagnosis, max_attempts=3 + ) if fixed: console.print(f"[green] ✓ Fixed: {msg}[/green]") else: all_fixed = False - + elif "systemctl is-active" in test_name: import re - svc_match = re.search(r'is-active\s+(\S+)', test_name) + + svc_match = re.search(r"is-active\s+(\S+)", test_name) if svc_match: service = svc_match.group(1) success, _, err = self._execute_single_command( @@ -1214,20 +1407,23 @@ def _handle_test_failures( if success: console.print(f"[green] ✓ Started service {service}[/green]") else: - console.print(f"[yellow] ⚠ Could not start {service}: {err[:50]}[/yellow]") - + console.print( + f"[yellow] ⚠ Could not start {service}: {err[:50]}[/yellow]" + ) + elif "file exists" in test_name: import re - path_match = re.search(r'file exists: (.+)', test_name) + + path_match = re.search(r"file exists: (.+)", test_name) if path_match: path = path_match.group(1) parent = os.path.dirname(path) if parent and not os.path.exists(parent): self._execute_single_command(f"sudo mkdir -p {parent}", needs_sudo=True) console.print(f"[green] ✓ Created directory {parent}[/green]") - + return all_fixed - + def execute_with_task_tree( self, commands: list[tuple[str, str, list[str]]], @@ -1236,7 +1432,7 @@ def execute_with_task_tree( """Execute commands using the task tree system with advanced auto-repair.""" # Reset execution state for new run self._reset_execution_state() - + run = DoRun( 
run_id=self.db._generate_run_id(), summary="", @@ -1247,39 +1443,41 @@ def execute_with_task_tree( ) self.current_run = run self._permission_requests_count = 0 - + self._task_tree = TaskTree() for cmd, purpose, protected in commands: task = self._task_tree.add_root_task(cmd, purpose) task.reasoning = f"Protected paths: {', '.join(protected)}" if protected else "" - + console.print() - console.print(Panel( - "[bold cyan]🌳 Task Tree Execution Mode[/bold cyan]\n" - "[dim]Commands will be executed with auto-repair capabilities.[/dim]\n" - "[dim]Conflict detection and verification tests enabled.[/dim]\n" - "[dim yellow]Press Ctrl+Z or Ctrl+C to stop execution at any time.[/dim yellow]", - expand=False, - )) + console.print( + Panel( + "[bold cyan]🌳 Task Tree Execution Mode[/bold cyan]\n" + "[dim]Commands will be executed with auto-repair capabilities.[/dim]\n" + "[dim]Conflict detection and verification tests enabled.[/dim]\n" + "[dim yellow]Press Ctrl+Z or Ctrl+C to stop execution at any time.[/dim yellow]", + expand=False, + ) + ) console.print() - + # Set up signal handlers for Ctrl+Z and Ctrl+C self._setup_signal_handlers() - + # Phase 1: Conflict Detection - Claude-like header console.print("[bold blue]━━━[/bold blue] [bold]Checking for Conflicts[/bold]") - + conflicts_found = [] cleanup_commands = [] commands_to_skip = set() # Track commands that should be skipped (use existing) commands_to_replace = {} # Track commands that should be replaced resource_decisions = {} # Track user decisions for each resource to avoid duplicate prompts - + for i, (cmd, purpose, protected) in enumerate(commands): conflict = self._conflict_detector.check_for_conflicts(cmd, purpose) if conflict["has_conflict"]: conflicts_found.append((i, cmd, conflict)) - + if conflicts_found: # Deduplicate conflicts by resource name unique_resources = {} @@ -1288,42 +1486,45 @@ def execute_with_task_tree( if resource_name not in unique_resources: unique_resources[resource_name] = [] 
unique_resources[resource_name].append((idx, cmd, conflict)) - - console.print(f" [yellow]●[/yellow] Found [bold]{len(unique_resources)}[/bold] unique conflict(s)") - + + console.print( + f" [yellow]●[/yellow] Found [bold]{len(unique_resources)}[/bold] unique conflict(s)" + ) + for resource_name, resource_conflicts in unique_resources.items(): # Only ask once per unique resource first_idx, first_cmd, first_conflict = resource_conflicts[0] - + # Handle the first conflict to get user's decision - decision = self._handle_resource_conflict(first_idx, first_cmd, first_conflict, commands_to_skip, cleanup_commands) + decision = self._handle_resource_conflict( + first_idx, first_cmd, first_conflict, commands_to_skip, cleanup_commands + ) resource_decisions[resource_name] = decision - + # Apply the same decision to all other commands affecting this resource if len(resource_conflicts) > 1: for idx, cmd, conflict in resource_conflicts[1:]: if first_idx in commands_to_skip: commands_to_skip.add(idx) - + # Run cleanup commands for non-Docker conflicts if cleanup_commands: console.print("[dim] Running cleanup commands...[/dim]") for cleanup_cmd in cleanup_commands: self._execute_single_command(cleanup_cmd, needs_sudo=True) console.print(f"[dim] ✓ {cleanup_cmd}[/dim]") - + # Filter out skipped commands if commands_to_skip: filtered_commands = [ - (cmd, purpose, protected) - for i, (cmd, purpose, protected) in enumerate(commands) + (cmd, purpose, protected) + for i, (cmd, purpose, protected) in enumerate(commands) if i not in commands_to_skip ] # Update task tree to skip these tasks for task in self._task_tree.root_tasks: task_idx = next( - (i for i, (c, p, pr) in enumerate(commands) if c == task.command), - None + (i for i, (c, p, pr) in enumerate(commands) if c == task.command), None ) if task_idx in commands_to_skip: task.status = CommandStatus.SKIPPED @@ -1331,23 +1532,23 @@ def execute_with_task_tree( commands = filtered_commands else: console.print(" [green]●[/green] No 
conflicts detected") - + console.print() - + all_protected = set() for _, _, protected in commands: all_protected.update(protected) - + if all_protected: console.print(f"[dim]📁 Protected paths: {', '.join(all_protected)}[/dim]") console.print() - + try: # Phase 2: Execute Commands - Claude-like header console.print() console.print("[bold blue]━━━[/bold blue] [bold]Executing Commands[/bold]") console.print() - + # Track remaining commands for resume functionality executed_tasks = set() for i, root_task in enumerate(self._task_tree.root_tasks): @@ -1362,31 +1563,37 @@ def execute_with_task_tree( break self._execute_task_node(root_task, run, commands) executed_tasks.add(root_task.id) - + if not self._interrupted: # Phase 3: Verification Tests - Claude-like header console.print() console.print("[bold blue]━━━[/bold blue] [bold]Verification[/bold]") - - all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query) - + + all_tests_passed, test_results = self._verification_runner.run_verification_tests( + run.commands, user_query + ) + # Phase 4: Auto-repair if tests failed if not all_tests_passed: console.print() console.print("[bold blue]━━━[/bold blue] [bold]Auto-Repair[/bold]") - + repair_success = self._handle_test_failures(test_results, run) - + if repair_success: console.print() console.print("[dim] Re-running verification tests...[/dim]") - all_tests_passed, test_results = self._verification_runner.run_verification_tests(run.commands, user_query) + all_tests_passed, test_results = ( + self._verification_runner.run_verification_tests( + run.commands, user_query + ) + ) else: all_tests_passed = False test_results = [] - + run.completed_at = datetime.datetime.now().isoformat() - + if self._interrupted: run.summary = f"INTERRUPTED after {len(self._executed_commands)} command(s)" else: @@ -1394,45 +1601,47 @@ def execute_with_task_tree( if test_results: passed = sum(1 for t in test_results if t["passed"]) run.summary += f" 
| Tests: {passed}/{len(test_results)} passed" - + self.db.save_run(run) - + console.print() console.print("[bold]Task Execution Tree:[/bold]") self._task_tree.print_tree() - + # Generate LLM summary/answer if available llm_answer = None if not self._interrupted: llm_answer = self._generate_llm_answer(run, user_query) - + # Print condensed execution summary with answer self._print_execution_summary(run, answer=llm_answer) - + console.print() if self._interrupted: console.print(f"[dim]Run ID: {run.run_id} (interrupted)[/dim]") elif all_tests_passed: console.print(f"[dim]Run ID: {run.run_id}[/dim]") - + if self._permission_requests_count > 1: - console.print(f"[dim]Permission requests made: {self._permission_requests_count}[/dim]") - + console.print( + f"[dim]Permission requests made: {self._permission_requests_count}[/dim]" + ) + # Reset interrupted flag before interactive session # This allows the user to continue the session even after stopping a command was_interrupted = self._interrupted self._interrupted = False - + # Always go to interactive session - even after interruption # User can decide what to do next (retry, skip, exit) self._interactive_session(run, commands, user_query, was_interrupted=was_interrupted) - + return run - + finally: # Always restore signal handlers self._restore_signal_handlers() - + def _interactive_session( self, run: DoRun, @@ -1441,21 +1650,22 @@ def _interactive_session( was_interrupted: bool = False, ) -> None: """Interactive session after task completion - suggest next steps. - + If was_interrupted is True, the previous command execution was stopped by Ctrl+Z/Ctrl+C. We still continue the session so the user can decide what to do next (retry, skip remaining, run different command, etc). 
""" import sys + from rich.prompt import Prompt - + # Flush any pending output to ensure clean display sys.stdout.flush() sys.stderr.flush() - + # Generate context-aware suggestions based on what was done suggestions = self._generate_suggestions(run, commands, user_query) - + # If interrupted, add special suggestions at the beginning if was_interrupted: interrupted_suggestions = [ @@ -1471,7 +1681,7 @@ def _interactive_session( }, ] suggestions = interrupted_suggestions + suggestions - + # Track context for natural language processing context = { "original_query": user_query, @@ -1479,39 +1689,50 @@ def _interactive_session( "session_actions": [], "was_interrupted": was_interrupted, } - + console.print() if was_interrupted: - console.print("[bold yellow]━━━[/bold yellow] [bold]Execution Interrupted - What would you like to do?[/bold]") + console.print( + "[bold yellow]━━━[/bold yellow] [bold]Execution Interrupted - What would you like to do?[/bold]" + ) else: console.print("[bold blue]━━━[/bold blue] [bold]Next Steps[/bold]") console.print() - + # Display suggestions self._display_suggestions(suggestions) - + console.print() console.print("[dim]You can type any request in natural language[/dim]") console.print() - + # Ensure prompt is visible sys.stdout.flush() - + while True: try: - response = Prompt.ask( - "[bold cyan]>[/bold cyan]", - default="exit" - ) - + response = Prompt.ask("[bold cyan]>[/bold cyan]", default="exit") + response_stripped = response.strip() response_lower = response_stripped.lower() - + # Check for exit keywords - if response_lower in ["exit", "quit", "done", "no", "n", "bye", "thanks", "nothing", ""]: - console.print("[dim]👋 Session ended. Run 'cortex do history' to see past runs.[/dim]") + if response_lower in [ + "exit", + "quit", + "done", + "no", + "n", + "bye", + "thanks", + "nothing", + "", + ]: + console.print( + "[dim]👋 Session ended. 
Run 'cortex do history' to see past runs.[/dim]" + ) break - + # Try to parse as number (for suggestion selection) try: choice = int(response_stripped) @@ -1519,14 +1740,16 @@ def _interactive_session( suggestion = suggestions[choice - 1] self._execute_suggestion(suggestion, run, user_query) context["session_actions"].append(suggestion.get("label", "")) - + # Update last query to the suggestion for context-aware follow-ups suggestion_label = suggestion.get("label", "") context["last_query"] = suggestion_label - + # Continue the session with suggestions based on what was just done console.print() - suggestions = self._generate_suggestions_for_query(suggestion_label, context) + suggestions = self._generate_suggestions_for_query( + suggestion_label, context + ) self._display_suggestions(suggestions) console.print() continue @@ -1535,194 +1758,222 @@ def _interactive_session( break except ValueError: pass - + # Handle natural language request handled = self._handle_natural_language_request( - response_stripped, - suggestions, - context, - run, - commands + response_stripped, suggestions, context, run, commands ) - + if handled: context["session_actions"].append(response_stripped) # Update context with the new query for better suggestions context["last_query"] = response_stripped - + # Refresh suggestions based on NEW query (not combined) # This ensures suggestions are relevant to what user just asked console.print() suggestions = self._generate_suggestions_for_query(response_stripped, context) self._display_suggestions(suggestions) console.print() - + except (EOFError, KeyboardInterrupt): console.print("\n[dim]👋 Session ended.[/dim]") break - + # Cleanup: ensure any terminal monitors are stopped if self._terminal_monitor: self._terminal_monitor.stop() self._terminal_monitor = None - + def _generate_suggestions_for_query(self, query: str, context: dict) -> list[dict]: """Generate suggestions based on the current query and context. 
- + This generates follow-up suggestions relevant to what the user just asked/did, not tied to the original task. """ suggestions = [] query_lower = query.lower() - + # User management related queries if any(w in query_lower for w in ["user", "locked", "password", "account", "login"]): - suggestions.append({ - "type": "info", - "icon": "👥", - "label": "List all users", - "description": "Show all system users", - "command": "cat /etc/passwd | cut -d: -f1", - "purpose": "List all users", - }) - suggestions.append({ - "type": "info", - "icon": "🔐", - "label": "Check sudo users", - "description": "Show users with sudo access", - "command": "getent group sudo", - "purpose": "List sudo group members", - }) - suggestions.append({ - "type": "action", - "icon": "🔓", - "label": "Unlock a user", - "description": "Unlock a locked user account", - "demo_type": "unlock_user", - }) - + suggestions.append( + { + "type": "info", + "icon": "👥", + "label": "List all users", + "description": "Show all system users", + "command": "cat /etc/passwd | cut -d: -f1", + "purpose": "List all users", + } + ) + suggestions.append( + { + "type": "info", + "icon": "🔐", + "label": "Check sudo users", + "description": "Show users with sudo access", + "command": "getent group sudo", + "purpose": "List sudo group members", + } + ) + suggestions.append( + { + "type": "action", + "icon": "🔓", + "label": "Unlock a user", + "description": "Unlock a locked user account", + "demo_type": "unlock_user", + } + ) + # Service/process related queries - elif any(w in query_lower for w in ["service", "systemctl", "running", "process", "status"]): - suggestions.append({ - "type": "info", - "icon": "📊", - "label": "List running services", - "description": "Show all active services", - "command": "systemctl list-units --type=service --state=running", - "purpose": "List running services", - }) - suggestions.append({ - "type": "info", - "icon": "🔍", - "label": "Check failed services", - "description": "Show services 
that failed to start", - "command": "systemctl list-units --type=service --state=failed", - "purpose": "List failed services", - }) - + elif any( + w in query_lower for w in ["service", "systemctl", "running", "process", "status"] + ): + suggestions.append( + { + "type": "info", + "icon": "📊", + "label": "List running services", + "description": "Show all active services", + "command": "systemctl list-units --type=service --state=running", + "purpose": "List running services", + } + ) + suggestions.append( + { + "type": "info", + "icon": "🔍", + "label": "Check failed services", + "description": "Show services that failed to start", + "command": "systemctl list-units --type=service --state=failed", + "purpose": "List failed services", + } + ) + # Disk/storage related queries elif any(w in query_lower for w in ["disk", "storage", "space", "mount", "partition"]): - suggestions.append({ - "type": "info", - "icon": "💾", - "label": "Check disk usage", - "description": "Show disk space by partition", - "command": "df -h", - "purpose": "Check disk usage", - }) - suggestions.append({ - "type": "info", - "icon": "📁", - "label": "Find large files", - "description": "Show largest files on disk", - "command": "sudo du -ah / 2>/dev/null | sort -rh | head -20", - "purpose": "Find large files", - }) - + suggestions.append( + { + "type": "info", + "icon": "💾", + "label": "Check disk usage", + "description": "Show disk space by partition", + "command": "df -h", + "purpose": "Check disk usage", + } + ) + suggestions.append( + { + "type": "info", + "icon": "📁", + "label": "Find large files", + "description": "Show largest files on disk", + "command": "sudo du -ah / 2>/dev/null | sort -rh | head -20", + "purpose": "Find large files", + } + ) + # Network related queries elif any(w in query_lower for w in ["network", "ip", "port", "connection", "firewall"]): - suggestions.append({ - "type": "info", - "icon": "🌐", - "label": "Show network interfaces", - "description": "Display IP 
addresses and interfaces", - "command": "ip addr show", - "purpose": "Show network interfaces", - }) - suggestions.append({ - "type": "info", - "icon": "🔌", - "label": "List open ports", - "description": "Show listening ports", - "command": "sudo ss -tlnp", - "purpose": "List open ports", - }) - + suggestions.append( + { + "type": "info", + "icon": "🌐", + "label": "Show network interfaces", + "description": "Display IP addresses and interfaces", + "command": "ip addr show", + "purpose": "Show network interfaces", + } + ) + suggestions.append( + { + "type": "info", + "icon": "🔌", + "label": "List open ports", + "description": "Show listening ports", + "command": "sudo ss -tlnp", + "purpose": "List open ports", + } + ) + # Security related queries elif any(w in query_lower for w in ["security", "audit", "log", "auth", "fail"]): - suggestions.append({ - "type": "info", - "icon": "🔒", - "label": "Check auth logs", - "description": "Show recent authentication attempts", - "command": "sudo tail -50 /var/log/auth.log", - "purpose": "Check auth logs", - }) - suggestions.append({ - "type": "info", - "icon": "⚠️", - "label": "Check failed logins", - "description": "Show failed login attempts", - "command": "sudo lastb | head -20", - "purpose": "Check failed logins", - }) - + suggestions.append( + { + "type": "info", + "icon": "🔒", + "label": "Check auth logs", + "description": "Show recent authentication attempts", + "command": "sudo tail -50 /var/log/auth.log", + "purpose": "Check auth logs", + } + ) + suggestions.append( + { + "type": "info", + "icon": "⚠️", + "label": "Check failed logins", + "description": "Show failed login attempts", + "command": "sudo lastb | head -20", + "purpose": "Check failed logins", + } + ) + # Package/installation related queries elif any(w in query_lower for w in ["install", "package", "apt", "update"]): - suggestions.append({ - "type": "action", - "icon": "📦", - "label": "Update system", - "description": "Update package lists and upgrade", - 
"command": "sudo apt update && sudo apt upgrade -y", - "purpose": "Update system packages", - }) - suggestions.append({ - "type": "info", - "icon": "📋", - "label": "List installed packages", - "description": "Show recently installed packages", - "command": "apt list --installed 2>/dev/null | tail -20", - "purpose": "List installed packages", - }) - + suggestions.append( + { + "type": "action", + "icon": "📦", + "label": "Update system", + "description": "Update package lists and upgrade", + "command": "sudo apt update && sudo apt upgrade -y", + "purpose": "Update system packages", + } + ) + suggestions.append( + { + "type": "info", + "icon": "📋", + "label": "List installed packages", + "description": "Show recently installed packages", + "command": "apt list --installed 2>/dev/null | tail -20", + "purpose": "List installed packages", + } + ) + # Default: generic helpful suggestions if not suggestions: - suggestions.append({ - "type": "info", - "icon": "📊", - "label": "System overview", - "description": "Show system info and resource usage", - "command": "uname -a && uptime && free -h", - "purpose": "System overview", - }) - suggestions.append({ - "type": "info", - "icon": "🔍", - "label": "Check system logs", - "description": "View recent system messages", - "command": "sudo journalctl -n 20 --no-pager", - "purpose": "Check system logs", - }) - + suggestions.append( + { + "type": "info", + "icon": "📊", + "label": "System overview", + "description": "Show system info and resource usage", + "command": "uname -a && uptime && free -h", + "purpose": "System overview", + } + ) + suggestions.append( + { + "type": "info", + "icon": "🔍", + "label": "Check system logs", + "description": "View recent system messages", + "command": "sudo journalctl -n 20 --no-pager", + "purpose": "Check system logs", + } + ) + return suggestions - + def _display_suggestions(self, suggestions: list[dict]) -> None: """Display numbered suggestions.""" if not suggestions: console.print("[dim]No 
specific suggestions available.[/dim]") return - + for i, suggestion in enumerate(suggestions, 1): icon = suggestion.get("icon", "💡") label = suggestion.get("label", "") @@ -1730,9 +1981,9 @@ def _display_suggestions(self, suggestions: list[dict]) -> None: console.print(f" [cyan]{i}.[/cyan] {icon} {label}") if desc: console.print(f" [dim]{desc}[/dim]") - + console.print(f" [cyan]{len(suggestions) + 1}.[/cyan] 🚪 Exit session") - + def _handle_natural_language_request( self, request: str, @@ -1742,12 +1993,12 @@ def _handle_natural_language_request( commands: list[tuple[str, str, list[str]]], ) -> bool: """Handle a natural language request from the user. - + Uses LLM if available for full understanding, falls back to pattern matching. Returns True if the request was handled, False otherwise. """ request_lower = request.lower() - + # Quick keyword matching for common actions (fast path) keyword_handlers = [ (["start", "run", "begin", "launch", "execute"], "start"), @@ -1755,7 +2006,7 @@ def _handle_natural_language_request( (["demo", "example", "sample", "code"], "demo"), (["test", "verify", "check", "validate"], "test"), ] - + # Check if request is a simple match to existing suggestions for keywords, action_type in keyword_handlers: if any(kw in request_lower for kw in keywords): @@ -1765,17 +2016,17 @@ def _handle_natural_language_request( if suggestion.get("type") == action_type: self._execute_suggestion(suggestion, run, context["original_query"]) return True - + # Use LLM for full understanding if available console.print() - console.print(f"[cyan]🤔 Understanding your request...[/cyan]") - + console.print("[cyan]🤔 Understanding your request...[/cyan]") + if self.llm_callback: return self._handle_request_with_llm(request, context, run, commands) else: # Fall back to pattern matching return self._handle_request_with_patterns(request, context, run) - + def _handle_request_with_llm( self, request: str, @@ -1787,23 +2038,28 @@ def _handle_request_with_llm( try: # Call 
LLM to understand the request llm_response = self.llm_callback(request, context) - + if not llm_response or llm_response.get("response_type") == "error": - console.print(f"[yellow]⚠ Could not process request: {llm_response.get('error', 'Unknown error')}[/yellow]") + console.print( + f"[yellow]⚠ Could not process request: {llm_response.get('error', 'Unknown error')}[/yellow]" + ) return False - + response_type = llm_response.get("response_type") - + # HARD CHECK: Filter out any raw JSON from reasoning field reasoning = llm_response.get("reasoning", "") if reasoning: # Remove any JSON-like content from reasoning import re + # If reasoning looks like JSON or contains JSON patterns, clean it - if (reasoning.strip().startswith(('{', '[', ']', '"response_type"')) or - re.search(r'"do_commands"\s*:', reasoning) or - re.search(r'"command"\s*:', reasoning) or - re.search(r'"requires_sudo"\s*:', reasoning)): + if ( + reasoning.strip().startswith(("{", "[", "]", '"response_type"')) + or re.search(r'"do_commands"\s*:', reasoning) + or re.search(r'"command"\s*:', reasoning) + or re.search(r'"requires_sudo"\s*:', reasoning) + ): # Extract just the text explanation if possible text_match = re.search(r'"reasoning"\s*:\s*"([^"]+)"', reasoning) if text_match: @@ -1811,18 +2067,18 @@ def _handle_request_with_llm( else: reasoning = "Processing your request..." 
llm_response["reasoning"] = reasoning - + # Handle do_commands - execute with confirmation if response_type == "do_commands" and llm_response.get("do_commands"): do_commands = llm_response["do_commands"] reasoning = llm_response.get("reasoning", "") - + # Final safety check: don't print JSON-looking reasoning if reasoning and not self._is_json_like(reasoning): console.print() console.print(f"[cyan]🤖 {reasoning}[/cyan]") console.print() - + # Show commands and ask for confirmation console.print("[bold]📋 Commands to execute:[/bold]") for i, cmd_info in enumerate(do_commands, 1): @@ -1833,113 +2089,125 @@ def _handle_request_with_llm( if purpose: console.print(f" [dim]{purpose}[/dim]") console.print() - + if not Confirm.ask("Execute these commands?", default=True): console.print("[dim]Skipped.[/dim]") return False - + # Execute the commands console.print() from rich.panel import Panel - + executed_in_session = [] for idx, cmd_info in enumerate(do_commands, 1): cmd = cmd_info.get("command", "") purpose = cmd_info.get("purpose", "Execute command") needs_sudo = cmd_info.get("requires_sudo", False) or self._needs_sudo(cmd, []) - + # Create visual grouping for each command console.print() - console.print(Panel( - f"[bold cyan]{cmd}[/bold cyan]\n[dim]└─ {purpose}[/dim]", - title=f"[bold] Command {idx}/{len(do_commands)} [/bold]", - title_align="left", - border_style="blue", - padding=(0, 1), - )) - + console.print( + Panel( + f"[bold cyan]{cmd}[/bold cyan]\n[dim]└─ {purpose}[/dim]", + title=f"[bold] Command {idx}/{len(do_commands)} [/bold]", + title_align="left", + border_style="blue", + padding=(0, 1), + ) + ) + success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) - + if success: - console.print(Panel( - f"[bold green]✓ Success[/bold green]", - border_style="green", - padding=(0, 1), - expand=False, - )) + console.print( + Panel( + "[bold green]✓ Success[/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) if stdout: - 
output_preview = stdout[:300] + ('...' if len(stdout) > 300 else '') + output_preview = stdout[:300] + ("..." if len(stdout) > 300 else "") console.print(f"[dim]{output_preview}[/dim]") executed_in_session.append(cmd) else: - console.print(Panel( - f"[bold red]✗ Failed[/bold red]\n[dim]{stderr[:150]}[/dim]", - border_style="red", - padding=(0, 1), - )) - + console.print( + Panel( + f"[bold red]✗ Failed[/bold red]\n[dim]{stderr[:150]}[/dim]", + border_style="red", + padding=(0, 1), + ) + ) + # Offer to diagnose and fix if Confirm.ask("Try to auto-fix?", default=True): diagnosis = self._diagnoser.diagnose_error(cmd, stderr) fixed, msg, _ = self._auto_fixer.auto_fix_error(cmd, stderr, diagnosis) if fixed: - console.print(Panel( - f"[bold green]✓ Fixed:[/bold green] {msg}", - border_style="green", - padding=(0, 1), - expand=False, - )) + console.print( + Panel( + f"[bold green]✓ Fixed:[/bold green] {msg}", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) executed_in_session.append(cmd) - + # Track executed commands in context for suggestion generation if "executed_commands" not in context: context["executed_commands"] = [] context["executed_commands"].extend(executed_in_session) - + return True - + # Handle single command - execute directly elif response_type == "command" and llm_response.get("command"): cmd = llm_response["command"] reasoning = llm_response.get("reasoning", "") - + console.print() console.print(f"[cyan]📋 Running:[/cyan] [green]{cmd}[/green]") if reasoning: console.print(f" [dim]{reasoning}[/dim]") - + needs_sudo = self._needs_sudo(cmd, []) success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) - + if success: - console.print(f"[green]✓ Success[/green]") + console.print("[green]✓ Success[/green]") if stdout: - console.print(f"[dim]{stdout[:500]}{'...' if len(stdout) > 500 else ''}[/dim]") + console.print( + f"[dim]{stdout[:500]}{'...' 
if len(stdout) > 500 else ''}[/dim]" + ) else: console.print(f"[red]✗ Failed: {stderr[:200]}[/red]") - + return True - + # Handle answer - just display it (filter raw JSON) elif response_type == "answer" and llm_response.get("answer"): answer = llm_response["answer"] # Don't print raw JSON or internal processing messages - if not (self._is_json_like(answer) or - "I'm processing your request" in answer or - "I have a plan to execute" in answer): + if not ( + self._is_json_like(answer) + or "I'm processing your request" in answer + or "I have a plan to execute" in answer + ): console.print() console.print(answer) return True - + else: - console.print(f"[yellow]I didn't understand that. Could you rephrase?[/yellow]") + console.print("[yellow]I didn't understand that. Could you rephrase?[/yellow]") return False - + except Exception as e: console.print(f"[yellow]⚠ Error processing request: {e}[/yellow]") # Fall back to pattern matching return self._handle_request_with_patterns(request, context, run) - + def _handle_request_with_patterns( self, request: str, @@ -1949,49 +2217,51 @@ def _handle_request_with_patterns( """Handle request using pattern matching (fallback when LLM not available).""" # Try to generate a command from the natural language request generated = self._generate_command_from_request(request, context) - + if generated: cmd = generated.get("command") purpose = generated.get("purpose", "Execute user request") needs_confirm = generated.get("needs_confirmation", True) - + console.print() - console.print(f"[cyan]📋 I'll run this command:[/cyan]") + console.print("[cyan]📋 I'll run this command:[/cyan]") console.print(f" [green]{cmd}[/green]") console.print(f" [dim]{purpose}[/dim]") console.print() - + if needs_confirm: if not Confirm.ask("Proceed?", default=True): console.print("[dim]Skipped.[/dim]") return False - + # Execute the command needs_sudo = self._needs_sudo(cmd, []) success, stdout, stderr = self._execute_single_command(cmd, needs_sudo) - + if 
success: - console.print(f"[green]✓ Success[/green]") + console.print("[green]✓ Success[/green]") if stdout: - output_preview = stdout[:500] + ('...' if len(stdout) > 500 else '') + output_preview = stdout[:500] + ("..." if len(stdout) > 500 else "") console.print(f"[dim]{output_preview}[/dim]") else: console.print(f"[red]✗ Failed: {stderr[:200]}[/red]") - + # Offer to diagnose the error if Confirm.ask("Would you like me to try to fix this?", default=True): diagnosis = self._diagnoser.diagnose_error(cmd, stderr) fixed, msg, _ = self._auto_fixer.auto_fix_error(cmd, stderr, diagnosis) if fixed: console.print(f"[green]✓ Fixed: {msg}[/green]") - + return True - + # Couldn't understand the request - console.print(f"[yellow]I'm not sure how to do that. Could you be more specific?[/yellow]") - console.print(f"[dim]Try something like: 'run the container', 'show me the config', or select a number.[/dim]") + console.print("[yellow]I'm not sure how to do that. Could you be more specific?[/yellow]") + console.print( + "[dim]Try something like: 'run the container', 'show me the config', or select a number.[/dim]" + ) return False - + def _generate_command_from_request( self, request: str, @@ -2001,7 +2271,7 @@ def _generate_command_from_request( request_lower = request.lower() executed_cmds = context.get("executed_commands", []) cmd_context = " ".join(executed_cmds).lower() - + # Pattern matching for common requests patterns = [ # Docker patterns @@ -2011,37 +2281,40 @@ def _generate_command_from_request( (r"(?:show|list).*(?:containers?|images?)", self._gen_docker_list), (r"logs?(?:\s+of)?(?:\s+the)?(?:\s+container)?", self._gen_docker_logs), (r"exec.*(?:container|docker)|shell.*(?:container|docker)", self._gen_docker_exec), - # Service patterns - (r"(?:start|restart).*(?:service|nginx|apache|postgres|mysql|redis)", self._gen_service_start), + ( + r"(?:start|restart).*(?:service|nginx|apache|postgres|mysql|redis)", + self._gen_service_start, + ), 
(r"stop.*(?:service|nginx|apache|postgres|mysql|redis)", self._gen_service_stop), (r"status.*(?:service|nginx|apache|postgres|mysql|redis)", self._gen_service_status), - # Package patterns (r"install\s+(.+)", self._gen_install_package), (r"update\s+(?:packages?|system)", self._gen_update_packages), - # File patterns - (r"(?:show|cat|view|read).*(?:config|file|log)(?:.*?([/\w\.\-]+))?", self._gen_show_file), + ( + r"(?:show|cat|view|read).*(?:config|file|log)(?:.*?([/\w\.\-]+))?", + self._gen_show_file, + ), (r"edit.*(?:config|file)(?:.*?([/\w\.\-]+))?", self._gen_edit_file), - # Info patterns (r"(?:check|show|what).*(?:version|status)", self._gen_check_version), (r"(?:how|where).*(?:connect|access|use)", self._gen_show_connection_info), ] - + import re + for pattern, handler in patterns: match = re.search(pattern, request_lower) if match: return handler(request, match, context) - + # Use LLM if available to generate command if self.llm_callback: return self._llm_generate_command(request, context) - + return None - + # Command generators def _gen_docker_run(self, request: str, match, context: dict) -> dict: # Find the image from context @@ -2051,50 +2324,58 @@ def _gen_docker_run(self, request: str, match, context: dict) -> dict: if "docker pull" in cmd: image = cmd.split("docker pull")[-1].strip() break - + # Check for port in request port = match.group(1) if match.lastindex and match.group(1) else "8080" container_name = image.split("/")[-1].split(":")[0] - + return { "command": f"docker run -d --name {container_name} -p {port}:{port} {image}", "purpose": f"Run {image} container on port {port}", "needs_confirmation": True, } - + def _gen_docker_stop(self, request: str, match, context: dict) -> dict: return { "command": "docker ps -q | xargs -r docker stop", "purpose": "Stop all running containers", "needs_confirmation": True, } - + def _gen_docker_remove(self, request: str, match, context: dict) -> dict: return { "command": "docker ps -aq | xargs -r docker rm", 
"purpose": "Remove all containers", "needs_confirmation": True, } - + def _gen_docker_list(self, request: str, match, context: dict) -> dict: if "image" in request.lower(): - return {"command": "docker images", "purpose": "List Docker images", "needs_confirmation": False} - return {"command": "docker ps -a", "purpose": "List all containers", "needs_confirmation": False} - + return { + "command": "docker images", + "purpose": "List Docker images", + "needs_confirmation": False, + } + return { + "command": "docker ps -a", + "purpose": "List all containers", + "needs_confirmation": False, + } + def _gen_docker_logs(self, request: str, match, context: dict) -> dict: return { "command": "docker logs $(docker ps -lq) --tail 50", "purpose": "Show logs of the most recent container", "needs_confirmation": False, } - + def _gen_docker_exec(self, request: str, match, context: dict) -> dict: return { "command": "docker exec -it $(docker ps -lq) /bin/sh", "purpose": "Open shell in the most recent container", "needs_confirmation": True, } - + def _gen_service_start(self, request: str, match, context: dict) -> dict: # Extract service name services = ["nginx", "apache2", "postgresql", "mysql", "redis", "docker"] @@ -2103,11 +2384,19 @@ def _gen_service_start(self, request: str, match, context: dict) -> dict: if svc in request.lower(): service = svc break - + if "restart" in request.lower(): - return {"command": f"sudo systemctl restart {service}", "purpose": f"Restart {service}", "needs_confirmation": True} - return {"command": f"sudo systemctl start {service}", "purpose": f"Start {service}", "needs_confirmation": True} - + return { + "command": f"sudo systemctl restart {service}", + "purpose": f"Restart {service}", + "needs_confirmation": True, + } + return { + "command": f"sudo systemctl start {service}", + "purpose": f"Start {service}", + "needs_confirmation": True, + } + def _gen_service_stop(self, request: str, match, context: dict) -> dict: services = ["nginx", "apache2", 
"postgresql", "mysql", "redis", "docker"] service = "nginx" @@ -2115,8 +2404,12 @@ def _gen_service_stop(self, request: str, match, context: dict) -> dict: if svc in request.lower(): service = svc break - return {"command": f"sudo systemctl stop {service}", "purpose": f"Stop {service}", "needs_confirmation": True} - + return { + "command": f"sudo systemctl stop {service}", + "purpose": f"Stop {service}", + "needs_confirmation": True, + } + def _gen_service_status(self, request: str, match, context: dict) -> dict: services = ["nginx", "apache2", "postgresql", "mysql", "redis", "docker"] service = "nginx" @@ -2124,8 +2417,12 @@ def _gen_service_status(self, request: str, match, context: dict) -> dict: if svc in request.lower(): service = svc break - return {"command": f"systemctl status {service}", "purpose": f"Check {service} status", "needs_confirmation": False} - + return { + "command": f"systemctl status {service}", + "purpose": f"Check {service} status", + "needs_confirmation": False, + } + def _gen_install_package(self, request: str, match, context: dict) -> dict: package = match.group(1).strip() if match.group(1) else "package-name" # Clean up common words @@ -2135,18 +2432,18 @@ def _gen_install_package(self, request: str, match, context: dict) -> dict: "purpose": f"Install {package}", "needs_confirmation": True, } - + def _gen_update_packages(self, request: str, match, context: dict) -> dict: return { "command": "sudo apt update && sudo apt upgrade -y", "purpose": "Update all packages", "needs_confirmation": True, } - + def _gen_show_file(self, request: str, match, context: dict) -> dict: # Try to extract file path or use common config locations file_path = match.group(1) if match.lastindex and match.group(1) else None - + if not file_path: if "nginx" in request.lower(): file_path = "/etc/nginx/nginx.conf" @@ -2156,9 +2453,13 @@ def _gen_show_file(self, request: str, match, context: dict) -> dict: file_path = "/etc/postgresql/*/main/postgresql.conf" else: 
file_path = "/etc/hosts" - - return {"command": f"cat {file_path}", "purpose": f"Show {file_path}", "needs_confirmation": False} - + + return { + "command": f"cat {file_path}", + "purpose": f"Show {file_path}", + "needs_confirmation": False, + } + def _gen_edit_file(self, request: str, match, context: dict) -> dict: file_path = match.group(1) if match.lastindex and match.group(1) else "/etc/hosts" return { @@ -2166,7 +2467,7 @@ def _gen_edit_file(self, request: str, match, context: dict) -> dict: "purpose": f"Edit {file_path}", "needs_confirmation": True, } - + def _gen_check_version(self, request: str, match, context: dict) -> dict: # Try to determine what to check version of tools = { @@ -2176,21 +2477,25 @@ def _gen_check_version(self, request: str, match, context: dict) -> dict: "nginx": "nginx -v", "postgres": "psql --version", } - + for tool, cmd in tools.items(): if tool in request.lower(): - return {"command": cmd, "purpose": f"Check {tool} version", "needs_confirmation": False} - + return { + "command": cmd, + "purpose": f"Check {tool} version", + "needs_confirmation": False, + } + # Default: show multiple versions return { "command": "docker --version; node --version 2>/dev/null; python3 --version", "purpose": "Check installed tool versions", "needs_confirmation": False, } - + def _gen_show_connection_info(self, request: str, match, context: dict) -> dict: executed = context.get("executed_commands", []) - + # Check what was installed to provide relevant connection info if any("ollama" in cmd for cmd in executed): return { @@ -2210,18 +2515,18 @@ def _gen_show_connection_info(self, request: str, match, context: dict) -> dict: "purpose": "Show Nginx connection info", "needs_confirmation": False, } - + return { "command": "ss -tlnp | head -20", "purpose": "Show listening ports and services", "needs_confirmation": False, } - + def _llm_generate_command(self, request: str, context: dict) -> dict | None: """Use LLM to generate a command from the request.""" if 
not self.llm_callback: return None - + try: prompt = f"""Given this context: - User originally asked: {context.get('original_query', 'N/A')} @@ -2244,9 +2549,9 @@ def _llm_generate_command(self, request: str, context: dict) -> dict | None: } except Exception: pass - + return None - + def _generate_suggestions( self, run: DoRun, @@ -2255,12 +2560,12 @@ def _generate_suggestions( ) -> list[dict]: """Generate context-aware suggestions based on what was installed/configured.""" suggestions = [] - + # Analyze what was done executed_cmds = [cmd for cmd, _, _ in commands] cmd_str = " ".join(executed_cmds).lower() query_lower = user_query.lower() - + # Docker-related suggestions if "docker" in cmd_str or "docker" in query_lower: if "pull" in cmd_str: @@ -2268,158 +2573,192 @@ def _generate_suggestions( for cmd, _, _ in commands: if "docker pull" in cmd: image = cmd.split("docker pull")[-1].strip() - suggestions.append({ - "type": "start", - "icon": "🚀", - "label": f"Start the container", - "description": f"Run {image} in a container", - "command": f"docker run -d --name {image.split('/')[-1].split(':')[0]} {image}", - "purpose": f"Start {image} container", - }) - suggestions.append({ - "type": "demo", - "icon": "📝", - "label": "Show demo usage", - "description": f"Example docker-compose and run commands", - "demo_type": "docker", - "image": image, - }) + suggestions.append( + { + "type": "start", + "icon": "🚀", + "label": "Start the container", + "description": f"Run {image} in a container", + "command": f"docker run -d --name {image.split('/')[-1].split(':')[0]} {image}", + "purpose": f"Start {image} container", + } + ) + suggestions.append( + { + "type": "demo", + "icon": "📝", + "label": "Show demo usage", + "description": "Example docker-compose and run commands", + "demo_type": "docker", + "image": image, + } + ) break - + # Ollama/Model runner suggestions if "ollama" in cmd_str or "ollama" in query_lower or "model" in query_lower: - suggestions.append({ - "type": 
"start", - "icon": "🚀", - "label": "Start Ollama server", - "description": "Run Ollama in the background", - "command": "docker run -d --name ollama -p 11434:11434 -v ollama:/root/.ollama ollama/ollama", - "purpose": "Start Ollama server container", - }) - suggestions.append({ - "type": "setup", - "icon": "⚙️", - "label": "Pull a model", - "description": "Download a model like llama2, mistral, or codellama", - "command": "docker exec ollama ollama pull llama2", - "purpose": "Download llama2 model", - }) - suggestions.append({ - "type": "demo", - "icon": "📝", - "label": "Show API demo", - "description": "Example curl commands and Python code", - "demo_type": "ollama", - }) - suggestions.append({ - "type": "test", - "icon": "🧪", - "label": "Test the installation", - "description": "Verify Ollama is running correctly", - "command": "curl http://localhost:11434/api/tags", - "purpose": "Check Ollama API", - }) - + suggestions.append( + { + "type": "start", + "icon": "🚀", + "label": "Start Ollama server", + "description": "Run Ollama in the background", + "command": "docker run -d --name ollama -p 11434:11434 -v ollama:/root/.ollama ollama/ollama", + "purpose": "Start Ollama server container", + } + ) + suggestions.append( + { + "type": "setup", + "icon": "⚙️", + "label": "Pull a model", + "description": "Download a model like llama2, mistral, or codellama", + "command": "docker exec ollama ollama pull llama2", + "purpose": "Download llama2 model", + } + ) + suggestions.append( + { + "type": "demo", + "icon": "📝", + "label": "Show API demo", + "description": "Example curl commands and Python code", + "demo_type": "ollama", + } + ) + suggestions.append( + { + "type": "test", + "icon": "🧪", + "label": "Test the installation", + "description": "Verify Ollama is running correctly", + "command": "curl http://localhost:11434/api/tags", + "purpose": "Check Ollama API", + } + ) + # Nginx suggestions if "nginx" in cmd_str or "nginx" in query_lower: - suggestions.append({ - 
"type": "start", - "icon": "🚀", - "label": "Start Nginx", - "description": "Start the Nginx web server", - "command": "sudo systemctl start nginx", - "purpose": "Start Nginx service", - }) - suggestions.append({ - "type": "setup", - "icon": "⚙️", - "label": "Configure a site", - "description": "Set up a new virtual host", - "demo_type": "nginx_config", - }) - suggestions.append({ - "type": "test", - "icon": "🧪", - "label": "Test configuration", - "description": "Verify Nginx config is valid", - "command": "sudo nginx -t", - "purpose": "Test Nginx configuration", - }) - + suggestions.append( + { + "type": "start", + "icon": "🚀", + "label": "Start Nginx", + "description": "Start the Nginx web server", + "command": "sudo systemctl start nginx", + "purpose": "Start Nginx service", + } + ) + suggestions.append( + { + "type": "setup", + "icon": "⚙️", + "label": "Configure a site", + "description": "Set up a new virtual host", + "demo_type": "nginx_config", + } + ) + suggestions.append( + { + "type": "test", + "icon": "🧪", + "label": "Test configuration", + "description": "Verify Nginx config is valid", + "command": "sudo nginx -t", + "purpose": "Test Nginx configuration", + } + ) + # PostgreSQL suggestions if "postgres" in cmd_str or "postgresql" in query_lower: - suggestions.append({ - "type": "start", - "icon": "🚀", - "label": "Start PostgreSQL", - "description": "Start the database server", - "command": "sudo systemctl start postgresql", - "purpose": "Start PostgreSQL service", - }) - suggestions.append({ - "type": "setup", - "icon": "⚙️", - "label": "Create a database", - "description": "Create a new database and user", - "demo_type": "postgres_setup", - }) - suggestions.append({ - "type": "test", - "icon": "🧪", - "label": "Test connection", - "description": "Verify PostgreSQL is accessible", - "command": "sudo -u postgres psql -c '\\l'", - "purpose": "List PostgreSQL databases", - }) - + suggestions.append( + { + "type": "start", + "icon": "🚀", + "label": "Start 
PostgreSQL", + "description": "Start the database server", + "command": "sudo systemctl start postgresql", + "purpose": "Start PostgreSQL service", + } + ) + suggestions.append( + { + "type": "setup", + "icon": "⚙️", + "label": "Create a database", + "description": "Create a new database and user", + "demo_type": "postgres_setup", + } + ) + suggestions.append( + { + "type": "test", + "icon": "🧪", + "label": "Test connection", + "description": "Verify PostgreSQL is accessible", + "command": "sudo -u postgres psql -c '\\l'", + "purpose": "List PostgreSQL databases", + } + ) + # Node.js/npm suggestions if "node" in cmd_str or "npm" in cmd_str or "nodejs" in query_lower: - suggestions.append({ - "type": "demo", - "icon": "📝", - "label": "Show starter code", - "description": "Example Express.js server", - "demo_type": "nodejs", - }) - suggestions.append({ - "type": "test", - "icon": "🧪", - "label": "Verify installation", - "description": "Check Node.js and npm versions", - "command": "node --version && npm --version", - "purpose": "Check Node.js installation", - }) - + suggestions.append( + { + "type": "demo", + "icon": "📝", + "label": "Show starter code", + "description": "Example Express.js server", + "demo_type": "nodejs", + } + ) + suggestions.append( + { + "type": "test", + "icon": "🧪", + "label": "Verify installation", + "description": "Check Node.js and npm versions", + "command": "node --version && npm --version", + "purpose": "Check Node.js installation", + } + ) + # Python/pip suggestions if "python" in cmd_str or "pip" in cmd_str: - suggestions.append({ - "type": "demo", - "icon": "📝", - "label": "Show example code", - "description": "Example Python usage", - "demo_type": "python", - }) - suggestions.append({ - "type": "test", - "icon": "🧪", - "label": "Test import", - "description": "Verify packages are importable", - "demo_type": "python_test", - }) - + suggestions.append( + { + "type": "demo", + "icon": "📝", + "label": "Show example code", + "description": 
"Example Python usage", + "demo_type": "python", + } + ) + suggestions.append( + { + "type": "test", + "icon": "🧪", + "label": "Test import", + "description": "Verify packages are importable", + "demo_type": "python_test", + } + ) + # Generic suggestions if nothing specific matched if not suggestions: # Add a generic test suggestion - suggestions.append({ - "type": "test", - "icon": "🧪", - "label": "Run a quick test", - "description": "Verify the installation works", - "demo_type": "generic_test", - }) - + suggestions.append( + { + "type": "test", + "icon": "🧪", + "label": "Run a quick test", + "description": "Verify the installation works", + "demo_type": "generic_test", + } + ) + return suggestions[:5] # Limit to 5 suggestions - + def _execute_suggestion( self, suggestion: dict, @@ -2428,24 +2767,27 @@ def _execute_suggestion( ) -> None: """Execute a suggestion.""" suggestion_type = suggestion.get("type") - + if suggestion_type == "retry_interrupted": # Retry the command that was interrupted if self._interrupted_command: console.print() console.print(f"[cyan]🔄 Retrying:[/cyan] {self._interrupted_command}") console.print() - - needs_sudo = "sudo" in self._interrupted_command or self._needs_sudo(self._interrupted_command, []) + + needs_sudo = "sudo" in self._interrupted_command or self._needs_sudo( + self._interrupted_command, [] + ) success, stdout, stderr = self._execute_single_command( - self._interrupted_command, - needs_sudo=needs_sudo + self._interrupted_command, needs_sudo=needs_sudo ) - + if success: - console.print(f"[green]✓ Success[/green]") + console.print("[green]✓ Success[/green]") if stdout: - console.print(f"[dim]{stdout[:500]}{'...' if len(stdout) > 500 else ''}[/dim]") + console.print( + f"[dim]{stdout[:500]}{'...' 
if len(stdout) > 500 else ''}[/dim]" + ) self._interrupted_command = None # Clear after successful retry else: console.print(f"[red]✗ Failed: {stderr[:200]}[/red]") @@ -2456,13 +2798,15 @@ def _execute_suggestion( console.print() console.print("[cyan]⏭️ Skipping interrupted command and continuing...[/cyan]") self._interrupted_command = None - + if self._remaining_commands: console.print(f"[dim]Remaining commands: {len(self._remaining_commands)}[/dim]") for cmd, purpose, protected in self._remaining_commands: console.print(f"[dim] • {cmd[:60]}{'...' if len(cmd) > 60 else ''}[/dim]") console.print() - console.print("[dim]Use 'continue all' to execute remaining commands, or type a new request.[/dim]") + console.print( + "[dim]Use 'continue all' to execute remaining commands, or type a new request.[/dim]" + ) else: console.print("[dim]No remaining commands to execute.[/dim]") elif suggestion_type == "demo": @@ -2474,15 +2818,14 @@ def _execute_suggestion( console.print() console.print(f"[cyan]Executing:[/cyan] {suggestion['command']}") console.print() - + needs_sudo = "sudo" in suggestion["command"] success, stdout, stderr = self._execute_single_command( - suggestion["command"], - needs_sudo=needs_sudo + suggestion["command"], needs_sudo=needs_sudo ) - + if success: - console.print(f"[green]✓ Success[/green]") + console.print("[green]✓ Success[/green]") if stdout: console.print(f"[dim]{stdout[:500]}{'...' 
if len(stdout) > 500 else ''}[/dim]") else: @@ -2497,130 +2840,155 @@ def _execute_suggestion( console.print("[dim]Copy and run these commands in your terminal.[/dim]") else: console.print("[yellow]No specific action available for this suggestion.[/yellow]") - + def _show_test_commands(self, run: DoRun, user_query: str) -> None: """Show test commands based on what was installed/configured.""" from rich.panel import Panel - + console.print() console.print("[bold cyan]🧪 Quick Test Commands[/bold cyan]") console.print() - + test_commands = [] query_lower = user_query.lower() - + # Detect what was installed and suggest appropriate tests executed_cmds = [c.command.lower() for c in run.commands if c.status.value == "success"] all_cmds_str = " ".join(executed_cmds) - + # Web server tests if "apache" in all_cmds_str or "apache2" in query_lower: - test_commands.extend([ - ("Check Apache status", "systemctl status apache2"), - ("Test Apache config", "sudo apache2ctl -t"), - ("View in browser", "curl -I http://localhost"), - ]) - + test_commands.extend( + [ + ("Check Apache status", "systemctl status apache2"), + ("Test Apache config", "sudo apache2ctl -t"), + ("View in browser", "curl -I http://localhost"), + ] + ) + if "nginx" in all_cmds_str or "nginx" in query_lower: - test_commands.extend([ - ("Check Nginx status", "systemctl status nginx"), - ("Test Nginx config", "sudo nginx -t"), - ("View in browser", "curl -I http://localhost"), - ]) - + test_commands.extend( + [ + ("Check Nginx status", "systemctl status nginx"), + ("Test Nginx config", "sudo nginx -t"), + ("View in browser", "curl -I http://localhost"), + ] + ) + # Database tests if "mysql" in all_cmds_str or "mysql" in query_lower: - test_commands.extend([ - ("Check MySQL status", "systemctl status mysql"), - ("Test MySQL connection", "sudo mysql -e 'SELECT VERSION();'"), - ]) - + test_commands.extend( + [ + ("Check MySQL status", "systemctl status mysql"), + ("Test MySQL connection", "sudo mysql -e 'SELECT 
VERSION();'"), + ] + ) + if "postgresql" in all_cmds_str or "postgres" in query_lower: - test_commands.extend([ - ("Check PostgreSQL status", "systemctl status postgresql"), - ("Test PostgreSQL", "sudo -u postgres psql -c 'SELECT version();'"), - ]) - + test_commands.extend( + [ + ("Check PostgreSQL status", "systemctl status postgresql"), + ("Test PostgreSQL", "sudo -u postgres psql -c 'SELECT version();'"), + ] + ) + # Docker tests if "docker" in all_cmds_str or "docker" in query_lower: - test_commands.extend([ - ("Check Docker status", "systemctl status docker"), - ("List containers", "docker ps -a"), - ("Test Docker", "docker run hello-world"), - ]) - + test_commands.extend( + [ + ("Check Docker status", "systemctl status docker"), + ("List containers", "docker ps -a"), + ("Test Docker", "docker run hello-world"), + ] + ) + # PHP tests if "php" in all_cmds_str or "php" in query_lower or "lamp" in query_lower: - test_commands.extend([ - ("Check PHP version", "php -v"), - ("Test PHP info", "php -i | head -20"), - ]) - + test_commands.extend( + [ + ("Check PHP version", "php -v"), + ("Test PHP info", "php -i | head -20"), + ] + ) + # Node.js tests if "node" in all_cmds_str or "nodejs" in query_lower: - test_commands.extend([ - ("Check Node version", "node -v"), - ("Check npm version", "npm -v"), - ]) - + test_commands.extend( + [ + ("Check Node version", "node -v"), + ("Check npm version", "npm -v"), + ] + ) + # Python tests if "python" in all_cmds_str or "python" in query_lower: - test_commands.extend([ - ("Check Python version", "python3 --version"), - ("Check pip version", "pip3 --version"), - ]) - + test_commands.extend( + [ + ("Check Python version", "python3 --version"), + ("Check pip version", "pip3 --version"), + ] + ) + # Generic service tests if not test_commands: # Try to extract service names from commands for cmd_log in run.commands: if "systemctl" in cmd_log.command and cmd_log.status.value == "success": import re - match = 
re.search(r'systemctl\s+(?:start|enable|restart)\s+(\S+)', cmd_log.command) + + match = re.search( + r"systemctl\s+(?:start|enable|restart)\s+(\S+)", cmd_log.command + ) if match: service = match.group(1) - test_commands.append((f"Check {service} status", f"systemctl status {service}")) - + test_commands.append( + (f"Check {service} status", f"systemctl status {service}") + ) + if not test_commands: test_commands = [ ("Check system status", "systemctl --failed"), ("View recent logs", "journalctl -n 20 --no-pager"), ] - + # Display test commands for i, (desc, cmd) in enumerate(test_commands[:6], 1): # Limit to 6 console.print(f" [bold]{i}.[/bold] {desc}") console.print(f" [green]$ {cmd}[/green]") console.print() - + console.print("[dim]Copy and run these commands to verify your installation.[/dim]") console.print() - + # Offer to run the first test try: response = input("[dim]Run first test? [y/N]: [/dim]").strip().lower() - if response in ['y', 'yes']: + if response in ["y", "yes"]: if test_commands: desc, cmd = test_commands[0] console.print() console.print(f"[cyan]Running:[/cyan] {cmd}") needs_sudo = cmd.strip().startswith("sudo") - success, stdout, stderr = self._execute_single_command(cmd, needs_sudo=needs_sudo) + success, stdout, stderr = self._execute_single_command( + cmd, needs_sudo=needs_sudo + ) if success: console.print(f"[green]✓ {desc} - Passed[/green]") if stdout: - console.print(Panel(stdout[:500], title="[dim]Output[/dim]", border_style="dim")) + console.print( + Panel(stdout[:500], title="[dim]Output[/dim]", border_style="dim") + ) else: console.print(f"[red]✗ {desc} - Failed[/red]") if stderr: console.print(f"[dim red]{stderr[:200]}[/dim red]") except (EOFError, KeyboardInterrupt): pass - + def _show_demo(self, demo_type: str, suggestion: dict) -> None: """Show demo code/commands for a specific type.""" console.print() - + if demo_type == "docker": image = suggestion.get("image", "your-image") console.print("[bold cyan]📝 Docker Usage 
Examples[/bold cyan]") @@ -2636,7 +3004,7 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: console.print() console.print("[dim]# Run with volume mount:[/dim]") console.print(f"[green]docker run -d -v /host/path:/container/path {image}[/green]") - + elif demo_type == "ollama": console.print("[bold cyan]📝 Ollama API Examples[/bold cyan]") console.print() @@ -2644,22 +3012,22 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: console.print("[green]curl http://localhost:11434/api/tags[/green]") console.print() console.print("[dim]# Generate text:[/dim]") - console.print('''[green]curl http://localhost:11434/api/generate -d '{ + console.print("""[green]curl http://localhost:11434/api/generate -d '{ "model": "llama2", "prompt": "Hello, how are you?" -}'[/green]''') +}'[/green]""") console.print() console.print("[dim]# Python example:[/dim]") - console.print('''[green]import requests + console.print("""[green]import requests -response = requests.post('http://localhost:11434/api/generate', +response = requests.post('http://localhost:11434/api/generate', json={ 'model': 'llama2', 'prompt': 'Explain quantum computing in simple terms', 'stream': False }) -print(response.json()['response'])[/green]''') - +print(response.json()['response'])[/green]""") + elif demo_type == "nginx_config": console.print("[bold cyan]📝 Nginx Configuration Example[/bold cyan]") console.print() @@ -2667,10 +3035,10 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: console.print("[green]sudo nano /etc/nginx/sites-available/mysite[/green]") console.print() console.print("[dim]# Example config:[/dim]") - console.print('''[green]server { + console.print("""[green]server { listen 80; server_name example.com; - + location / { proxy_pass http://localhost:3000; proxy_http_version 1.1; @@ -2678,12 +3046,14 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: proxy_set_header Connection 'upgrade'; proxy_set_header Host $host; } -}[/green]''') 
+}[/green]""") console.print() console.print("[dim]# Enable the site:[/dim]") - console.print("[green]sudo ln -s /etc/nginx/sites-available/mysite /etc/nginx/sites-enabled/[/green]") + console.print( + "[green]sudo ln -s /etc/nginx/sites-available/mysite /etc/nginx/sites-enabled/[/green]" + ) console.print("[green]sudo nginx -t && sudo systemctl reload nginx[/green]") - + elif demo_type == "postgres_setup": console.print("[bold cyan]📝 PostgreSQL Setup Example[/bold cyan]") console.print() @@ -2695,7 +3065,7 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: console.print("[green]psql -U myuser -d mydb[/green]") console.print() console.print("[dim]# Python connection example:[/dim]") - console.print('''[green]import psycopg2 + console.print("""[green]import psycopg2 conn = psycopg2.connect( dbname="mydb", @@ -2705,13 +3075,13 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: ) cursor = conn.cursor() cursor.execute("SELECT version();") -print(cursor.fetchone())[/green]''') - +print(cursor.fetchone())[/green]""") + elif demo_type == "nodejs": console.print("[bold cyan]📝 Node.js Example[/bold cyan]") console.print() console.print("[dim]# Create a simple Express server:[/dim]") - console.print('''[green]// server.js + console.print("""[green]// server.js const express = require('express'); const app = express(); @@ -2721,11 +3091,11 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: app.listen(3000, () => { console.log('Server running on http://localhost:3000'); -});[/green]''') +});[/green]""") console.print() console.print("[dim]# Run it:[/dim]") console.print("[green]npm init -y && npm install express && node server.js[/green]") - + elif demo_type == "python": console.print("[bold cyan]📝 Python Example[/bold cyan]") console.print() @@ -2733,7 +3103,7 @@ def _show_demo(self, demo_type: str, suggestion: dict) -> None: console.print("[green]python3 -m http.server 8000[/green]") console.print() console.print("[dim]# Flask 
web app:[/dim]") - console.print('''[green]from flask import Flask + console.print("""[green]from flask import Flask app = Flask(__name__) @app.route('/') @@ -2741,13 +3111,15 @@ def hello(): return {'message': 'Hello from Python!'} if __name__ == '__main__': - app.run(debug=True)[/green]''') - + app.run(debug=True)[/green]""") + else: - console.print("[dim]No specific demo available. Check the documentation for usage examples.[/dim]") - + console.print( + "[dim]No specific demo available. Check the documentation for usage examples.[/dim]" + ) + console.print() - + def _execute_task_node( self, task: TaskNode, @@ -2758,13 +3130,17 @@ def _execute_task_node( """Execute a single task node with auto-repair capabilities.""" indent = " " * depth task_num = f"[{task.task_type.value.upper()}]" - + # Check if task was marked as skipped (e.g., using existing resource) if task.status == CommandStatus.SKIPPED: # Claude-like skipped output - console.print(f"{indent}[dim]○[/dim] [cyan]{task.command[:65]}{'...' if len(task.command) > 65 else ''}[/cyan]") - console.print(f"{indent} [dim italic]↳ Skipped: {task.output or 'Using existing resource'}[/dim italic]") - + console.print( + f"{indent}[dim]○[/dim] [cyan]{task.command[:65]}{'...' if len(task.command) > 65 else ''}[/cyan]" + ) + console.print( + f"{indent} [dim italic]↳ Skipped: {task.output or 'Using existing resource'}[/dim italic]" + ) + # Log the skipped command cmd_log = CommandLog( command=task.command, @@ -2775,33 +3151,37 @@ def _execute_task_node( ) run.commands.append(cmd_log) return - + # Claude-like command output - console.print(f"{indent}[bold cyan]●[/bold cyan] [bold]{task.command[:65]}{'...' if len(task.command) > 65 else ''}[/bold]") + console.print( + f"{indent}[bold cyan]●[/bold cyan] [bold]{task.command[:65]}{'...' 
if len(task.command) > 65 else ''}[/bold]" + ) console.print(f"{indent} [dim italic]↳ {task.purpose}[/dim italic]") - + protected_paths = [] user_query = run.user_query if run else "" for cmd, _, protected in original_commands: if cmd == task.command: protected_paths = protected break - - file_check = self._file_analyzer.check_file_exists_and_usefulness(task.command, task.purpose, user_query) - + + file_check = self._file_analyzer.check_file_exists_and_usefulness( + task.command, task.purpose, user_query + ) + if file_check["recommendations"]: self._file_analyzer.apply_file_recommendations(file_check["recommendations"]) - + task.status = CommandStatus.RUNNING start_time = time.time() - + needs_sudo = self._needs_sudo(task.command, protected_paths) success, stdout, stderr = self._execute_single_command(task.command, needs_sudo) - + task.output = stdout task.error = stderr task.duration_seconds = time.time() - start_time - + # Check if command was interrupted by Ctrl+Z/Ctrl+C if self._interrupted: task.status = CommandStatus.INTERRUPTED @@ -2814,10 +3194,12 @@ def _execute_task_node( error="Command interrupted by user (Ctrl+Z/Ctrl+C)", duration_seconds=task.duration_seconds, ) - console.print(f"{indent} [yellow]⚠[/yellow] [dim]Interrupted ({task.duration_seconds:.2f}s)[/dim]") + console.print( + f"{indent} [yellow]⚠[/yellow] [dim]Interrupted ({task.duration_seconds:.2f}s)[/dim]" + ) run.commands.append(cmd_log) return - + cmd_log = CommandLog( command=task.command, purpose=task.purpose, @@ -2827,75 +3209,91 @@ def _execute_task_node( error=stderr, duration_seconds=task.duration_seconds, ) - + if success: task.status = CommandStatus.SUCCESS # Claude-like success output - console.print(f"{indent} [green]✓[/green] [dim]Done ({task.duration_seconds:.2f}s)[/dim]") + console.print( + f"{indent} [green]✓[/green] [dim]Done ({task.duration_seconds:.2f}s)[/dim]" + ) if stdout: - output_preview = stdout[:100] + ('...' 
if len(stdout) > 100 else '') + output_preview = stdout[:100] + ("..." if len(stdout) > 100 else "") console.print(f"{indent} [dim]{output_preview}[/dim]") console.print() run.commands.append(cmd_log) return - + task.status = CommandStatus.NEEDS_REPAIR diagnosis = self._diagnoser.diagnose_error(task.command, stderr) task.failure_reason = diagnosis.get("description", "Unknown error") - + # Claude-like error output console.print(f"{indent} [red]✗[/red] [bold red]{diagnosis['error_type']}[/bold red]") - console.print(f"{indent} [dim]{diagnosis['description'][:80]}{'...' if len(diagnosis['description']) > 80 else ''}[/dim]") - + console.print( + f"{indent} [dim]{diagnosis['description'][:80]}{'...' if len(diagnosis['description']) > 80 else ''}[/dim]" + ) + # Check if this is a login/credential required error if diagnosis.get("category") == "login_required": console.print(f"{indent}[cyan] 🔐 Authentication required[/cyan]") - + login_success, login_msg = self._login_handler.handle_login(task.command, stderr) - + if login_success: console.print(f"{indent}[green] ✓ {login_msg}[/green]") console.print(f"{indent}[cyan] Retrying command...[/cyan]") - + # Retry the command needs_sudo = self._needs_sudo(task.command, []) - success, new_stdout, new_stderr = self._execute_single_command(task.command, needs_sudo) - + success, new_stdout, new_stderr = self._execute_single_command( + task.command, needs_sudo + ) + if success: task.status = CommandStatus.SUCCESS task.reasoning = "Succeeded after authentication" cmd_log.status = CommandStatus.SUCCESS cmd_log.stdout = new_stdout[:500] if new_stdout else "" - console.print(f"{indent}[green] ✓ Command succeeded after authentication![/green]") + console.print( + f"{indent}[green] ✓ Command succeeded after authentication![/green]" + ) run.commands.append(cmd_log) return else: # Still failed after login stderr = new_stderr diagnosis = self._diagnoser.diagnose_error(task.command, stderr) - console.print(f"{indent}[yellow] Command still 
failed: {stderr[:100]}[/yellow]") + console.print( + f"{indent}[yellow] Command still failed: {stderr[:100]}[/yellow]" + ) else: console.print(f"{indent}[yellow] {login_msg}[/yellow]") - + if diagnosis.get("extracted_path"): console.print(f"{indent}[dim] Path: {diagnosis['extracted_path']}[/dim]") - + # Handle timeout errors specially - don't blindly retry if diagnosis.get("category") == "timeout" or "timed out" in stderr.lower(): console.print(f"{indent}[yellow] ⏱️ This operation timed out[/yellow]") - + # Check if it's a docker pull - those might still be running if "docker pull" in task.command.lower(): - console.print(f"{indent}[cyan] ℹ️ Docker pull may still be downloading in background[/cyan]") - console.print(f"{indent}[dim] Check with: docker images | grep [/dim]") - console.print(f"{indent}[dim] Or retry with: docker pull --timeout=0 [/dim]") + console.print( + f"{indent}[cyan] ℹ️ Docker pull may still be downloading in background[/cyan]" + ) + console.print( + f"{indent}[dim] Check with: docker images | grep [/dim]" + ) + console.print( + f"{indent}[dim] Or retry with: docker pull --timeout=0 [/dim]" + ) elif "apt" in task.command.lower(): console.print(f"{indent}[cyan] ℹ️ Package installation timed out[/cyan]") console.print(f"{indent}[dim] Check apt status: sudo dpkg --configure -a[/dim]") console.print(f"{indent}[dim] Then retry the command[/dim]") else: console.print(f"{indent}[cyan] ℹ️ You can retry this command manually[/cyan]") - + # Mark as needing manual intervention, not auto-fix task.status = CommandStatus.NEEDS_REPAIR task.failure_reason = "Operation timed out - may need manual retry" @@ -2903,19 +3301,22 @@ def _execute_task_node( cmd_log.error = stderr run.commands.append(cmd_log) return - + if task.repair_attempts < task.max_repair_attempts: import sys + task.repair_attempts += 1 - console.print(f"{indent}[cyan] 🔧 Auto-fix attempt {task.repair_attempts}/{task.max_repair_attempts}[/cyan]") - + console.print( + f"{indent}[cyan] 🔧 Auto-fix 
attempt {task.repair_attempts}/{task.max_repair_attempts}[/cyan]" + ) + # Flush output before auto-fix to ensure clean display after sudo prompts sys.stdout.flush() - + fixed, fix_message, fix_commands = self._auto_fixer.auto_fix_error( task.command, stderr, diagnosis, max_attempts=3 ) - + for fix_cmd in fix_commands: repair_task = self._task_tree.add_repair_task( parent=task, @@ -2924,7 +3325,7 @@ def _execute_task_node( reasoning=fix_message, ) repair_task.status = CommandStatus.SUCCESS - + if fixed: task.status = CommandStatus.SUCCESS task.reasoning = f"Auto-fixed: {fix_message}" @@ -2934,12 +3335,12 @@ def _execute_task_node( return else: console.print(f"{indent}[yellow] Auto-fix incomplete: {fix_message}[/yellow]") - + task.status = CommandStatus.FAILED task.reasoning = self._generate_task_failure_reasoning(task, diagnosis) - + error_type = diagnosis.get("error_type", "unknown") - + # Check if this is a "soft failure" that shouldn't warrant manual intervention # These are cases where a tool/command simply isn't available and that's OK soft_failure_types = { @@ -2948,7 +3349,7 @@ def _execute_task_node( "no_such_command", "unable_to_locate_package", # Package doesn't exist in repos } - + # Also check for patterns in the error message that indicate optional tools optional_tool_patterns = [ "sensors", # lm-sensors - optional hardware monitoring @@ -2961,36 +3362,42 @@ def _execute_task_node( "iotop", # optional I/O monitor "iftop", # optional network monitor ] - + cmd_base = task.command.split()[0] if task.command else "" is_optional_tool = any(pattern in cmd_base.lower() for pattern in optional_tool_patterns) is_soft_failure = error_type in soft_failure_types and is_optional_tool - + if is_soft_failure: # Mark as skipped instead of failed - this is an optional tool that's not available task.status = CommandStatus.SKIPPED task.reasoning = f"Tool '{cmd_base}' not available (optional)" - console.print(f"{indent}[yellow] ○ Skipped: {cmd_base} not available (optional 
tool)[/yellow]") - console.print(f"{indent}[dim] This tool provides additional info but isn't required[/dim]") + console.print( + f"{indent}[yellow] ○ Skipped: {cmd_base} not available (optional tool)[/yellow]" + ) + console.print( + f"{indent}[dim] This tool provides additional info but isn't required[/dim]" + ) cmd_log.status = CommandStatus.SKIPPED else: console.print(f"{indent}[red] ✗ Failed: {diagnosis['description'][:100]}[/red]") console.print(f"{indent}[dim] Reasoning: {task.reasoning}[/dim]") - + # Only offer manual intervention for errors that could actually be fixed manually # Don't offer for missing commands/packages that auto-fix couldn't resolve - should_offer_manual = ( - diagnosis.get("fix_commands") or stderr - ) and error_type not in {"command_not_found", "not_found", "unable_to_locate_package"} - + should_offer_manual = (diagnosis.get("fix_commands") or stderr) and error_type not in { + "command_not_found", + "not_found", + "unable_to_locate_package", + } + if should_offer_manual: console.print(f"\n{indent}[yellow]💡 Manual intervention available[/yellow]") - + suggested_cmds = diagnosis.get("fix_commands", [f"sudo {task.command}"]) console.print(f"{indent}[dim] Suggested commands:[/dim]") for cmd in suggested_cmds[:3]: console.print(f"{indent}[cyan] $ {cmd}[/cyan]") - + if Confirm.ask(f"{indent}Run manually while Cortex monitors?", default=False): manual_success = self._supervise_manual_intervention_for_task( task, suggested_cmds, run @@ -2999,10 +3406,10 @@ def _execute_task_node( task.status = CommandStatus.SUCCESS task.reasoning = "Completed via monitored manual intervention" cmd_log.status = CommandStatus.SUCCESS - + cmd_log.status = task.status run.commands.append(cmd_log) - + def _supervise_manual_intervention_for_task( self, task: TaskNode, @@ -3012,7 +3419,7 @@ def _supervise_manual_intervention_for_task( """Supervise manual intervention for a specific task with terminal monitoring.""" from rich.panel import Panel from rich.prompt import 
Prompt - + # If no suggested commands provided, use the task command with sudo if not suggested_commands: if task and task.command: @@ -3021,49 +3428,52 @@ def _supervise_manual_intervention_for_task( if not cmd.strip().startswith("sudo"): cmd = f"sudo {cmd}" suggested_commands = [cmd] - + # Claude-like manual intervention UI console.print() console.print("[bold blue]━━━[/bold blue] [bold]Manual Intervention[/bold]") console.print() - + # Show the task context if task and task.purpose: console.print(f"[bold]Task:[/bold] {task.purpose}") console.print() - + console.print("[dim]Run these commands in another terminal:[/dim]") console.print() - + # Show commands in a clear box if suggested_commands: from rich.panel import Panel + cmd_text = "\n".join(f" {i}. {cmd}" for i, cmd in enumerate(suggested_commands, 1)) - console.print(Panel( - cmd_text, - title="[bold cyan]📋 Commands to Run[/bold cyan]", - border_style="cyan", - padding=(0, 1), - )) + console.print( + Panel( + cmd_text, + title="[bold cyan]📋 Commands to Run[/bold cyan]", + border_style="cyan", + padding=(0, 1), + ) + ) else: console.print(" [yellow]⚠ No specific commands - check the task above[/yellow]") - + console.print() - + # Track expected commands for matching self._expected_manual_commands = suggested_commands.copy() if suggested_commands else [] self._completed_manual_commands: list[str] = [] - + # Start terminal monitoring with detailed output self._terminal_monitor = TerminalMonitor( notification_callback=lambda title, msg: self._send_notification(title, msg) ) self._terminal_monitor.start(expected_commands=suggested_commands) - + console.print() console.print("[dim]Type 'done' when finished, 'help' for tips, or 'cancel' to abort[/dim]") console.print() - + try: while True: try: @@ -3071,20 +3481,29 @@ def _supervise_manual_intervention_for_task( except (EOFError, KeyboardInterrupt): console.print("\n[yellow]Manual intervention cancelled[/yellow]") return False - + # Handle natural language 
responses - if user_input in ["done", "finished", "complete", "completed", "success", "worked", "yes", "y"]: + if user_input in [ + "done", + "finished", + "complete", + "completed", + "success", + "worked", + "yes", + "y", + ]: # Show observed commands and check for matches observed = self._terminal_monitor.get_observed_commands() matched_commands = [] unmatched_commands = [] - + if observed: console.print(f"\n[cyan]📊 Observed {len(observed)} command(s):[/cyan]") for obs in observed[-5:]: - obs_cmd = obs['command'] + obs_cmd = obs["command"] is_matched = False - + # Check if this matches any expected command for expected in self._expected_manual_commands: if self._commands_match(obs_cmd, expected): @@ -3093,44 +3512,50 @@ def _supervise_manual_intervention_for_task( console.print(f" • {obs_cmd[:60]}... [green]✓[/green]") is_matched = True break - + if not is_matched: unmatched_commands.append(obs_cmd) console.print(f" • {obs_cmd[:60]}... [yellow]?[/yellow]") - + # Check if expected commands were actually run if self._expected_manual_commands and not matched_commands: console.print() - console.print("[yellow]⚠ None of the expected commands were detected.[/yellow]") + console.print( + "[yellow]⚠ None of the expected commands were detected.[/yellow]" + ) console.print("[dim]Expected:[/dim]") for cmd in self._expected_manual_commands[:3]: console.print(f" [cyan]$ {cmd}[/cyan]") console.print() - + # Send notification with correct commands self._send_notification( "⚠️ Cortex: Expected Commands", - f"Run: {self._expected_manual_commands[0][:50]}..." 
+ f"Run: {self._expected_manual_commands[0][:50]}...", + ) + + console.print( + "[dim]Type 'done' again to confirm, or run the expected commands first.[/dim]" ) - - console.print("[dim]Type 'done' again to confirm, or run the expected commands first.[/dim]") continue # Don't mark as success yet - let user try again - + # Check if any observed commands had errors (check last few) has_errors = False if observed: for obs in observed[-3:]: - if obs.get('has_error') or obs.get('status') == 'failed': + if obs.get("has_error") or obs.get("status") == "failed": has_errors = True - console.print("[yellow]⚠ Some commands may have failed. Please verify.[/yellow]") + console.print( + "[yellow]⚠ Some commands may have failed. Please verify.[/yellow]" + ) break - - if has_errors and not user_input in ["yes", "y", "worked", "success"]: + + if has_errors and user_input not in ["yes", "y", "worked", "success"]: console.print("[dim]Type 'success' to confirm it worked anyway.[/dim]") continue - + console.print("[green]✓ Manual step completed successfully[/green]") - + if self._task_tree: verify_task = self._task_tree.add_verify_task( parent=task, @@ -3138,13 +3563,13 @@ def _supervise_manual_intervention_for_task( purpose="User confirmed manual intervention success", ) verify_task.status = CommandStatus.SUCCESS - + # Mark matched commands as completed so they're not re-executed if matched_commands: task.manual_commands_completed = matched_commands - + return True - + elif user_input in ["help", "?", "hint", "tips"]: console.print() console.print("[bold]💡 Manual Intervention Tips:[/bold]") @@ -3154,16 +3579,16 @@ def _supervise_manual_intervention_for_task( console.print(" • Check services: [cyan]systemctl status [/cyan]") console.print(" • View logs: [cyan]journalctl -u -n 50[/cyan]") console.print() - + elif user_input in ["cancel", "abort", "quit", "exit", "no", "n"]: console.print("[yellow]Manual intervention cancelled[/yellow]") return False - + elif user_input in ["failed", 
"error", "problem", "issue"]: console.print() error_desc = Prompt.ask("[yellow]What error did you encounter?[/yellow]") error_lower = error_desc.lower() - + # Provide contextual help based on error description if "permission" in error_lower or "denied" in error_lower: console.print("\n[cyan]💡 Try running with sudo:[/cyan]") @@ -3183,16 +3608,18 @@ def _supervise_manual_intervention_for_task( console.print(" • Check the error message carefully") console.print(" • Try running with sudo") console.print(" • Check if all required packages are installed") - + console.print() console.print("[dim]Type 'done' when fixed, or 'cancel' to abort[/dim]") - + else: # Any other input - show status observed = self._terminal_monitor.get_observed_commands() - console.print(f"[dim]Still monitoring... ({len(observed)} commands observed)[/dim]") + console.print( + f"[dim]Still monitoring... ({len(observed)} commands observed)[/dim]" + ) console.print("[dim]Type 'done' when finished, 'help' for tips[/dim]") - + except KeyboardInterrupt: console.print("\n[yellow]Manual intervention cancelled[/yellow]") return False @@ -3201,20 +3628,22 @@ def _supervise_manual_intervention_for_task( observed = self._terminal_monitor.stop() # Log observed commands to run for obs in observed: - run.commands.append(CommandLog( - command=obs["command"], - purpose=f"Manual execution ({obs['source']})", - timestamp=obs["timestamp"], - status=CommandStatus.SUCCESS, - )) + run.commands.append( + CommandLog( + command=obs["command"], + purpose=f"Manual execution ({obs['source']})", + timestamp=obs["timestamp"], + status=CommandStatus.SUCCESS, + ) + ) self._terminal_monitor = None - + # Clear tracking self._expected_manual_commands = [] - + def _commands_match(self, observed: str, expected: str) -> bool: """Check if an observed command matches an expected command. 
- + Handles variations like: - With/without sudo - Different whitespace @@ -3223,20 +3652,20 @@ def _commands_match(self, observed: str, expected: str) -> bool: # Normalize commands obs_normalized = observed.strip().lower() exp_normalized = expected.strip().lower() - + # Remove sudo prefix for comparison if obs_normalized.startswith("sudo "): obs_normalized = obs_normalized[5:].strip() if exp_normalized.startswith("sudo "): exp_normalized = exp_normalized[5:].strip() - + # Exact match if obs_normalized == exp_normalized: return True - + obs_parts = obs_normalized.split() exp_parts = exp_normalized.split() - + # Check for service management commands first (need full match including service name) service_commands = ["systemctl", "service"] for svc_cmd in service_commands: @@ -3246,42 +3675,61 @@ def _commands_match(self, observed: str, expected: str) -> bool: exp_action = None obs_service = None exp_service = None - + for i, part in enumerate(obs_parts): - if part in ["restart", "start", "stop", "reload", "status", "enable", "disable"]: + if part in [ + "restart", + "start", + "stop", + "reload", + "status", + "enable", + "disable", + ]: obs_action = part # Service name is usually the next word if i + 1 < len(obs_parts): obs_service = obs_parts[i + 1] break - + for i, part in enumerate(exp_parts): - if part in ["restart", "start", "stop", "reload", "status", "enable", "disable"]: + if part in [ + "restart", + "start", + "stop", + "reload", + "status", + "enable", + "disable", + ]: exp_action = part if i + 1 < len(exp_parts): exp_service = exp_parts[i + 1] break - + if obs_action and exp_action and obs_service and exp_service: if obs_action == exp_action and obs_service == exp_service: return True else: return False # Different action or service - + # For non-service commands, check if first 2-3 words match if len(obs_parts) >= 2 and len(exp_parts) >= 2: # Skip if either is a service command (handled above) - if obs_parts[0] not in ["systemctl", "service"] and 
exp_parts[0] not in ["systemctl", "service"]: + if obs_parts[0] not in ["systemctl", "service"] and exp_parts[0] not in [ + "systemctl", + "service", + ]: # Compare first two words (command and subcommand) if obs_parts[:2] == exp_parts[:2]: return True - + return False - + def get_completed_manual_commands(self) -> list[str]: """Get list of commands completed during manual intervention.""" - return getattr(self, '_completed_manual_commands', []) - + return getattr(self, "_completed_manual_commands", []) + def _generate_task_failure_reasoning( self, task: TaskNode, @@ -3289,15 +3737,15 @@ def _generate_task_failure_reasoning( ) -> str: """Generate detailed reasoning for why a task failed.""" parts = [] - + parts.append(f"Error: {diagnosis.get('error_type', 'unknown')}") - + if task.repair_attempts > 0: parts.append(f"Repair attempts: {task.repair_attempts} (all failed)") - + if diagnosis.get("extracted_path"): parts.append(f"Problem path: {diagnosis['extracted_path']}") - + error_type = diagnosis.get("error_type", "") if "permission" in error_type.lower(): parts.append("Root cause: Insufficient file system permissions") @@ -3305,38 +3753,38 @@ def _generate_task_failure_reasoning( parts.append("Root cause: Required file or directory does not exist") elif "service" in error_type.lower(): parts.append("Root cause: System service issue") - + if diagnosis.get("fix_commands"): parts.append(f"Suggested fix: {diagnosis['fix_commands'][0][:50]}...") - + return " | ".join(parts) - + def _generate_tree_summary(self, run: DoRun) -> str: """Generate a summary from the task tree execution.""" if not self._task_tree: return self._generate_summary(run) - + summary = self._task_tree.get_summary() - + total = sum(summary.values()) success = summary.get("success", 0) failed = summary.get("failed", 0) repaired = summary.get("needs_repair", 0) - + parts = [ f"Total tasks: {total}", f"Successful: {success}", f"Failed: {failed}", ] - + if repaired > 0: parts.append(f"Repair attempted: 
{repaired}") - + if self._permission_requests_count > 1: parts.append(f"Permission requests: {self._permission_requests_count}") - + return " | ".join(parts) - + def provide_manual_instructions( self, commands: list[tuple[str, str, list[str]]], @@ -3352,107 +3800,117 @@ def provide_manual_instructions( session_id=self.current_session_id or "", ) self.current_run = run - + console.print() - console.print(Panel( - "[bold cyan]📋 Manual Execution Instructions[/bold cyan]", - expand=False, - )) + console.print( + Panel( + "[bold cyan]📋 Manual Execution Instructions[/bold cyan]", + expand=False, + ) + ) console.print() - + cwd = os.getcwd() - console.print(f"[bold]1. Open a new terminal and navigate to:[/bold]") + console.print("[bold]1. Open a new terminal and navigate to:[/bold]") console.print(f" [cyan]cd {cwd}[/cyan]") console.print() - - console.print(f"[bold]2. Execute the following commands in order:[/bold]") + + console.print("[bold]2. Execute the following commands in order:[/bold]") console.print() - + for i, (cmd, purpose, protected) in enumerate(commands, 1): console.print(f" [bold yellow]Step {i}:[/bold yellow] {purpose}") needs_sudo = self._needs_sudo(cmd, protected) - + if protected: console.print(f" [red]⚠️ Accesses protected paths: {', '.join(protected)}[/red]") - + if needs_sudo and not cmd.strip().startswith("sudo"): console.print(f" [cyan]sudo {cmd}[/cyan]") else: console.print(f" [cyan]{cmd}[/cyan]") console.print() - - run.commands.append(CommandLog( - command=cmd, - purpose=purpose, - timestamp=datetime.datetime.now().isoformat(), - status=CommandStatus.PENDING, - )) - + + run.commands.append( + CommandLog( + command=cmd, + purpose=purpose, + timestamp=datetime.datetime.now().isoformat(), + status=CommandStatus.PENDING, + ) + ) + console.print("[bold]3. 
Once done, return to this terminal and press Enter.[/bold]") console.print() - + monitor = TerminalMonitor( notification_callback=lambda title, msg: self._send_notification(title, msg, "normal") ) - + expected_commands = [cmd for cmd, _, _ in commands] monitor.start_monitoring(expected_commands) - + console.print("[dim]🔍 Monitoring terminal activity... (press Enter when done)[/dim]") - + try: input() except (EOFError, KeyboardInterrupt): pass - + observed = monitor.stop_monitoring() - + # Add observed commands to the run for obs in observed: - run.commands.append(CommandLog( - command=obs["command"], - purpose="User-executed command", - timestamp=obs["timestamp"], - status=CommandStatus.SUCCESS, - )) - + run.commands.append( + CommandLog( + command=obs["command"], + purpose="User-executed command", + timestamp=obs["timestamp"], + status=CommandStatus.SUCCESS, + ) + ) + run.completed_at = datetime.datetime.now().isoformat() run.summary = self._generate_summary(run) - + self.db.save_run(run) - + # Generate LLM summary/answer llm_answer = self._generate_llm_answer(run, user_query) - + # Print condensed execution summary with answer self._print_execution_summary(run, answer=llm_answer) - + console.print() console.print(f"[dim]Run ID: {run.run_id}[/dim]") - + return run - + def _generate_summary(self, run: DoRun) -> str: """Generate a summary of what was done in the run.""" successful = sum(1 for c in run.commands if c.status == CommandStatus.SUCCESS) failed = sum(1 for c in run.commands if c.status == CommandStatus.FAILED) - + mode_str = "automated" if run.mode == RunMode.CORTEX_EXEC else "manual" - + if failed == 0: return f"Successfully executed {successful} commands ({mode_str}) for: {run.user_query[:50]}" else: return f"Executed {successful} commands with {failed} failures ({mode_str}) for: {run.user_query[:50]}" - + def _generate_llm_answer(self, run: DoRun, user_query: str) -> str | None: """Generate an LLM-based answer/summary after command execution.""" if not 
self.llm_callback: return None - + # Collect command outputs command_results = [] for cmd in run.commands: - status = "✓" if cmd.status == CommandStatus.SUCCESS else "✗" if cmd.status == CommandStatus.FAILED else "○" + status = ( + "✓" + if cmd.status == CommandStatus.SUCCESS + else "✗" if cmd.status == CommandStatus.FAILED else "○" + ) result = { "command": cmd.command, "purpose": cmd.purpose, @@ -3462,7 +3920,7 @@ def _generate_llm_answer(self, run: DoRun, user_query: str) -> str | None: if cmd.error: result["error"] = cmd.error[:200] command_results.append(result) - + # Build prompt for LLM prompt = f"""The user asked: "{user_query}" @@ -3471,14 +3929,14 @@ def _generate_llm_answer(self, run: DoRun, user_query: str) -> str | None: for i, result in enumerate(command_results, 1): prompt += f"\n{i}. [{result['status']}] {result['command']}" prompt += f"\n Purpose: {result['purpose']}" - if result.get('output'): + if result.get("output"): # Only include meaningful output, not empty or whitespace-only - output_preview = result['output'].strip()[:200] + output_preview = result["output"].strip()[:200] if output_preview: prompt += f"\n Output: {output_preview}" - if result.get('error'): + if result.get("error"): prompt += f"\n Error: {result['error']}" - + prompt += """ Based on the above execution results, provide a helpful summary/answer for the user. 
@@ -3494,57 +3952,59 @@ def _generate_llm_answer(self, run: DoRun, user_query: str) -> str | None: try: from rich.console import Console from rich.status import Status - + console = Console() with Status("[cyan]Generating summary...[/cyan]", spinner="dots"): result = self.llm_callback(prompt) - + if result: # Handle different response formats if isinstance(result, dict): # Extract answer from various possible keys - answer = result.get("answer") or result.get("response") or result.get("text") or "" + answer = ( + result.get("answer") or result.get("response") or result.get("text") or "" + ) if not answer and "reasoning" in result: answer = result.get("reasoning", "") elif isinstance(result, str): answer = result else: return None - + # Clean the answer answer = answer.strip() - + # Filter out JSON-like responses - if answer.startswith('{') or answer.startswith('['): + if answer.startswith("{") or answer.startswith("["): return None - + return answer if answer else None except Exception as e: # Silently fail - summary is optional import logging + logging.debug(f"LLM summary generation failed: {e}") return None - + return None - + def _print_execution_summary(self, run: DoRun, answer: str | None = None): """Print a condensed execution summary with improved visual design.""" + from rich import box from rich.panel import Panel - from rich.table import Table from rich.text import Text - from rich import box - + # Count statuses successful = [c for c in run.commands if c.status == CommandStatus.SUCCESS] failed = [c for c in run.commands if c.status == CommandStatus.FAILED] skipped = [c for c in run.commands if c.status == CommandStatus.SKIPPED] interrupted = [c for c in run.commands if c.status == CommandStatus.INTERRUPTED] - + total = len(run.commands) - + # Build status header console.print() - + # Create a status bar if total > 0: status_text = Text() @@ -3557,20 +4017,22 @@ def _print_execution_summary(self, run: DoRun, answer: str | None = None): 
status_text.append(f"○ {len(skipped)} ", style="bold yellow") if interrupted: status_text.append(f"⚠ {len(interrupted)} ", style="bold yellow") - + # Calculate success rate success_rate = (len(successful) / total * 100) if total > 0 else 0 status_text.append(f" ({success_rate:.0f}% success)", style="dim") - - console.print(Panel( - status_text, - title="[bold white on blue] 📊 Execution Status [/bold white on blue]", - title_align="left", - border_style="blue", - padding=(0, 1), - expand=False, - )) - + + console.print( + Panel( + status_text, + title="[bold white on blue] 📊 Execution Status [/bold white on blue]", + title_align="left", + border_style="blue", + padding=(0, 1), + expand=False, + ) + ) + # Create a table for detailed results if successful or failed or skipped: result_table = Table( @@ -3582,71 +4044,84 @@ def _print_execution_summary(self, run: DoRun, answer: str | None = None): ) result_table.add_column("Status", width=8, justify="center") result_table.add_column("Action", style="white") - + # Add successful commands for cmd in successful[:4]: purpose = cmd.purpose[:60] + "..." if len(cmd.purpose) > 60 else cmd.purpose result_table.add_row("[green]✓ Done[/green]", purpose) if len(successful) > 4: - result_table.add_row("[dim]...[/dim]", f"[dim]and {len(successful) - 4} more completed[/dim]") - + result_table.add_row( + "[dim]...[/dim]", f"[dim]and {len(successful) - 4} more completed[/dim]" + ) + # Add failed commands for cmd in failed[:2]: - error_short = (cmd.error[:40] + "...") if cmd.error and len(cmd.error) > 40 else (cmd.error or "Unknown") - result_table.add_row("[red]✗ Failed[/red]", f"{cmd.command[:30]}... - {error_short}") - + error_short = ( + (cmd.error[:40] + "...") + if cmd.error and len(cmd.error) > 40 + else (cmd.error or "Unknown") + ) + result_table.add_row( + "[red]✗ Failed[/red]", f"{cmd.command[:30]}... - {error_short}" + ) + # Add skipped commands for cmd in skipped[:2]: purpose = cmd.purpose[:50] + "..." 
if len(cmd.purpose) > 50 else cmd.purpose result_table.add_row("[yellow]○ Skip[/yellow]", purpose) - - console.print(Panel( - result_table, - title="[bold] 📋 Details [/bold]", - title_align="left", - border_style="dim", - padding=(0, 0), - )) - + + console.print( + Panel( + result_table, + title="[bold] 📋 Details [/bold]", + title_align="left", + border_style="dim", + padding=(0, 0), + ) + ) + # Answer section (for questions) - make it prominent if answer: # Clean the answer - remove any JSON-like content that might have leaked clean_answer = answer - if clean_answer.startswith('{') or '{"' in clean_answer[:50]: + if clean_answer.startswith("{") or '{"' in clean_answer[:50]: # Looks like JSON leaked through, try to extract readable parts import re + # Try to extract just the answer field if present answer_match = re.search(r'"answer"\s*:\s*"([^"]*)"', clean_answer) if answer_match: clean_answer = answer_match.group(1) - + # Truncate very long answers if len(clean_answer) > 500: display_answer = clean_answer[:500] + "\n\n[dim]... 
(truncated)[/dim]" else: display_answer = clean_answer - - console.print(Panel( - display_answer, - title="[bold white on green] 💡 Answer [/bold white on green]", - title_align="left", - border_style="green", - padding=(1, 2), - )) - + + console.print( + Panel( + display_answer, + title="[bold white on green] 💡 Answer [/bold white on green]", + title_align="left", + border_style="green", + padding=(1, 2), + ) + ) + def get_run_history(self, limit: int = 20) -> list[DoRun]: """Get recent do run history.""" return self.db.get_recent_runs(limit) - + def get_run(self, run_id: str) -> DoRun | None: """Get a specific run by ID.""" return self.db.get_run(run_id) - + # Expose diagnosis and auto-fix methods for external use def _diagnose_error(self, cmd: str, stderr: str) -> dict[str, Any]: """Diagnose a command failure.""" return self._diagnoser.diagnose_error(cmd, stderr) - + def _auto_fix_error( self, cmd: str, @@ -3656,11 +4131,11 @@ def _auto_fix_error( ) -> tuple[bool, str, list[str]]: """Auto-fix an error.""" return self._auto_fixer.auto_fix_error(cmd, stderr, diagnosis, max_attempts) - + def _check_for_conflicts(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for conflicts.""" return self._conflict_detector.check_for_conflicts(cmd, purpose) - + def _run_verification_tests( self, commands_executed: list[CommandLog], @@ -3668,7 +4143,7 @@ def _run_verification_tests( ) -> tuple[bool, list[dict[str, Any]]]: """Run verification tests.""" return self._verification_runner.run_verification_tests(commands_executed, user_query) - + def _check_file_exists_and_usefulness( self, cmd: str, @@ -3677,7 +4152,7 @@ def _check_file_exists_and_usefulness( ) -> dict[str, Any]: """Check file existence and usefulness.""" return self._file_analyzer.check_file_exists_and_usefulness(cmd, purpose, user_query) - + def _analyze_file_usefulness( self, content: str, @@ -3697,4 +4172,3 @@ def setup_cortex_user() -> bool: def get_do_handler() -> DoHandler: """Get a DoHandler instance.""" 
return DoHandler() - diff --git a/cortex/do_runner/managers.py b/cortex/do_runner/managers.py index 762f9dabf..412a7d5bc 100644 --- a/cortex/do_runner/managers.py +++ b/cortex/do_runner/managers.py @@ -13,7 +13,7 @@ class ProtectedPathsManager: """Manages the list of protected files and folders requiring user authentication.""" - + SYSTEM_PROTECTED_PATHS: set[str] = { # System configuration "/etc", @@ -52,14 +52,14 @@ class ProtectedPathsManager: "/proc", "/sys", } - + USER_PROTECTED_PATHS: set[str] = set() - + def __init__(self): self.config_file = Path.home() / ".cortex" / "protected_paths.json" self._ensure_config_dir() self._load_user_paths() - + def _ensure_config_dir(self): """Ensure the config directory exists.""" try: @@ -67,7 +67,7 @@ def _ensure_config_dir(self): except OSError: self.config_file = Path("/tmp") / ".cortex" / "protected_paths.json" self.config_file.parent.mkdir(parents=True, exist_ok=True) - + def _load_user_paths(self): """Load user-configured protected paths.""" if self.config_file.exists(): @@ -77,7 +77,7 @@ def _load_user_paths(self): self.USER_PROTECTED_PATHS = set(data.get("paths", [])) except (json.JSONDecodeError, OSError): pass - + def _save_user_paths(self): """Save user-configured protected paths.""" try: @@ -86,13 +86,13 @@ def _save_user_paths(self): json.dump({"paths": list(self.USER_PROTECTED_PATHS)}, f, indent=2) except OSError as e: console.print(f"[yellow]Warning: Could not save protected paths: {e}[/yellow]") - + def add_protected_path(self, path: str) -> bool: """Add a path to user-protected paths.""" self.USER_PROTECTED_PATHS.add(path) self._save_user_paths() return True - + def remove_protected_path(self, path: str) -> bool: """Remove a path from user-protected paths.""" if path in self.USER_PROTECTED_PATHS: @@ -100,21 +100,21 @@ def remove_protected_path(self, path: str) -> bool: self._save_user_paths() return True return False - + def is_protected(self, path: str) -> bool: """Check if a path requires authentication 
for access.""" path = os.path.abspath(path) all_protected = self.SYSTEM_PROTECTED_PATHS | self.USER_PROTECTED_PATHS - + if path in all_protected: return True - + for protected in all_protected: if path.startswith(protected + "/") or path == protected: return True - + return False - + def get_all_protected(self) -> list[str]: """Get all protected paths.""" return sorted(self.SYSTEM_PROTECTED_PATHS | self.USER_PROTECTED_PATHS) @@ -122,10 +122,10 @@ def get_all_protected(self) -> list[str]: class CortexUserManager: """Manages the cortex system user for privilege-limited execution.""" - + CORTEX_USER = "cortex" CORTEX_GROUP = "cortex" - + @classmethod def user_exists(cls) -> bool: """Check if the cortex user exists.""" @@ -134,34 +134,38 @@ def user_exists(cls) -> bool: return True except KeyError: return False - + @classmethod def create_user(cls) -> tuple[bool, str]: """Create the cortex user with basic privileges.""" if cls.user_exists(): return True, "Cortex user already exists" - + try: subprocess.run( ["sudo", "groupadd", "-f", cls.CORTEX_GROUP], check=True, capture_output=True, ) - + subprocess.run( [ - "sudo", "useradd", + "sudo", + "useradd", "-r", - "-g", cls.CORTEX_GROUP, - "-d", "/var/lib/cortex", - "-s", "/bin/bash", + "-g", + cls.CORTEX_GROUP, + "-d", + "/var/lib/cortex", + "-s", + "/bin/bash", "-m", cls.CORTEX_USER, ], check=True, capture_output=True, ) - + subprocess.run( ["sudo", "mkdir", "-p", "/var/lib/cortex/workspace"], check=True, @@ -172,18 +176,21 @@ def create_user(cls) -> tuple[bool, str]: check=True, capture_output=True, ) - + return True, "Cortex user created successfully" - + except subprocess.CalledProcessError as e: - return False, f"Failed to create cortex user: {e.stderr.decode() if e.stderr else str(e)}" - + return ( + False, + f"Failed to create cortex user: {e.stderr.decode() if e.stderr else str(e)}", + ) + @classmethod def grant_privilege(cls, file_path: str, mode: str = "rw") -> tuple[bool, str]: """Grant cortex user privilege to 
access a specific file.""" if not cls.user_exists(): return False, "Cortex user does not exist. Run setup first." - + try: acl_mode = "" if "r" in mode: @@ -192,24 +199,24 @@ def grant_privilege(cls, file_path: str, mode: str = "rw") -> tuple[bool, str]: acl_mode += "w" if "x" in mode: acl_mode += "x" - + if not acl_mode: acl_mode = "r" - + subprocess.run( ["sudo", "setfacl", "-m", f"u:{cls.CORTEX_USER}:{acl_mode}", file_path], check=True, capture_output=True, ) - + return True, f"Granted {acl_mode} access to {file_path}" - + except subprocess.CalledProcessError as e: error_msg = e.stderr.decode() if e.stderr else str(e) if "setfacl" in error_msg or "not found" in error_msg.lower(): return cls._grant_privilege_chmod(file_path, mode) return False, f"Failed to grant privilege: {error_msg}" - + @classmethod def _grant_privilege_chmod(cls, file_path: str, mode: str) -> tuple[bool, str]: """Fallback privilege granting using chmod.""" @@ -221,17 +228,17 @@ def _grant_privilege_chmod(cls, file_path: str, mode: str) -> tuple[bool, str]: chmod_mode = "o+rw" if chmod_mode else "o+w" if "x" in mode: chmod_mode = chmod_mode + "x" if chmod_mode else "o+x" - + subprocess.run( ["sudo", "chmod", chmod_mode, file_path], check=True, capture_output=True, ) return True, f"Granted {mode} access to {file_path} (chmod fallback)" - + except subprocess.CalledProcessError as e: return False, f"Failed to grant privilege: {e.stderr.decode() if e.stderr else str(e)}" - + @classmethod def revoke_privilege(cls, file_path: str) -> tuple[bool, str]: """Revoke cortex user's privilege from a specific file.""" @@ -242,13 +249,13 @@ def revoke_privilege(cls, file_path: str) -> tuple[bool, str]: capture_output=True, ) return True, f"Revoked access to {file_path}" - + except subprocess.CalledProcessError as e: error_msg = e.stderr.decode() if e.stderr else str(e) if "setfacl" in error_msg or "not found" in error_msg.lower(): return cls._revoke_privilege_chmod(file_path) return False, f"Failed to revoke 
privilege: {error_msg}" - + @classmethod def _revoke_privilege_chmod(cls, file_path: str) -> tuple[bool, str]: """Fallback privilege revocation using chmod.""" @@ -261,13 +268,13 @@ def _revoke_privilege_chmod(cls, file_path: str) -> tuple[bool, str]: return True, f"Revoked access to {file_path} (chmod fallback)" except subprocess.CalledProcessError as e: return False, f"Failed to revoke privilege: {e.stderr.decode() if e.stderr else str(e)}" - + @classmethod def run_as_cortex(cls, command: str, timeout: int = 60) -> tuple[bool, str, str]: """Execute a command as the cortex user.""" if not cls.user_exists(): return False, "", "Cortex user does not exist" - + try: result = subprocess.run( ["sudo", "-u", cls.CORTEX_USER, "bash", "-c", command], @@ -284,4 +291,3 @@ def run_as_cortex(cls, command: str, timeout: int = 60) -> tuple[bool, str, str] return False, "", f"Command timed out after {timeout} seconds" except Exception as e: return False, "", str(e) - diff --git a/cortex/do_runner/models.py b/cortex/do_runner/models.py index 6f1081b75..8bb5fe390 100644 --- a/cortex/do_runner/models.py +++ b/cortex/do_runner/models.py @@ -11,6 +11,7 @@ class CommandStatus(str, Enum): """Status of a command execution.""" + PENDING = "pending" RUNNING = "running" SUCCESS = "success" @@ -22,12 +23,14 @@ class CommandStatus(str, Enum): class RunMode(str, Enum): """Mode of execution for a do run.""" + CORTEX_EXEC = "cortex_exec" USER_MANUAL = "user_manual" class TaskType(str, Enum): """Type of task in the task tree.""" + COMMAND = "command" DIAGNOSTIC = "diagnostic" REPAIR = "repair" @@ -38,29 +41,30 @@ class TaskType(str, Enum): @dataclass class TaskNode: """A node in the task tree representing a command or action.""" + id: str task_type: TaskType command: str purpose: str status: CommandStatus = CommandStatus.PENDING - + # Execution results output: str = "" error: str = "" duration_seconds: float = 0.0 - + # Tree structure parent_id: str | None = None children: list["TaskNode"] = 
field(default_factory=list) - + # Repair context failure_reason: str = "" repair_attempts: int = 0 max_repair_attempts: int = 3 - + # Reasoning reasoning: str = "" - + def to_dict(self) -> dict[str, Any]: return { "id": self.id, @@ -77,12 +81,12 @@ def to_dict(self) -> dict[str, Any]: "repair_attempts": self.repair_attempts, "reasoning": self.reasoning, } - + def add_child(self, child: "TaskNode"): """Add a child task.""" child.parent_id = self.id self.children.append(child) - + def get_depth(self) -> int: """Get the depth of this node in the tree.""" depth = 0 @@ -95,17 +99,17 @@ def get_depth(self) -> int: class TaskTree: """A tree structure for managing commands with auto-repair capabilities.""" - + def __init__(self): self.root_tasks: list[TaskNode] = [] self._task_counter = 0 self._all_tasks: dict[str, TaskNode] = {} - + def _generate_task_id(self, prefix: str = "task") -> str: """Generate a unique task ID.""" self._task_counter += 1 return f"{prefix}_{self._task_counter}" - + def add_root_task( self, command: str, @@ -122,7 +126,7 @@ def add_root_task( self.root_tasks.append(task) self._all_tasks[task.id] = task return task - + def add_repair_task( self, parent: TaskNode, @@ -141,7 +145,7 @@ def add_repair_task( parent.add_child(task) self._all_tasks[task.id] = task return task - + def add_diagnostic_task( self, parent: TaskNode, @@ -158,7 +162,7 @@ def add_diagnostic_task( parent.add_child(task) self._all_tasks[task.id] = task return task - + def add_verify_task( self, parent: TaskNode, @@ -175,7 +179,7 @@ def add_verify_task( parent.add_child(task) self._all_tasks[task.id] = task return task - + def add_alternative_task( self, parent: TaskNode, @@ -194,49 +198,49 @@ def add_alternative_task( parent.add_child(task) self._all_tasks[task.id] = task return task - + def get_task(self, task_id: str) -> TaskNode | None: """Get a task by ID.""" return self._all_tasks.get(task_id) - + def get_pending_tasks(self) -> list[TaskNode]: """Get all pending tasks in 
order.""" pending = [] for root in self.root_tasks: self._collect_pending(root, pending) return pending - + def _collect_pending(self, node: TaskNode, pending: list[TaskNode]): """Recursively collect pending tasks.""" if node.status == CommandStatus.PENDING: pending.append(node) for child in node.children: self._collect_pending(child, pending) - + def get_failed_tasks(self) -> list[TaskNode]: """Get all failed tasks.""" return [t for t in self._all_tasks.values() if t.status == CommandStatus.FAILED] - + def get_summary(self) -> dict[str, int]: """Get a summary of task statuses.""" summary = {status.value: 0 for status in CommandStatus} for task in self._all_tasks.values(): summary[task.status.value] += 1 return summary - + def to_dict(self) -> dict[str, Any]: """Convert tree to dictionary.""" return { "root_tasks": [t.to_dict() for t in self.root_tasks], "summary": self.get_summary(), } - + def print_tree(self, indent: str = ""): """Print the task tree structure.""" for i, root in enumerate(self.root_tasks): is_last = i == len(self.root_tasks) - 1 self._print_node(root, indent, is_last) - + def _print_node(self, node: TaskNode, indent: str, is_last: bool): """Print a single node with its children.""" status_icons = { @@ -247,7 +251,7 @@ def _print_node(self, node: TaskNode, indent: str, is_last: bool): CommandStatus.SKIPPED: "[yellow]○[/yellow]", CommandStatus.NEEDS_REPAIR: "[yellow]⚡[/yellow]", } - + type_colors = { TaskType.COMMAND: "white", TaskType.DIAGNOSTIC: "cyan", @@ -255,16 +259,20 @@ def _print_node(self, node: TaskNode, indent: str, is_last: bool): TaskType.VERIFY: "blue", TaskType.ALTERNATIVE: "magenta", } - + icon = status_icons.get(node.status, "?") color = type_colors.get(node.task_type, "white") prefix = "└── " if is_last else "├── " - - console.print(f"{indent}{prefix}{icon} [{color}][{node.task_type.value}][/{color}] {node.command[:50]}...") - + + console.print( + f"{indent}{prefix}{icon} [{color}][{node.task_type.value}][/{color}] 
{node.command[:50]}..." + ) + if node.reasoning: - console.print(f"{indent}{' ' if is_last else '│ '}[dim]Reason: {node.reasoning}[/dim]") - + console.print( + f"{indent}{' ' if is_last else '│ '}[dim]Reason: {node.reasoning}[/dim]" + ) + child_indent = indent + (" " if is_last else "│ ") for j, child in enumerate(node.children): self._print_node(child, child_indent, j == len(node.children) - 1) @@ -273,6 +281,7 @@ def _print_node(self, node: TaskNode, indent: str, is_last: bool): @dataclass class CommandLog: """Log entry for a single command execution.""" + command: str purpose: str timestamp: str @@ -281,7 +290,7 @@ class CommandLog: error: str = "" duration_seconds: float = 0.0 useful: bool = True - + def to_dict(self) -> dict[str, Any]: return { "command": self.command, @@ -293,7 +302,7 @@ def to_dict(self) -> dict[str, Any]: "duration_seconds": self.duration_seconds, "useful": self.useful, } - + @classmethod def from_dict(cls, data: dict[str, Any]) -> "CommandLog": return cls( @@ -311,6 +320,7 @@ def from_dict(cls, data: dict[str, Any]) -> "CommandLog": @dataclass class DoRun: """Represents a complete do run session.""" + run_id: str summary: str mode: RunMode @@ -321,7 +331,7 @@ class DoRun: files_accessed: list[str] = field(default_factory=list) privileges_granted: list[str] = field(default_factory=list) session_id: str = "" - + def to_dict(self) -> dict[str, Any]: return { "run_id": self.run_id, @@ -335,7 +345,7 @@ def to_dict(self) -> dict[str, Any]: "privileges_granted": self.privileges_granted, "session_id": self.session_id, } - + def get_commands_log_string(self) -> str: """Get all commands as a formatted string for storage.""" lines = [] @@ -349,4 +359,3 @@ def get_commands_log_string(self) -> str: lines.append(f" Duration: {cmd.duration_seconds:.2f}s | Useful: {cmd.useful}") lines.append("") return "\n".join(lines) - diff --git a/cortex/do_runner/terminal.py b/cortex/do_runner/terminal.py index 47593f6eb..a818c8fa4 100644 --- 
a/cortex/do_runner/terminal.py +++ b/cortex/do_runner/terminal.py @@ -7,8 +7,9 @@ import subprocess import threading import time +from collections.abc import Callable from pathlib import Path -from typing import Any, Callable +from typing import Any from rich.console import Console @@ -17,37 +18,38 @@ class ClaudeLLM: """Claude LLM client using the LLMRouter for intelligent error analysis.""" - + def __init__(self): self._router = None self._available: bool | None = None - + def _get_router(self): """Lazy initialize the router.""" if self._router is None: try: from cortex.llm_router import LLMRouter, TaskType + self._router = LLMRouter() self._task_type = TaskType except Exception: self._router = False # Mark as failed return self._router if self._router else None - + def is_available(self) -> bool: """Check if Claude API is available.""" if self._available is not None: return self._available - + router = self._get_router() self._available = router is not None and router.claude_client is not None return self._available - + def analyze_error(self, command: str, error_output: str, max_tokens: int = 300) -> dict | None: """Analyze an error using Claude and return diagnosis with solution.""" router = self._get_router() if not router: return None - + try: messages = [ { @@ -68,29 +70,22 @@ def analyze_error(self, command: str, error_output: str, max_tokens: int = 300) Be concise. 
Output format: CAUSE: FIX: -FIX: """ +FIX: """, }, - { - "role": "user", - "content": f"Command: {command}\n\nError:\n{error_output[:500]}" - } + {"role": "user", "content": f"Command: {command}\n\nError:\n{error_output[:500]}"}, ] - + response = router.complete( messages=messages, task_type=self._task_type.ERROR_DEBUGGING, max_tokens=max_tokens, temperature=0.3, ) - + # Parse response content = response.content - result = { - "cause": "", - "fixes": [], - "raw": content - } - + result = {"cause": "", "fixes": [], "raw": content} + for line in content.split("\n"): line = line.strip() if line.upper().startswith("CAUSE:"): @@ -99,9 +94,9 @@ def analyze_error(self, command: str, error_output: str, max_tokens: int = 300) fix = line[4:].strip() if fix and not fix.startswith("#"): result["fixes"].append(fix) - + return result - + except Exception as e: console.print(f"[dim]Claude analysis error: {e}[/dim]") return None @@ -109,84 +104,87 @@ def analyze_error(self, command: str, error_output: str, max_tokens: int = 300) class LocalLLM: """Local LLM client using Ollama with Mistral (fallback).""" - + def __init__(self, model: str = "mistral"): self.model = model self._available: bool | None = None - + def is_available(self) -> bool: """Check if Ollama with the model is available.""" if self._available is not None: return self._available - + try: - result = subprocess.run( - ["ollama", "list"], - capture_output=True, text=True, timeout=5 - ) + result = subprocess.run(["ollama", "list"], capture_output=True, text=True, timeout=5) self._available = result.returncode == 0 and self.model in result.stdout if not self._available: # Try to check if ollama is running at least result = subprocess.run( ["curl", "-s", "http://localhost:11434/api/tags"], - capture_output=True, text=True, timeout=5 + capture_output=True, + text=True, + timeout=5, ) if result.returncode == 0: self._available = self.model in result.stdout except (subprocess.TimeoutExpired, FileNotFoundError, Exception): 
self._available = False - + return self._available - + def analyze(self, prompt: str, max_tokens: int = 200, timeout: int = 10) -> str | None: """Call the local LLM for analysis.""" if not self.is_available(): return None - + try: - import urllib.request import urllib.error - + import urllib.request + # Use Ollama API directly via urllib (faster than curl subprocess) - data = json.dumps({ - "model": self.model, - "prompt": prompt, - "stream": False, - "options": { - "num_predict": max_tokens, - "temperature": 0.3, + data = json.dumps( + { + "model": self.model, + "prompt": prompt, + "stream": False, + "options": { + "num_predict": max_tokens, + "temperature": 0.3, + }, } - }).encode('utf-8') - + ).encode("utf-8") + req = urllib.request.Request( "http://localhost:11434/api/generate", data=data, - headers={"Content-Type": "application/json"} + headers={"Content-Type": "application/json"}, ) - + with urllib.request.urlopen(req, timeout=timeout) as response: - result = json.loads(response.read().decode('utf-8')) + result = json.loads(response.read().decode("utf-8")) return result.get("response", "").strip() - + except (urllib.error.URLError, json.JSONDecodeError, TimeoutError, Exception): pass - + return None class TerminalMonitor: """ Monitors terminal commands for the manual execution flow. 
- + Monitors ALL terminal sources by default: - Bash history file (~/.bash_history) - - Zsh history file (~/.zsh_history) + - Zsh history file (~/.zsh_history) - Fish history file (~/.local/share/fish/fish_history) - ALL Cursor terminal files (all projects) - External terminal output files """ - - def __init__(self, notification_callback: Callable[[str, str], None] | None = None, use_llm: bool = True): + + def __init__( + self, notification_callback: Callable[[str, str], None] | None = None, use_llm: bool = True + ): self.notification_callback = notification_callback self._monitoring = False self._monitor_thread: threading.Thread | None = None @@ -197,7 +195,7 @@ def __init__(self, notification_callback: Callable[[str, str], None] | None = No self._shell_history_files: list[Path] = [] self._output_buffer: list[dict[str, Any]] = [] # Buffer for terminal output self._show_live_output = True # Whether to print live output - + # Claude LLM for intelligent error analysis (primary) self._use_llm = use_llm self._claude: ClaudeLLM | None = None @@ -205,64 +203,66 @@ def __init__(self, notification_callback: Callable[[str, str], None] | None = No if use_llm: self._claude = ClaudeLLM() self._llm = LocalLLM(model="mistral") # Keep as fallback - + # Context for LLM self._session_context: list[str] = [] # Recent commands for context - + # Use existing auto-fix architecture - from cortex.do_runner.diagnosis import ErrorDiagnoser, AutoFixer + from cortex.do_runner.diagnosis import AutoFixer, ErrorDiagnoser + self._diagnoser = ErrorDiagnoser() self._auto_fixer = AutoFixer(llm_callback=self._llm_for_autofix if use_llm else None) - + # Notification manager for desktop notifications self.notifier = self._create_notifier() - + # Discover all terminal sources self._discover_terminal_sources() - + def _create_notifier(self): """Create notification manager for desktop notifications.""" try: from cortex.notification_manager import NotificationManager + return NotificationManager() except 
ImportError: return None - + def _llm_for_autofix(self, prompt: str) -> dict: """LLM callback for the AutoFixer.""" if not self._llm or not self._llm.is_available(): return {} - + result = self._llm.analyze(prompt, max_tokens=200, timeout=15) if result: return {"response": result, "fix_commands": []} return {} - + def _discover_terminal_sources(self, verbose: bool = False): """Discover all available terminal sources to monitor.""" home = Path.home() - + # Reset lists self._shell_history_files = [] self._cursor_terminals_dirs = [] - + # Shell history files shell_histories = [ - home / ".bash_history", # Bash - home / ".zsh_history", # Zsh - home / ".history", # Generic - home / ".sh_history", # Sh + home / ".bash_history", # Bash + home / ".zsh_history", # Zsh + home / ".history", # Generic + home / ".sh_history", # Sh home / ".local" / "share" / "fish" / "fish_history", # Fish - home / ".ksh_history", # Korn shell - home / ".tcsh_history", # Tcsh + home / ".ksh_history", # Korn shell + home / ".tcsh_history", # Tcsh ] - + for hist_file in shell_histories: if hist_file.exists(): self._shell_history_files.append(hist_file) if verbose: console.print(f"[dim]📝 Monitoring: {hist_file}[/dim]") - + # Find ALL Cursor terminal directories (all projects) cursor_base = home / ".cursor" / "projects" if cursor_base.exists(): @@ -272,67 +272,85 @@ def _discover_terminal_sources(self, verbose: bool = False): if terminals_path.exists(): self._cursor_terminals_dirs.append(terminals_path) if verbose: - console.print(f"[dim]🖥️ Monitoring Cursor terminals: {terminals_path.parent.name}[/dim]") - + console.print( + f"[dim]🖥️ Monitoring Cursor terminals: {terminals_path.parent.name}[/dim]" + ) + # Also check for tmux/screen panes self._tmux_available = self._check_command_exists("tmux") self._screen_available = self._check_command_exists("screen") - + if verbose: if self._tmux_available: console.print("[dim]📺 Tmux detected - will monitor tmux panes[/dim]") if self._screen_available: 
console.print("[dim]📺 Screen detected - will monitor screen sessions[/dim]") - + def _check_command_exists(self, cmd: str) -> bool: """Check if a command exists in PATH.""" import shutil + return shutil.which(cmd) is not None - - def start(self, verbose: bool = True, show_live: bool = True, expected_commands: list[str] | None = None): + + def start( + self, + verbose: bool = True, + show_live: bool = True, + expected_commands: list[str] | None = None, + ): """Start monitoring terminal for commands.""" - self.start_monitoring(expected_commands=expected_commands, verbose=verbose, show_live=show_live) - + self.start_monitoring( + expected_commands=expected_commands, verbose=verbose, show_live=show_live + ) + def _is_service_running(self) -> bool: """Check if the Cortex Watch systemd service is running.""" try: result = subprocess.run( ["systemctl", "--user", "is-active", "cortex-watch.service"], - capture_output=True, text=True, timeout=3 + capture_output=True, + text=True, + timeout=3, ) return result.stdout.strip() == "active" except Exception: return False - - def start_monitoring(self, expected_commands: list[str] | None = None, verbose: bool = True, show_live: bool = True, clear_old_logs: bool = True): + + def start_monitoring( + self, + expected_commands: list[str] | None = None, + verbose: bool = True, + show_live: bool = True, + clear_old_logs: bool = True, + ): """Start monitoring ALL terminal sources for commands.""" self._monitoring = True self._expected_commands = expected_commands or [] self._show_live_output = show_live self._output_buffer = [] self._session_context = [] - + # Mark this terminal as the Cortex terminal so watch hook won't log its commands os.environ["CORTEX_TERMINAL"] = "1" - + # Record the monitoring start time to filter out old commands self._monitoring_start_time = datetime.datetime.now() - + # Always clear old watch log to start fresh - this prevents reading old session commands watch_file = self.get_watch_file_path() if 
watch_file.exists(): # Truncate the file to clear old commands from previous sessions watch_file.write_text("") - + # Also record starting positions for bash/zsh history files self._history_start_positions: dict[str, int] = {} for hist_file in [Path.home() / ".bash_history", Path.home() / ".zsh_history"]: if hist_file.exists(): self._history_start_positions[str(hist_file)] = hist_file.stat().st_size - + # Re-discover sources in case new terminals opened self._discover_terminal_sources(verbose=verbose) - + # Check LLM availability llm_status = "" if self._llm and self._use_llm: @@ -340,46 +358,53 @@ def start_monitoring(self, expected_commands: list[str] | None = None, verbose: llm_status = "\n[green]🤖 AI Analysis: Mistral (local) - Active[/green]" else: llm_status = "\n[yellow]🤖 AI Analysis: Mistral not available (install with: ollama pull mistral)[/yellow]" - + if verbose: from rich.panel import Panel - + watch_file = self.get_watch_file_path() source_file = Path.home() / ".cortex" / "watch_hook.sh" - + # Check if systemd service is running (best option) service_running = self._is_service_running() - + # Check if auto-watch is already set up bashrc = Path.home() / ".bashrc" hook_installed = False if bashrc.exists() and "Cortex Terminal Watch Hook" in bashrc.read_text(): hook_installed = True - + # If service is running, we don't need the hook if service_running: - setup_info = "[bold green]✓ Cortex Watch Service is running[/bold green]\n" \ - "[dim]All terminal activity is being monitored automatically![/dim]" + setup_info = ( + "[bold green]✓ Cortex Watch Service is running[/bold green]\n" + "[dim]All terminal activity is being monitored automatically![/dim]" + ) else: # Not using the service, need to set up hooks if not hook_installed: # Auto-install the hook to .bashrc self.setup_auto_watch(permanent=True) hook_installed = True # Now installed - + # Ensure source file exists self.setup_auto_watch(permanent=False) - + # Create a super short activation command 
short_cmd = f"source {source_file}" - + # Try to copy to clipboard clipboard_copied = False try: # Try xclip first, then xsel - for clip_cmd in [["xclip", "-selection", "clipboard"], ["xsel", "--clipboard", "--input"]]: + for clip_cmd in [ + ["xclip", "-selection", "clipboard"], + ["xsel", "--clipboard", "--input"], + ]: try: - proc = subprocess.run(clip_cmd, input=short_cmd.encode(), capture_output=True, timeout=2) + proc = subprocess.run( + clip_cmd, input=short_cmd.encode(), capture_output=True, timeout=2 + ) if proc.returncode == 0: clipboard_copied = True break @@ -387,81 +412,94 @@ def start_monitoring(self, expected_commands: list[str] | None = None, verbose: continue except Exception: pass - + if hook_installed: - clipboard_msg = "[green]📋 Copied to clipboard![/green] " if clipboard_copied else "" - setup_info = "[green]✓ Terminal watch hook is installed in .bashrc[/green]\n" \ - "[dim](New terminals will auto-activate)[/dim]\n\n" \ - f"[bold yellow]For EXISTING terminals, paste this:[/bold yellow]\n" \ - f"[bold cyan]{short_cmd}[/bold cyan]\n" \ - f"{clipboard_msg}\n" \ - "[dim]Or type [/dim][green]cortex watch --install --service[/green][dim] for automatic monitoring![/dim]" - + clipboard_msg = ( + "[green]📋 Copied to clipboard![/green] " if clipboard_copied else "" + ) + setup_info = ( + "[green]✓ Terminal watch hook is installed in .bashrc[/green]\n" + "[dim](New terminals will auto-activate)[/dim]\n\n" + f"[bold yellow]For EXISTING terminals, paste this:[/bold yellow]\n" + f"[bold cyan]{short_cmd}[/bold cyan]\n" + f"{clipboard_msg}\n" + "[dim]Or type [/dim][green]cortex watch --install --service[/green][dim] for automatic monitoring![/dim]" + ) + # Send desktop notification with the command try: msg = f"Paste in your OTHER terminal:\n\n{short_cmd}" if clipboard_copied: msg += "\n\n(Already copied to clipboard!)" - subprocess.run([ - "notify-send", - "--urgency=critical", - "--icon=dialog-warning", - "--expire-time=15000", - "⚠️ Cortex: Activate 
Terminal Watching", - msg - ], capture_output=True, timeout=2) + subprocess.run( + [ + "notify-send", + "--urgency=critical", + "--icon=dialog-warning", + "--expire-time=15000", + "⚠️ Cortex: Activate Terminal Watching", + msg, + ], + capture_output=True, + timeout=2, + ) except Exception: pass else: - setup_info = f"[bold yellow]⚠ For real-time monitoring in OTHER terminals:[/bold yellow]\n\n" \ - f"[bold cyan]{short_cmd}[/bold cyan]\n\n" \ - "[dim]Or install the watch service: [/dim][green]cortex watch --install --service[/green]" - + setup_info = ( + f"[bold yellow]⚠ For real-time monitoring in OTHER terminals:[/bold yellow]\n\n" + f"[bold cyan]{short_cmd}[/bold cyan]\n\n" + "[dim]Or install the watch service: [/dim][green]cortex watch --install --service[/green]" + ) + console.print() - console.print(Panel( - "[bold cyan]🔍 Terminal Monitoring Active[/bold cyan]\n\n" - f"Watching {len(self._shell_history_files)} shell history files\n" - f"Watching {len(self._cursor_terminals_dirs)} Cursor terminal directories\n" - + ("Watching Tmux panes\n" if self._tmux_available else "") - + llm_status + - "\n\n" + setup_info, - title="[bold green]Live Terminal Monitor[/bold green]", - border_style="green", - )) + console.print( + Panel( + "[bold cyan]🔍 Terminal Monitoring Active[/bold cyan]\n\n" + f"Watching {len(self._shell_history_files)} shell history files\n" + f"Watching {len(self._cursor_terminals_dirs)} Cursor terminal directories\n" + + ("Watching Tmux panes\n" if self._tmux_available else "") + + llm_status + + "\n\n" + + setup_info, + title="[bold green]Live Terminal Monitor[/bold green]", + border_style="green", + ) + ) console.print() console.print("[dim]─" * 60 + "[/dim]") console.print("[bold]📡 Live Terminal Feed:[/bold]") console.print("[dim]─" * 60 + "[/dim]") console.print("[dim]Waiting for commands from other terminals...[/dim]") console.print() - + self._monitor_thread = threading.Thread(target=self._monitor_loop, daemon=True) self._monitor_thread.start() - 
+ def stop_monitoring(self) -> list[dict[str, Any]]: """Stop monitoring and return observed commands.""" self._monitoring = False if self._monitor_thread: self._monitor_thread.join(timeout=2) self._monitor_thread = None - + with self._lock: result = list(self._commands_observed) return result - + def stop(self) -> list[dict[str, Any]]: """Stop monitoring terminal.""" return self.stop_monitoring() - + def get_observed_commands(self) -> list[dict[str, Any]]: """Get all observed commands so far.""" with self._lock: return list(self._commands_observed) - + def test_monitoring(self): """Test that monitoring is working by showing what files are being watched.""" console.print("\n[bold cyan]🔍 Terminal Monitoring Test[/bold cyan]\n") - + # Check shell history files console.print("[bold]Shell History Files:[/bold]") for hist_file in self._shell_history_files: @@ -469,7 +507,7 @@ def test_monitoring(self): size = hist_file.stat().st_size if exists else 0 status = "[green]✓[/green]" if exists else "[red]✗[/red]" console.print(f" {status} {hist_file} ({size} bytes)") - + # Check Cursor terminal directories console.print("\n[bold]Cursor Terminal Directories:[/bold]") for terminals_dir in self._cursor_terminals_dirs: @@ -483,74 +521,82 @@ def test_monitoring(self): console.print(f" ... 
and {len(files) - 5} more") else: console.print(f" [red]✗[/red] {terminals_dir} (not found)") - + # Check tmux console.print("\n[bold]Other Sources:[/bold]") - console.print(f" Tmux: {'[green]✓ available[/green]' if self._tmux_available else '[dim]not available[/dim]'}") - console.print(f" Screen: {'[green]✓ available[/green]' if self._screen_available else '[dim]not available[/dim]'}") - - console.print("\n[yellow]Tip: For bash history to update in real-time, run in your terminal:[/yellow]") + console.print( + f" Tmux: {'[green]✓ available[/green]' if self._tmux_available else '[dim]not available[/dim]'}" + ) + console.print( + f" Screen: {'[green]✓ available[/green]' if self._screen_available else '[dim]not available[/dim]'}" + ) + + console.print( + "\n[yellow]Tip: For bash history to update in real-time, run in your terminal:[/yellow]" + ) console.print("[green]export PROMPT_COMMAND='history -a'[/green]") console.print() - + def inject_test_command(self, command: str, source: str = "test"): """Inject a test command to verify the display is working.""" self._process_observed_command(command, source) - + def get_watch_file_path(self) -> Path: """Get the path to the cortex watch file.""" return Path.home() / ".cortex" / "terminal_watch.log" - + def setup_terminal_hook(self) -> str: """Generate a bash command to set up real-time terminal watching. - + Returns the command the user should run in their terminal. 
""" watch_file = self.get_watch_file_path() watch_file.parent.mkdir(parents=True, exist_ok=True) - + # Create a bash function that logs commands - hook_command = f''' + hook_command = f""" # Cortex Terminal Hook - paste this in your terminal: export CORTEX_WATCH_FILE="{watch_file}" export PROMPT_COMMAND='history -a; echo "$(date +%H:%M:%S) $(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" >> "$CORTEX_WATCH_FILE"' echo "✓ Cortex is now watching this terminal" -''' +""" return hook_command.strip() - + def print_setup_instructions(self): """Print instructions for setting up real-time terminal watching.""" from rich.panel import Panel - + watch_file = self.get_watch_file_path() - + console.print() - console.print(Panel( - "[bold yellow]⚠ For real-time terminal monitoring, run this in your OTHER terminal:[/bold yellow]\n\n" - f"[green]export PROMPT_COMMAND='history -a; echo \"$(date +%H:%M:%S) $(history 1 | sed \"s/^[ ]*[0-9]*[ ]*//\")\" >> {watch_file}'[/green]\n\n" - "[dim]This makes bash write commands immediately so Cortex can see them.[/dim]", - title="[cyan]Setup Required[/cyan]", - border_style="yellow", - )) + console.print( + Panel( + "[bold yellow]⚠ For real-time terminal monitoring, run this in your OTHER terminal:[/bold yellow]\n\n" + f'[green]export PROMPT_COMMAND=\'history -a; echo "$(date +%H:%M:%S) $(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" >> {watch_file}\'[/green]\n\n' + "[dim]This makes bash write commands immediately so Cortex can see them.[/dim]", + title="[cyan]Setup Required[/cyan]", + border_style="yellow", + ) + ) console.print() - + def setup_system_wide_watch(self) -> tuple[bool, str]: """ Install the terminal watch hook system-wide in /etc/profile.d/. - + This makes the hook active for ALL users and ALL new terminals automatically. Requires sudo. 
- + Returns: Tuple of (success, message) """ import subprocess - + watch_file = self.get_watch_file_path() profile_script = "/etc/profile.d/cortex-watch.sh" - + # The system-wide hook script - hook_content = f'''#!/bin/bash + hook_content = """#!/bin/bash # Cortex Terminal Watch Hook - System Wide # Installed by: cortex do watch --system # This enables real-time terminal command monitoring for Cortex AI @@ -568,110 +614,107 @@ def setup_system_wide_watch(self) -> tuple[bool, str]: mkdir -p "$HOME/.cortex" 2>/dev/null __cortex_last_histnum="" -__cortex_log_cmd() {{ - local histnum="$(history 1 2>/dev/null | awk '{{print $1}}')" +__cortex_log_cmd() { + local histnum="$(history 1 2>/dev/null | awk '{print $1}')" [[ "$histnum" == "$__cortex_last_histnum" ]] && return __cortex_last_histnum="$histnum" - + local cmd="$(history 1 2>/dev/null | sed "s/^[ ]*[0-9]*[ ]*//")" - [[ -z "${{cmd// /}}" ]] && return + [[ -z "${cmd// /}" ]] && return [[ "$cmd" == cortex* ]] && return [[ "$cmd" == *"watch_hook"* ]] && return - + echo "$cmd" >> "$CORTEX_WATCH_FILE" 2>/dev/null -}} +} # Add to PROMPT_COMMAND (preserve existing) if [[ -z "$PROMPT_COMMAND" ]]; then export PROMPT_COMMAND='history -a; __cortex_log_cmd' else - export PROMPT_COMMAND="${{PROMPT_COMMAND}}; __cortex_log_cmd" + export PROMPT_COMMAND="${PROMPT_COMMAND}; __cortex_log_cmd" fi -''' - +""" + try: # Write to a temp file first import tempfile - with tempfile.NamedTemporaryFile(mode='w', suffix='.sh', delete=False) as f: + + with tempfile.NamedTemporaryFile(mode="w", suffix=".sh", delete=False) as f: f.write(hook_content) temp_file = f.name - + # Use sudo to copy to /etc/profile.d/ result = subprocess.run( ["sudo", "cp", temp_file, profile_script], capture_output=True, text=True, - timeout=30 + timeout=30, ) - + if result.returncode != 0: return False, f"Failed to install: {result.stderr}" - + # Make it executable - subprocess.run( - ["sudo", "chmod", "+x", profile_script], - capture_output=True, - timeout=10 - ) - + 
subprocess.run(["sudo", "chmod", "+x", profile_script], capture_output=True, timeout=10) + # Clean up temp file Path(temp_file).unlink(missing_ok=True) - - return True, f"✓ Installed system-wide to {profile_script}\n" \ - "All NEW terminals will automatically have Cortex watching enabled.\n" \ - "For current terminals, run: source /etc/profile.d/cortex-watch.sh" - + + return ( + True, + f"✓ Installed system-wide to {profile_script}\n" + "All NEW terminals will automatically have Cortex watching enabled.\n" + "For current terminals, run: source /etc/profile.d/cortex-watch.sh", + ) + except subprocess.TimeoutExpired: return False, "Timeout waiting for sudo" except Exception as e: return False, f"Error: {e}" - + def uninstall_system_wide_watch(self) -> tuple[bool, str]: """Remove the system-wide terminal watch hook.""" import subprocess - + profile_script = "/etc/profile.d/cortex-watch.sh" - + try: if not Path(profile_script).exists(): return True, "System-wide hook not installed" - + result = subprocess.run( - ["sudo", "rm", profile_script], - capture_output=True, - text=True, - timeout=30 + ["sudo", "rm", profile_script], capture_output=True, text=True, timeout=30 ) - + if result.returncode != 0: return False, f"Failed to remove: {result.stderr}" - + return True, f"✓ Removed {profile_script}" - + except Exception as e: return False, f"Error: {e}" - + def is_system_wide_installed(self) -> bool: """Check if system-wide hook is installed.""" return Path("/etc/profile.d/cortex-watch.sh").exists() - + def setup_auto_watch(self, permanent: bool = True) -> tuple[bool, str]: """ Set up automatic terminal watching for new and existing terminals. 
- + Args: permanent: If True, adds the hook to ~/.bashrc for future terminals - + Returns: Tuple of (success, message) """ watch_file = self.get_watch_file_path() watch_file.parent.mkdir(parents=True, exist_ok=True) - + # The hook command - excludes cortex commands and source commands # Uses a function to filter out Cortex terminal commands # Added: tracks last logged command and history number to avoid duplicates - hook_line = f''' + hook_line = f""" __cortex_last_histnum="" __cortex_log_cmd() {{ # Get current history number @@ -679,7 +722,7 @@ def setup_auto_watch(self, permanent: bool = True) -> tuple[bool, str]: # Skip if same as last logged (prevents duplicate on terminal init) [[ "$histnum" == "$__cortex_last_histnum" ]] && return __cortex_last_histnum="$histnum" - + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" # Skip empty or whitespace-only commands [[ -z "${{cmd// /}}" ]] && return @@ -693,14 +736,14 @@ def setup_auto_watch(self, permanent: bool = True) -> tuple[bool, str]: echo "${{tty_name:-unknown}}|$cmd" >> {watch_file} }} export PROMPT_COMMAND='history -a; __cortex_log_cmd' -''' +""" marker = "# Cortex Terminal Watch Hook" - + bashrc = Path.home() / ".bashrc" zshrc = Path.home() / ".zshrc" - + added_to = [] - + if permanent: # Add to .bashrc if it exists and doesn't already have the hook if bashrc.exists(): @@ -713,14 +756,14 @@ def setup_auto_watch(self, permanent: bool = True) -> tuple[bool, str]: added_to.append(".bashrc") else: added_to.append(".bashrc (already configured)") - + # Add to .zshrc if it exists if zshrc.exists(): content = zshrc.read_text() if marker not in content: # Zsh uses precmd instead of PROMPT_COMMAND # Added tracking to avoid duplicates - zsh_hook = f''' + zsh_hook = f""" {marker} typeset -g __cortex_last_cmd="" cortex_watch_hook() {{ @@ -736,35 +779,35 @@ def setup_auto_watch(self, permanent: bool = True) -> tuple[bool, str]: echo "${{tty_name:-unknown}}|$cmd" >> {watch_file} }} precmd_functions+=(cortex_watch_hook) 
-''' +""" with open(zshrc, "a") as f: f.write(zsh_hook) added_to.append(".zshrc") else: added_to.append(".zshrc (already configured)") - + # Create a source file for existing terminals source_file = Path.home() / ".cortex" / "watch_hook.sh" - source_file.write_text(f'''#!/bin/bash + source_file.write_text(f"""#!/bin/bash {marker} {hook_line} echo "✓ Cortex is now watching this terminal" -''') +""") source_file.chmod(0o755) source_file.chmod(0o755) - + if added_to: msg = f"Added to: {', '.join(added_to)}\n" msg += f"For existing terminals, run: source {source_file}" return True, msg else: return True, f"Source file created: {source_file}\nRun: source {source_file}" - + def remove_auto_watch(self) -> tuple[bool, str]: """Remove the automatic terminal watching hook from shell configs.""" marker = "# Cortex Terminal Watch Hook" removed_from = [] - + for rc_file in [Path.home() / ".bashrc", Path.home() / ".zshrc"]: if rc_file.exists(): content = rc_file.read_text() @@ -773,44 +816,49 @@ def remove_auto_watch(self) -> tuple[bool, str]: lines = content.split("\n") new_lines = [] skip_until_blank = False - + for line in lines: if marker in line: skip_until_blank = True continue if skip_until_blank: - if line.strip() == "" or line.startswith("export PROMPT") or line.startswith("cortex_watch") or line.startswith("precmd_functions"): + if ( + line.strip() == "" + or line.startswith("export PROMPT") + or line.startswith("cortex_watch") + or line.startswith("precmd_functions") + ): continue if line.startswith("}"): continue skip_until_blank = False new_lines.append(line) - + rc_file.write_text("\n".join(new_lines)) removed_from.append(rc_file.name) - + # Remove source file source_file = Path.home() / ".cortex" / "watch_hook.sh" if source_file.exists(): source_file.unlink() removed_from.append("watch_hook.sh") - + if removed_from: return True, f"Removed from: {', '.join(removed_from)}" return True, "No hooks found to remove" - + def broadcast_hook_to_terminals(self) -> int: """ 
Attempt to set up the hook in all running bash terminals. Uses various methods to inject the hook. - + Returns the number of terminals that were set up. """ watch_file = self.get_watch_file_path() hook_cmd = f'export PROMPT_COMMAND=\'history -a; echo "$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" >> {watch_file}\'' - + count = 0 - + # Method 1: Write to all pts devices (requires proper permissions) try: pts_dir = Path("/dev/pts") @@ -820,27 +868,27 @@ def broadcast_hook_to_terminals(self) -> int: try: # This usually requires the same user with open(pts, "w") as f: - f.write(f"\n# Cortex: Setting up terminal watch...\n") - f.write(f"source ~/.cortex/watch_hook.sh\n") + f.write("\n# Cortex: Setting up terminal watch...\n") + f.write("source ~/.cortex/watch_hook.sh\n") count += 1 except (PermissionError, OSError): pass except Exception: pass - + return count - + def _monitor_loop(self): """Monitor loop that watches ALL terminal sources for activity.""" file_positions: dict[str, int] = {} last_check_time: dict[str, float] = {} - + # Cortex watch file (real-time if user sets up the hook) watch_file = self.get_watch_file_path() - + # Ensure watch file directory exists watch_file.parent.mkdir(parents=True, exist_ok=True) - + # Initialize positions for all shell history files - start at END to only see NEW commands for hist_file in self._shell_history_files: if hist_file.exists(): @@ -849,7 +897,7 @@ def _monitor_loop(self): last_check_time[str(hist_file)] = time.time() except OSError: pass - + # Initialize watch file position - ALWAYS start from END of existing content # This ensures we only see commands written AFTER monitoring starts if watch_file.exists(): @@ -861,7 +909,7 @@ def _monitor_loop(self): else: # File doesn't exist yet - will be created, start from 0 file_positions[str(watch_file)] = 0 - + # Initialize positions for all Cursor terminal files for terminals_dir in self._cursor_terminals_dirs: if terminals_dir.exists(): @@ -876,16 +924,16 @@ def 
_monitor_loop(self): file_positions[str(term_file)] = term_file.stat().st_size except OSError: pass - + check_count = 0 while self._monitoring: time.sleep(0.2) # Check very frequently (5 times per second) check_count += 1 - + # Check Cortex watch file FIRST (this is the real-time one) if watch_file.exists(): self._check_watch_file(watch_file, file_positions) - + # Check all shell history files for hist_file in self._shell_history_files: if hist_file.exists(): @@ -893,43 +941,47 @@ def _monitor_loop(self): self._check_file_for_new_commands( hist_file, file_positions, source=f"{shell_name}_history" ) - + # Check ALL Cursor terminal directories (these update in real-time!) for terminals_dir in self._cursor_terminals_dirs: if terminals_dir.exists(): project_name = terminals_dir.parent.name - + # IDE terminals - check ALL txt files for term_file in terminals_dir.glob("*.txt"): if not term_file.name.startswith("ext-"): self._check_file_for_new_commands( - term_file, file_positions, - source=f"cursor:{project_name}:{term_file.stem}" + term_file, + file_positions, + source=f"cursor:{project_name}:{term_file.stem}", ) - + # External terminals (iTerm, gnome-terminal, etc.) for term_file in terminals_dir.glob("ext-*.txt"): self._check_file_for_new_commands( - term_file, file_positions, - source=f"external:{project_name}:{term_file.stem}" + term_file, + file_positions, + source=f"external:{project_name}:{term_file.stem}", ) - + # Check tmux panes if available (every 5 checks = 1 second) if self._tmux_available and check_count % 5 == 0: self._check_tmux_panes() - + # Periodically show we're still monitoring (every 30 seconds) if check_count % 150 == 0 and self._show_live_output: - console.print(f"[dim]... still monitoring ({len(self._commands_observed)} commands observed so far)[/dim]") - + console.print( + f"[dim]... 
still monitoring ({len(self._commands_observed)} commands observed so far)[/dim]" + ) + def _is_cortex_terminal_command(self, command: str) -> bool: """Check if a command is from the Cortex terminal itself (should be ignored). - + This should be very conservative - only filter out commands that are DEFINITELY from Cortex's own terminal, not user commands. """ cmd_lower = command.lower().strip() - + # Only filter out commands that are clearly from Cortex terminal cortex_patterns = [ "cortex ask", @@ -939,83 +991,86 @@ def _is_cortex_terminal_command(self, command: str) -> bool: "source ~/.cortex/watch_hook", # Setting up the watch hook ".cortex/watch_hook", ] - + for pattern in cortex_patterns: if pattern in cmd_lower: return True - + # Check if command starts with "cortex " (the CLI) if cmd_lower.startswith("cortex "): return True - + # Don't filter out general commands - let them through! return False - + def _check_watch_file(self, watch_file: Path, positions: dict[str, int]): """Check the Cortex watch file for new commands (real-time).""" try: current_size = watch_file.stat().st_size key = str(watch_file) - + # Initialize position if not set # Start from 0 because we clear the file when monitoring starts # This ensures we capture all commands written after monitoring begins if key not in positions: positions[key] = 0 # Start from beginning since file was cleared - + # If file is smaller than our position (was truncated), reset if current_size < positions[key]: positions[key] = 0 - + if current_size > positions[key]: with open(watch_file) as f: f.seek(positions[key]) new_content = f.read() - + # Parse watch file - each line is a command for line in new_content.split("\n"): line = line.strip() if not line: continue - + # Skip very short lines or common noise if len(line) < 2: continue - + # Skip if we've already seen this exact command recently - if hasattr(self, '_recent_watch_commands'): + if hasattr(self, "_recent_watch_commands"): if line in 
self._recent_watch_commands: continue else: self._recent_watch_commands = [] - + # Keep track of recent commands to avoid duplicates self._recent_watch_commands.append(line) if len(self._recent_watch_commands) > 20: self._recent_watch_commands.pop(0) - + # Handle format with timestamp: "HH:MM:SS command" - if re.match(r'^\d{2}:\d{2}:\d{2}\s+', line): + if re.match(r"^\d{2}:\d{2}:\d{2}\s+", line): parts = line.split(" ", 1) if len(parts) == 2 and parts[1].strip(): self._process_observed_command(parts[1].strip(), "live_terminal") else: # Plain command self._process_observed_command(line, "live_terminal") - + positions[key] = current_size - + except OSError: pass - + def _check_tmux_panes(self): """Check tmux panes for recent commands.""" import subprocess + try: # Get list of tmux sessions result = subprocess.run( ["tmux", "list-panes", "-a", "-F", "#{pane_id}:#{pane_current_command}"], - capture_output=True, text=True, timeout=1 + capture_output=True, + text=True, + timeout=1, ) if result.returncode == 0: for line in result.stdout.strip().split("\n"): @@ -1025,7 +1080,7 @@ def _check_tmux_panes(self): self._process_observed_command(cmd, source=f"tmux:{pane_id}") except (subprocess.TimeoutExpired, FileNotFoundError, subprocess.SubprocessError): pass - + def _check_file_for_new_commands( self, file_path: Path, @@ -1036,16 +1091,16 @@ def _check_file_for_new_commands( try: current_size = file_path.stat().st_size key = str(file_path) - + if key not in positions: positions[key] = current_size return - + if current_size > positions[key]: with open(file_path) as f: f.seek(positions[key]) new_content = f.read() - + # For Cursor terminals, also extract output if "cursor" in source or "external" in source: self._process_terminal_content(new_content, source) @@ -1053,23 +1108,23 @@ def _check_file_for_new_commands( new_commands = self._extract_commands_from_content(new_content, source) for cmd in new_commands: self._process_observed_command(cmd, source) - + positions[key] = 
current_size - + except OSError: pass - + def _process_terminal_content(self, content: str, source: str): """Process terminal content including commands and their output.""" lines = content.split("\n") current_command = None output_lines = [] - + for line in lines: line_stripped = line.strip() if not line_stripped: continue - + # Check if this is a command line (has prompt) is_command = False for pattern in [ @@ -1085,30 +1140,30 @@ def _process_terminal_content(self, content: str, source: str): self._process_observed_command_with_output( current_command, "\n".join(output_lines), source ) - + current_command = match.group(1).strip() output_lines = [] is_command = True break - + if not is_command and current_command: # This is output from the current command output_lines.append(line_stripped) - + # Process the last command if current_command: self._process_observed_command_with_output( current_command, "\n".join(output_lines), source ) - + def _process_observed_command_with_output(self, command: str, output: str, source: str): """Process a command with its output for better feedback.""" # First process the command normally self._process_observed_command(command, source) - + if not self._show_live_output: return - + # Then show relevant output if there is any if output and len(output) > 5: # Check for errors in output @@ -1149,58 +1204,63 @@ def _process_observed_command_with_output(self, command: str, output: str, sourc (r"unknown directive", "Unknown directive"), (r"unexpected", "Unexpected error"), ] - + for pattern, msg in error_patterns: if re.search(pattern, output, re.IGNORECASE): # Show error in bordered panel from rich.panel import Panel from rich.text import Text - + output_preview = output[:200] + "..." 
if len(output) > 200 else output - + error_text = Text() error_text.append(f"✗ {msg}\n\n", style="bold red") - for line in output_preview.split('\n')[:3]: + for line in output_preview.split("\n")[:3]: if line.strip(): error_text.append(f" {line.strip()[:80]}\n", style="dim") - + console.print() - console.print(Panel( - error_text, - title="[red bold]Error[/red bold]", - border_style="red", - padding=(0, 1), - )) - + console.print( + Panel( + error_text, + title="[red bold]Error[/red bold]", + border_style="red", + padding=(0, 1), + ) + ) + # Get AI-powered help self._provide_error_help(command, output) break else: # Show success indicator for commands that completed if "✓" in output or "success" in output.lower() or "complete" in output.lower(): - console.print(f"[green] ✓ Command completed successfully[/green]") + console.print("[green] ✓ Command completed successfully[/green]") elif len(output.strip()) > 0: # Show a preview of the output output_lines = [l for l in output.split("\n") if l.strip()][:3] if output_lines: - console.print(f"[dim] Output: {output_lines[0][:60]}{'...' if len(output_lines[0]) > 60 else ''}[/dim]") - + console.print( + f"[dim] Output: {output_lines[0][:60]}{'...' 
if len(output_lines[0]) > 60 else ''}[/dim]" + ) + def _provide_error_help(self, command: str, output: str): """Provide contextual help for errors using Claude LLM and send solutions via notifications.""" + import subprocess + from rich.panel import Panel from rich.table import Table - import subprocess - + console.print() - + # First, try Claude for intelligent analysis claude_analysis = None if self._claude and self._use_llm and self._claude.is_available(): claude_analysis = self._claude.analyze_error(command, output) - + # Also use the existing ErrorDiagnoser for pattern-based analysis diagnosis = self._diagnoser.diagnose_error(command, output) - + error_type = diagnosis.get("error_type", "unknown") category = diagnosis.get("category", "unknown") description = diagnosis.get("description", output[:200]) @@ -1208,17 +1268,17 @@ def _provide_error_help(self, command: str, output: str): can_auto_fix = diagnosis.get("can_auto_fix", False) fix_strategy = diagnosis.get("fix_strategy", "") extracted_info = diagnosis.get("extracted_info", {}) - + # If Claude provided analysis, use it to enhance diagnosis if claude_analysis: cause = claude_analysis.get("cause", "") claude_fixes = claude_analysis.get("fixes", []) - + # Show Claude's analysis in bordered panel if cause or claude_fixes: from rich.panel import Panel from rich.text import Text - + analysis_text = Text() if cause: analysis_text.append("Cause: ", style="bold cyan") @@ -1227,15 +1287,17 @@ def _provide_error_help(self, command: str, output: str): analysis_text.append("Solution:\n", style="bold green") for fix in claude_fixes[:3]: analysis_text.append(f" $ {fix}\n", style="green") - + console.print() - console.print(Panel( - analysis_text, - title="[cyan bold]🤖 Claude Analysis[/cyan bold]", - border_style="cyan", - padding=(0, 1), - )) - + console.print( + Panel( + analysis_text, + title="[cyan bold]🤖 Claude Analysis[/cyan bold]", + border_style="cyan", + padding=(0, 1), + ) + ) + # Send notification with Claude's 
solution if cause or claude_fixes: notif_title = f"🔧 Cortex: {error_type if error_type != 'unknown' else 'Error'}" @@ -1243,115 +1305,139 @@ def _provide_error_help(self, command: str, output: str): if claude_fixes: notif_body += f"\n\nFix: {claude_fixes[0]}" self._send_solution_notification(notif_title, notif_body) - + # Use Claude's fixes if pattern-based analysis didn't find any if not fix_commands and claude_fixes: fix_commands = claude_fixes can_auto_fix = True - + # Show diagnosis in panel (only if no Claude analysis) if not claude_analysis: from rich.panel import Panel - from rich.text import Text from rich.table import Table - + from rich.text import Text + diag_table = Table(show_header=False, box=None, padding=(0, 1)) diag_table.add_column("Key", style="dim") diag_table.add_column("Value", style="bold") - + diag_table.add_row("Type", error_type) diag_table.add_row("Category", category) if can_auto_fix: - diag_table.add_row("Auto-Fix", f"[green]● Yes[/green] [dim]({fix_strategy})[/dim]" if fix_strategy else "[green]● Yes[/green]") + diag_table.add_row( + "Auto-Fix", + ( + f"[green]● Yes[/green] [dim]({fix_strategy})[/dim]" + if fix_strategy + else "[green]● Yes[/green]" + ), + ) else: diag_table.add_row("Auto-Fix", "[red]○ No[/red]") - + console.print() - console.print(Panel( - diag_table, - title="[yellow bold]Diagnosis[/yellow bold]", - border_style="yellow", - padding=(0, 1), - )) - - # If auto-fix is possible, attempt to run the fix commands - if can_auto_fix and fix_commands: - actionable_commands = [c for c in fix_commands if not c.startswith("#")] - + console.print( + Panel( + diag_table, + title="[yellow bold]Diagnosis[/yellow bold]", + border_style="yellow", + padding=(0, 1), + ) + ) + + # If auto-fix is possible, attempt to run the fix commands + if can_auto_fix and fix_commands: + actionable_commands = [c for c in fix_commands if not c.startswith("#")] + if actionable_commands: # Auto-fix with progress bar from rich.panel import Panel - from 
rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn - + from rich.progress import BarColumn, Progress, SpinnerColumn, TextColumn + console.print() - console.print(Panel( - f"[bold]Running {len(actionable_commands)} fix command(s)...[/bold]", - title="[green bold]🔧 Auto-Fix[/green bold]", - border_style="green", - padding=(0, 1), - )) - + console.print( + Panel( + f"[bold]Running {len(actionable_commands)} fix command(s)...[/bold]", + title="[green bold]🔧 Auto-Fix[/green bold]", + border_style="green", + padding=(0, 1), + ) + ) + # Send notification that we're fixing the command self._notify_fixing_command(command, actionable_commands[0]) - + # Run the fix commands fix_success = self._run_auto_fix_commands(actionable_commands, command, error_type) - + if fix_success: # Success in bordered panel from rich.panel import Panel + console.print() - console.print(Panel( - f"[green]✓[/green] Auto-fix completed!\n\n[dim]Retry:[/dim] [cyan]{command}[/cyan]", - title="[green bold]Success[/green bold]", - border_style="green", - padding=(0, 1), - )) - + console.print( + Panel( + f"[green]✓[/green] Auto-fix completed!\n\n[dim]Retry:[/dim] [cyan]{command}[/cyan]", + title="[green bold]Success[/green bold]", + border_style="green", + padding=(0, 1), + ) + ) + # Send success notification self._send_fix_success_notification(command, error_type) else: pass # Sudo commands shown separately - + console.print() return - + # Show fix commands in bordered panel if we can't auto-fix if fix_commands and not claude_analysis: from rich.panel import Panel from rich.text import Text - + fix_text = Text() for cmd in fix_commands[:3]: if not cmd.startswith("#"): fix_text.append(f" $ {cmd}\n", style="green") - + console.print() - console.print(Panel( - fix_text, - title="[bold]Manual Fix[/bold]", - border_style="blue", - padding=(0, 1), - )) - + console.print( + Panel( + fix_text, + title="[bold]Manual Fix[/bold]", + border_style="blue", + padding=(0, 1), + ) + ) + # If error is 
unknown and no Claude, use local LLM - if error_type == "unknown" and not claude_analysis and self._llm and self._use_llm and self._llm.is_available(): + if ( + error_type == "unknown" + and not claude_analysis + and self._llm + and self._use_llm + and self._llm.is_available() + ): llm_help = self._llm_analyze_error(command, output) if llm_help: console.print() console.print(f"[dim]{llm_help}[/dim]") - + # Try to extract fix command from LLM response llm_fix = self._extract_fix_from_llm(llm_help) if llm_fix: console.print() - console.print(f"[bold green]💡 AI Suggested Fix:[/bold green] [cyan]{llm_fix}[/cyan]") - + console.print( + f"[bold green]💡 AI Suggested Fix:[/bold green] [cyan]{llm_fix}[/cyan]" + ) + # Attempt to run the LLM suggested fix if self._is_safe_fix_command(llm_fix): console.print("[dim]Attempting AI-suggested fix...[/dim]") self._run_auto_fix_commands([llm_fix], command, "ai_suggested") - + # Build notification message notification_msg = "" if fix_commands: @@ -1362,23 +1448,26 @@ def _provide_error_help(self, command: str, output: str): notification_msg = description[:100] else: notification_msg = description[:100] - + # Send desktop notification self._send_error_notification(command, notification_msg, error_type, can_auto_fix) - + console.print() - - def _run_auto_fix_commands(self, commands: list[str], original_command: str, error_type: str) -> bool: + + def _run_auto_fix_commands( + self, commands: list[str], original_command: str, error_type: str + ) -> bool: """Run auto-fix commands with progress bar and return True if successful.""" import subprocess - from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn + from rich.panel import Panel + from rich.progress import BarColumn, Progress, SpinnerColumn, TaskProgressColumn, TextColumn from rich.table import Table - + all_success = True sudo_commands_pending = [] results = [] - + # Break down && commands into individual commands expanded_commands = [] for cmd 
in commands[:3]: @@ -1390,149 +1479,168 @@ def _run_auto_fix_commands(self, commands: list[str], original_command: str, err expanded_commands.extend(parts) else: expanded_commands.append(cmd) - + actionable = expanded_commands - + # Show each command being run with Rich Status (no raw ANSI codes) from rich.status import Status - + for i, fix_cmd in enumerate(actionable, 1): # Check if this needs sudo needs_sudo = fix_cmd.strip().startswith("sudo ") - + if needs_sudo: try: check_sudo = subprocess.run( - ["sudo", "-n", "true"], - capture_output=True, - timeout=5 + ["sudo", "-n", "true"], capture_output=True, timeout=5 ) - + if check_sudo.returncode != 0: sudo_commands_pending.append(fix_cmd) results.append((fix_cmd, "sudo", None)) - console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {fix_cmd[:55]}... [dim](needs sudo)[/dim]") + console.print( + f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {fix_cmd[:55]}... [dim](needs sudo)[/dim]" + ) continue except Exception: sudo_commands_pending.append(fix_cmd) results.append((fix_cmd, "sudo", None)) - console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {fix_cmd[:55]}... [dim](needs sudo)[/dim]") + console.print( + f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {fix_cmd[:55]}... [dim](needs sudo)[/dim]" + ) continue - + # Run command with status spinner cmd_display = fix_cmd[:55] + "..." 
if len(fix_cmd) > 55 else fix_cmd - + try: with Status(f"[cyan]{cmd_display}[/cyan]", console=console, spinner="dots"): result = subprocess.run( - fix_cmd, - shell=True, - capture_output=True, - text=True, - timeout=60 + fix_cmd, shell=True, capture_output=True, text=True, timeout=60 ) - + if result.returncode == 0: results.append((fix_cmd, "success", None)) - console.print(f" [dim][{i}/{len(actionable)}][/dim] [green]✓[/green] {cmd_display}") + console.print( + f" [dim][{i}/{len(actionable)}][/dim] [green]✓[/green] {cmd_display}" + ) else: - if "password" in (result.stderr or "").lower() or "terminal is required" in (result.stderr or "").lower(): + if ( + "password" in (result.stderr or "").lower() + or "terminal is required" in (result.stderr or "").lower() + ): sudo_commands_pending.append(fix_cmd) results.append((fix_cmd, "sudo", None)) - console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {cmd_display} [dim](needs sudo)[/dim]") + console.print( + f" [dim][{i}/{len(actionable)}][/dim] [yellow]![/yellow] {cmd_display} [dim](needs sudo)[/dim]" + ) else: - results.append((fix_cmd, "failed", result.stderr[:60] if result.stderr else "failed")) + results.append( + (fix_cmd, "failed", result.stderr[:60] if result.stderr else "failed") + ) all_success = False - console.print(f" [dim][{i}/{len(actionable)}][/dim] [red]✗[/red] {cmd_display}") - console.print(f" [dim red]{result.stderr[:80] if result.stderr else 'Command failed'}[/dim red]") + console.print( + f" [dim][{i}/{len(actionable)}][/dim] [red]✗[/red] {cmd_display}" + ) + console.print( + f" [dim red]{result.stderr[:80] if result.stderr else 'Command failed'}[/dim red]" + ) break - + except subprocess.TimeoutExpired: results.append((fix_cmd, "timeout", None)) all_success = False - console.print(f" [dim][{i}/{len(actionable)}][/dim] [yellow]⏱[/yellow] {cmd_display} [dim](timeout)[/dim]") + console.print( + f" [dim][{i}/{len(actionable)}][/dim] [yellow]⏱[/yellow] {cmd_display} 
[dim](timeout)[/dim]" + ) break except Exception as e: results.append((fix_cmd, "error", str(e)[:50])) all_success = False console.print(f" [dim][{i}/{len(actionable)}][/dim] [red]✗[/red] {cmd_display}") break - + # Show summary line success_count = sum(1 for _, s, _ in results if s == "success") if success_count > 0 and success_count == len([r for r in results if r[1] != "sudo"]): console.print(f"\n [green]✓ All {success_count} command(s) completed[/green]") - + # Show sudo commands in bordered panel if sudo_commands_pending: from rich.panel import Panel from rich.text import Text - + sudo_text = Text() sudo_text.append("Run these commands manually:\n\n", style="dim") for cmd in sudo_commands_pending: sudo_text.append(f" $ {cmd}\n", style="green") - + console.print() - console.print(Panel( - sudo_text, - title="[yellow bold]🔐 Sudo Required[/yellow bold]", - border_style="yellow", - padding=(0, 1), - )) - + console.print( + Panel( + sudo_text, + title="[yellow bold]🔐 Sudo Required[/yellow bold]", + border_style="yellow", + padding=(0, 1), + ) + ) + # Send notification about pending sudo commands self._send_sudo_pending_notification(sudo_commands_pending) - + # Still consider it a partial success if we need manual sudo return len(sudo_commands_pending) < len([c for c in commands if not c.startswith("#")]) - + return all_success - + def _send_sudo_pending_notification(self, commands: list[str]): """Send notification about pending sudo commands.""" try: import subprocess - + cmd_preview = commands[0][:40] + "..." 
if len(commands[0]) > 40 else commands[0] - - subprocess.run([ - "notify-send", - "--urgency=normal", - "--icon=dialog-password", - "🔐 Cortex: Sudo required", - f"Run in your terminal:\n{cmd_preview}" - ], capture_output=True, timeout=2) - + + subprocess.run( + [ + "notify-send", + "--urgency=normal", + "--icon=dialog-password", + "🔐 Cortex: Sudo required", + f"Run in your terminal:\n{cmd_preview}", + ], + capture_output=True, + timeout=2, + ) + except Exception: pass - + def _extract_fix_from_llm(self, llm_response: str) -> str | None: """Extract a fix command from LLM response.""" import re - + # Look for commands in common formats patterns = [ - r'`([^`]+)`', # Backtick enclosed - r'^\$ (.+)$', # Shell prompt format - r'^sudo (.+)$', # Sudo commands - r'run[:\s]+([^\n]+)', # "run: command" format - r'try[:\s]+([^\n]+)', # "try: command" format + r"`([^`]+)`", # Backtick enclosed + r"^\$ (.+)$", # Shell prompt format + r"^sudo (.+)$", # Sudo commands + r"run[:\s]+([^\n]+)", # "run: command" format + r"try[:\s]+([^\n]+)", # "try: command" format ] - + for pattern in patterns: matches = re.findall(pattern, llm_response, re.MULTILINE | re.IGNORECASE) for match in matches: cmd = match.strip() if cmd and len(cmd) > 3 and self._is_safe_fix_command(cmd): return cmd - + return None - + def _is_safe_fix_command(self, command: str) -> bool: """Check if a fix command is safe to run automatically.""" cmd_lower = command.lower().strip() - + # Dangerous commands we should never auto-run dangerous_patterns = [ "rm -rf /", @@ -1549,11 +1657,11 @@ def _is_safe_fix_command(self, command: str) -> bool: "curl|bash", "wget|bash", ] - + for pattern in dangerous_patterns: if pattern in cmd_lower: return False - + # Safe fix command patterns safe_patterns = [ "sudo systemctl", @@ -1573,72 +1681,91 @@ def _is_safe_fix_command(self, command: str) -> bool: "mkdir -p", "touch", ] - + for pattern in safe_patterns: if cmd_lower.startswith(pattern): return True - + # Allow sudo commands for 
common safe operations if cmd_lower.startswith("sudo "): rest = cmd_lower[5:].strip() - safe_sudo = ["systemctl", "service", "apt", "apt-get", "nginx", "chmod", "chown", "mkdir"] + safe_sudo = [ + "systemctl", + "service", + "apt", + "apt-get", + "nginx", + "chmod", + "chown", + "mkdir", + ] if any(rest.startswith(s) for s in safe_sudo): return True - + return False - + def _send_fix_success_notification(self, command: str, error_type: str): """Send a desktop notification that the fix was successful.""" try: import subprocess - + cmd_short = command[:30] + "..." if len(command) > 30 else command - - subprocess.run([ - "notify-send", - "--urgency=normal", - "--icon=dialog-information", - f"✅ Cortex: Fixed {error_type}", - f"Auto-fix successful! You can now retry:\n{cmd_short}" - ], capture_output=True, timeout=2) - + + subprocess.run( + [ + "notify-send", + "--urgency=normal", + "--icon=dialog-information", + f"✅ Cortex: Fixed {error_type}", + f"Auto-fix successful! You can now retry:\n{cmd_short}", + ], + capture_output=True, + timeout=2, + ) + except Exception: pass - + def _send_solution_notification(self, title: str, body: str): """Send a desktop notification with the solution from Claude.""" try: import subprocess - + # Use notify-send with high priority - subprocess.run([ - "notify-send", - "--urgency=critical", - "--icon=dialog-information", - "--expire-time=15000", # 15 seconds - title, - body - ], capture_output=True, timeout=2) - + subprocess.run( + [ + "notify-send", + "--urgency=critical", + "--icon=dialog-information", + "--expire-time=15000", # 15 seconds + title, + body, + ], + capture_output=True, + timeout=2, + ) + except Exception: pass - - def _send_error_notification(self, command: str, solution: str, error_type: str = "", can_auto_fix: bool = False): + + def _send_error_notification( + self, command: str, solution: str, error_type: str = "", can_auto_fix: bool = False + ): """Send a desktop notification with the error solution.""" try: # Try to 
use notify-send (standard on Ubuntu) import subprocess - + # Truncate for notification cmd_short = command[:30] + "..." if len(command) > 30 else command solution_short = solution[:150] + "..." if len(solution) > 150 else solution - + # Build title with error type if error_type and error_type != "unknown": title = f"🔧 Cortex: {error_type}" else: - title = f"🔧 Cortex: Error detected" - + title = "🔧 Cortex: Error detected" + # Add auto-fix indicator if can_auto_fix: body = f"✓ Auto-fixable\n\n{solution_short}" @@ -1646,32 +1773,30 @@ def _send_error_notification(self, command: str, solution: str, error_type: str else: body = solution_short icon = "dialog-warning" - + # Send notification - subprocess.run([ - "notify-send", - "--urgency=normal", - f"--icon={icon}", - title, - body - ], capture_output=True, timeout=2) - + subprocess.run( + ["notify-send", "--urgency=normal", f"--icon={icon}", title, body], + capture_output=True, + timeout=2, + ) + except (FileNotFoundError, subprocess.TimeoutExpired, Exception): # notify-send not available or failed, try callback if self.notification_callback: self.notification_callback(f"Error in: {command[:30]}", solution[:100]) - + def _llm_analyze_error(self, command: str, error_output: str) -> str | None: """Use local LLM to analyze an error and provide a fix.""" if not self._llm: return None - + # Build context from recent commands context = "" if self._session_context: context = "Recent commands:\n" + "\n".join(self._session_context[-5:]) + "\n\n" - - prompt = f"""You are a Linux expert. A user ran a command and got an error. + + prompt = f"""You are a Linux expert. A user ran a command and got an error. Provide a brief, actionable fix (2-3 sentences max). IMPORTANT: Do NOT suggest sudo commands - they cannot be auto-executed. 
@@ -1683,24 +1808,24 @@ def _llm_analyze_error(self, command: str, error_output: str) -> str | None: {error_output[:500]} Fix (be specific, give the exact non-sudo command to run):""" - + try: result = self._llm.analyze(prompt, max_tokens=150, timeout=10) if result: return result.strip() except Exception: pass - + return None - + def analyze_session_intent(self) -> str | None: """Use LLM to analyze what the user is trying to accomplish based on their commands.""" if not self._llm or not self._llm.is_available(): return None - + if len(self._session_context) < 2: return None - + prompt = f"""Based on these terminal commands, what is the user trying to accomplish? Give a brief summary (1 sentence max). @@ -1708,7 +1833,7 @@ def analyze_session_intent(self) -> str | None: {chr(10).join(self._session_context[-5:])} The user is trying to:""" - + try: result = self._llm.analyze(prompt, max_tokens=50, timeout=15) if result: @@ -1719,17 +1844,17 @@ def analyze_session_intent(self) -> str | None: return result except Exception: pass - + return None - + def get_next_step_suggestion(self) -> str | None: """Use LLM to suggest the next logical step based on recent commands.""" if not self._llm or not self._llm.is_available(): return None - + if len(self._session_context) < 1: return None - + prompt = f"""Based on these terminal commands, what single command should the user run next? Respond with ONLY the command, nothing else. 
@@ -1737,7 +1862,7 @@ def get_next_step_suggestion(self) -> str | None: {chr(10).join(self._session_context[-5:])} Next command:""" - + try: result = self._llm.analyze(prompt, max_tokens=30, timeout=15) if result: @@ -1746,84 +1871,99 @@ def get_next_step_suggestion(self) -> str | None: # Remove common prefixes for prefix in ["$", "Run:", "Try:", "Next:", "Command:", "`"]: if result.lower().startswith(prefix.lower()): - result = result[len(prefix):].strip() + result = result[len(prefix) :].strip() result = result.rstrip("`") return result.split("\n")[0].strip() except Exception: pass - + return None - + def get_collected_context(self) -> str: """Get a formatted summary of all collected terminal context.""" with self._lock: if not self._commands_observed: return "No commands observed yet." - + lines = ["[bold]📋 Collected Terminal Context:[/bold]", ""] - + for i, obs in enumerate(self._commands_observed, 1): timestamp = obs.get("timestamp", "")[:19] source = obs.get("source", "unknown") command = obs.get("command", "") - + lines.append(f"{i}. 
[{timestamp}] ({source})") lines.append(f" $ {command}") lines.append("") - + return "\n".join(lines) - + def print_collected_context(self): """Print a summary of all collected terminal context with AI analysis.""" from rich.panel import Panel - + with self._lock: if not self._commands_observed: console.print("[dim]No commands observed yet.[/dim]") return - + console.print() - console.print(Panel( - f"[bold]Collected {len(self._commands_observed)} command(s) from other terminals[/bold]", - title="[cyan]📋 Terminal Context Summary[/cyan]", - border_style="cyan", - )) - + console.print( + Panel( + f"[bold]Collected {len(self._commands_observed)} command(s) from other terminals[/bold]", + title="[cyan]📋 Terminal Context Summary[/cyan]", + border_style="cyan", + ) + ) + for i, obs in enumerate(self._commands_observed[-10:], 1): # Show last 10 - timestamp = obs.get("timestamp", "")[:19].split("T")[-1] if "T" in obs.get("timestamp", "") else obs.get("timestamp", "")[:8] + timestamp = ( + obs.get("timestamp", "")[:19].split("T")[-1] + if "T" in obs.get("timestamp", "") + else obs.get("timestamp", "")[:8] + ) source = obs.get("source", "unknown") command = obs.get("command", "") - + # Shorten source name if ":" in source: source = source.split(":")[-1] - - console.print(f" [dim]{timestamp}[/dim] [cyan]{source:12}[/cyan] [white]{command[:50]}{'...' if len(command) > 50 else ''}[/white]") - + + console.print( + f" [dim]{timestamp}[/dim] [cyan]{source:12}[/cyan] [white]{command[:50]}{'...' if len(command) > 50 else ''}[/white]" + ) + if len(self._commands_observed) > 10: - console.print(f" [dim]... and {len(self._commands_observed) - 10} more commands[/dim]") - + console.print( + f" [dim]... 
and {len(self._commands_observed) - 10} more commands[/dim]" + ) + # Add AI analysis if available - if self._llm and self._use_llm and self._llm.is_available() and len(self._session_context) >= 2: + if ( + self._llm + and self._use_llm + and self._llm.is_available() + and len(self._session_context) >= 2 + ): console.print() console.print("[bold magenta]🤖 AI Analysis:[/bold magenta]") - + # Analyze intent intent = self.analyze_session_intent() if intent: console.print(f"[white] Intent: {intent}[/white]") - + # Suggest next step next_step = self.get_next_step_suggestion() if next_step: console.print(f"[green] Suggested next: {next_step}[/green]") - + console.print() - + def _extract_commands_from_content(self, content: str, source: str) -> list[str]: """Extract commands from terminal content based on source type.""" commands = [] - + # Shell history files - each line is a command if "_history" in source or "history" in source: for line in content.strip().split("\n"): @@ -1850,21 +1990,21 @@ def _extract_commands_from_content(self, content: str, source: str) -> list[str] line = line.strip() if not line: continue - + # Various prompt patterns prompt_patterns = [ - r"^\$ (.+)$", # Simple $ prompt - r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+\$ (.+)$", # user@host:path$ cmd - r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+# (.+)$", # root prompt - r"^>>> (.+)$", # Python REPL - r"^\(.*\)\s*\$ (.+)$", # (venv) $ cmd - r"^➜\s+.+\s+(.+)$", # Oh-my-zsh prompt - r"^❯ (.+)$", # Starship prompt - r"^▶ (.+)$", # Another prompt style - r"^\[.*\]\$ (.+)$", # [dir]$ cmd - r"^% (.+)$", # % prompt (zsh default) + r"^\$ (.+)$", # Simple $ prompt + r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+\$ (.+)$", # user@host:path$ cmd + r"^[a-zA-Z0-9_-]+@[a-zA-Z0-9_-]+:.+# (.+)$", # root prompt + r"^>>> (.+)$", # Python REPL + r"^\(.*\)\s*\$ (.+)$", # (venv) $ cmd + r"^➜\s+.+\s+(.+)$", # Oh-my-zsh prompt + r"^❯ (.+)$", # Starship prompt + r"^▶ (.+)$", # Another prompt style + r"^\[.*\]\$ (.+)$", # [dir]$ cmd + r"^% (.+)$", # 
% prompt (zsh default) ] - + for pattern in prompt_patterns: match = re.match(pattern, line) if match: @@ -1872,117 +2012,133 @@ def _extract_commands_from_content(self, content: str, source: str) -> list[str] if cmd: commands.append(cmd) break - + return commands - + def _process_observed_command(self, command: str, source: str = "unknown"): """Process an observed command and notify about issues with real-time feedback.""" # Skip empty or very short commands if not command or len(command.strip()) < 2: return - + command = command.strip() - + # Skip commands from the Cortex terminal itself if self._is_cortex_terminal_command(command): return - + # Skip common shell built-ins that aren't interesting (only if standalone) skip_commands = ["cd", "ls", "pwd", "clear", "exit", "history", "fg", "bg", "jobs", "alias"] parts = command.split() cmd_base = parts[0] if parts else "" - + # Also handle sudo prefix if cmd_base == "sudo" and len(parts) > 1: cmd_base = parts[1] - + # Only skip if it's JUST the command with no args if cmd_base in skip_commands and len(parts) == 1: return - + # Skip if it looks like a partial command or just an argument if not any(c.isalpha() for c in cmd_base): return - + # Avoid duplicates within short time window with self._lock: - recent = [c for c in self._commands_observed - if c["command"] == command - and (datetime.datetime.now() - datetime.datetime.fromisoformat(c["timestamp"])).seconds < 5] + recent = [ + c + for c in self._commands_observed + if c["command"] == command + and ( + datetime.datetime.now() - datetime.datetime.fromisoformat(c["timestamp"]) + ).seconds + < 5 + ] if recent: return - - self._commands_observed.append({ - "command": command, - "timestamp": datetime.datetime.now().isoformat(), - "source": source, - "has_error": False, # Will be updated if error is detected - "status": "pending", # pending, success, failed - }) - + + self._commands_observed.append( + { + "command": command, + "timestamp": 
datetime.datetime.now().isoformat(), + "source": source, + "has_error": False, # Will be updated if error is detected + "status": "pending", # pending, success, failed + } + ) + # Add to session context for LLM self._session_context.append(f"$ {command}") # Keep only last 10 commands for context if len(self._session_context) > 10: self._session_context = self._session_context[-10:] - + # Real-time feedback with visual emphasis self._show_realtime_feedback(command, source) - + # For live terminal commands, proactively check the result if source == "live_terminal": self._check_command_result(command) - + # Check for issues and provide help issues = self._check_command_issues(command) if issues: from rich.panel import Panel - console.print(Panel( - f"[bold yellow]⚠ Issue:[/bold yellow] {issues}", - border_style="yellow", - padding=(0, 1), - expand=False, - )) + + console.print( + Panel( + f"[bold yellow]⚠ Issue:[/bold yellow] {issues}", + border_style="yellow", + padding=(0, 1), + expand=False, + ) + ) if self.notification_callback: - self.notification_callback(f"Cortex: Issue detected", issues) - + self.notification_callback("Cortex: Issue detected", issues) + # Check if command matches expected commands if self._expected_commands: matched = self._check_command_match(command) from rich.panel import Panel + if matched: - console.print(Panel( - "[bold green]✓ Matches expected command[/bold green]", - border_style="green", - padding=(0, 1), - expand=False, - )) + console.print( + Panel( + "[bold green]✓ Matches expected command[/bold green]", + border_style="green", + padding=(0, 1), + expand=False, + ) + ) else: # User ran a DIFFERENT command than expected - console.print(Panel( - "[bold yellow]⚠ Not in expected commands[/bold yellow]", - border_style="yellow", - padding=(0, 1), - expand=False, - )) + console.print( + Panel( + "[bold yellow]⚠ Not in expected commands[/bold yellow]", + border_style="yellow", + padding=(0, 1), + expand=False, + ) + ) # Send notification 
with the correct command(s) self._notify_wrong_command(command) - + def _check_command_match(self, command: str) -> bool: """Check if a command matches any expected command.""" if not self._expected_commands: return True # No expected commands means anything goes - + cmd_normalized = command.strip().lower() # Remove sudo prefix for comparison if cmd_normalized.startswith("sudo "): cmd_normalized = cmd_normalized[5:].strip() - + for expected in self._expected_commands: exp_normalized = expected.strip().lower() if exp_normalized.startswith("sudo "): exp_normalized = exp_normalized[5:].strip() - + # Check for exact match or if command contains the expected command if cmd_normalized == exp_normalized: return True @@ -1990,74 +2146,86 @@ def _check_command_match(self, command: str) -> bool: return True if cmd_normalized in exp_normalized: return True - + # Check if first words match (e.g., "systemctl restart nginx" vs "systemctl restart nginx.service") cmd_parts = cmd_normalized.split() exp_parts = exp_normalized.split() if len(cmd_parts) >= 2 and len(exp_parts) >= 2: if cmd_parts[0] == exp_parts[0] and cmd_parts[1] == exp_parts[1]: return True - + return False - + def _notify_wrong_command(self, wrong_command: str): """Send desktop notification when user runs wrong command.""" if not self._expected_commands: return - + # Find the most relevant expected command correct_cmd = self._expected_commands[0] if self._expected_commands else None - + if correct_cmd: title = "⚠️ Cortex: Wrong Command" body = f"You ran: {wrong_command[:40]}...\n\nExpected: {correct_cmd}" - + try: import subprocess - subprocess.run([ - "notify-send", - "--urgency=critical", - "--icon=dialog-warning", - "--expire-time=10000", - title, - body - ], capture_output=True, timeout=2) + + subprocess.run( + [ + "notify-send", + "--urgency=critical", + "--icon=dialog-warning", + "--expire-time=10000", + title, + body, + ], + capture_output=True, + timeout=2, + ) except Exception: pass - + # Also show in 
console - console.print(f" [bold yellow]📢 Expected command:[/bold yellow] [cyan]{correct_cmd}[/cyan]") - + console.print( + f" [bold yellow]📢 Expected command:[/bold yellow] [cyan]{correct_cmd}[/cyan]" + ) + def _notify_fixing_command(self, original_cmd: str, fix_cmd: str): """Send notification that Cortex is fixing a command error.""" title = "🔧 Cortex: Fixing Error" body = f"Command failed: {original_cmd[:30]}...\n\nFix: {fix_cmd}" - + try: import subprocess - subprocess.run([ - "notify-send", - "--urgency=normal", - "--icon=dialog-information", - "--expire-time=8000", - title, - body - ], capture_output=True, timeout=2) + + subprocess.run( + [ + "notify-send", + "--urgency=normal", + "--icon=dialog-information", + "--expire-time=8000", + title, + body, + ], + capture_output=True, + timeout=2, + ) except Exception: pass - + def _check_command_result(self, command: str): """Proactively check if a command succeeded by running verification commands.""" import subprocess import time - + # Wait a moment for the command to complete time.sleep(0.5) - + cmd_lower = command.lower().strip() check_cmd = None error_output = None - + # Determine what check to run based on the command if "systemctl" in cmd_lower: # Extract service name @@ -2068,17 +2236,17 @@ def _check_command_result(self, command: str): if i + 1 < len(parts): service_name = parts[i + 1] break - + if service_name: check_cmd = f"systemctl status {service_name} 2>&1 | head -5" - + elif "service" in cmd_lower and "status" not in cmd_lower: # Extract service name for service command parts = command.split() if len(parts) >= 3: service_name = parts[1] if parts[0] != "sudo" else parts[2] check_cmd = f"service {service_name} status 2>&1 | head -5" - + elif "docker" in cmd_lower: if "run" in cmd_lower or "start" in cmd_lower: # Get container name if present @@ -2088,52 +2256,59 @@ def _check_command_result(self, command: str): if p == "--name" and i + 1 < len(parts): container_name = parts[i + 1] break - + if 
container_name: - check_cmd = f"docker ps -f name={container_name} --format '{{{{.Status}}}}' 2>&1" + check_cmd = ( + f"docker ps -f name={container_name} --format '{{{{.Status}}}}' 2>&1" + ) else: check_cmd = "docker ps -l --format '{{.Status}} {{.Names}}' 2>&1" elif "stop" in cmd_lower or "rm" in cmd_lower: check_cmd = "docker ps -a -l --format '{{.Status}} {{.Names}}' 2>&1" - + elif "nginx" in cmd_lower and "-t" in cmd_lower: check_cmd = "nginx -t 2>&1" - + elif "apt" in cmd_lower or "apt-get" in cmd_lower: # Check for recent apt errors check_cmd = "tail -3 /var/log/apt/term.log 2>/dev/null || echo 'ok'" - + # Run the check command if we have one if check_cmd: try: result = subprocess.run( - check_cmd, - shell=True, - capture_output=True, - text=True, - timeout=5 + check_cmd, shell=True, capture_output=True, text=True, timeout=5 ) - + output = result.stdout + result.stderr - + # Check for error indicators in the output error_indicators = [ - "failed", "error", "not found", "inactive", "dead", - "could not", "unable", "denied", "cannot", "exited", - "not running", "not loaded" + "failed", + "error", + "not found", + "inactive", + "dead", + "could not", + "unable", + "denied", + "cannot", + "exited", + "not running", + "not loaded", ] - + has_error = any(ind in output.lower() for ind in error_indicators) - + if has_error or result.returncode != 0: error_output = output - + except (subprocess.TimeoutExpired, Exception): pass - + # If we found an error, mark the command and process it with auto-fix if error_output: - console.print(f" [dim]checking...[/dim]") + console.print(" [dim]checking...[/dim]") # Mark this command as having an error with self._lock: for obs in self._commands_observed: @@ -2149,15 +2324,15 @@ def _check_command_result(self, command: str): if obs["command"] == command and obs["status"] == "pending": obs["status"] = "success" break - + def _show_realtime_feedback(self, command: str, source: str): """Show real-time visual feedback for detected 
commands.""" if not self._show_live_output: return - + from rich.panel import Panel from rich.text import Text - + # Source icons and labels source_info = { "cursor": ("🖥️", "Cursor IDE", "cyan"), @@ -2167,14 +2342,14 @@ def _show_realtime_feedback(self, command: str, source: str): "zsh": ("📝", "Zsh", "green"), "fish": ("🐟", "Fish", "yellow"), } - + # Determine source type icon, label, color = "📝", "Terminal", "white" for key, (i, l, c) in source_info.items(): if key in source.lower(): icon, label, color = i, l, c break - + # Categorize command cmd_category = self._categorize_command(command) category_icons = { @@ -2201,59 +2376,60 @@ def _show_realtime_feedback(self, command: str, source: str): "node": "📗", } cmd_icon = category_icons.get(cmd_category, "▶") - + # Format timestamp timestamp = datetime.datetime.now().strftime("%H:%M:%S") - + # Store in buffer for later reference - self._output_buffer.append({ - "timestamp": timestamp, - "source": source, - "label": label, - "icon": icon, - "color": color, - "command": command, - "cmd_icon": cmd_icon, - }) - + self._output_buffer.append( + { + "timestamp": timestamp, + "source": source, + "label": label, + "icon": icon, + "color": color, + "command": command, + "cmd_icon": cmd_icon, + } + ) + # Print real-time feedback with bordered section analysis = self._analyze_command(command) - - from rich.panel import Panel - from rich.text import Text - + # Build command display cmd_text = Text() cmd_text.append(f"{cmd_icon} ", style="bold") cmd_text.append(command, style="bold white") if analysis: cmd_text.append(f"\n {analysis}", style="dim italic") - + console.print() - console.print(Panel( - cmd_text, - title=f"[dim]{timestamp}[/dim]", - title_align="right", - border_style="blue", - padding=(0, 1), - )) - + console.print( + Panel( + cmd_text, + title=f"[dim]{timestamp}[/dim]", + title_align="right", + border_style="blue", + padding=(0, 1), + ) + ) + def _categorize_command(self, command: str) -> str: """Categorize a 
command by its base command.""" cmd_parts = command.split() if not cmd_parts: return "unknown" - + base = cmd_parts[0] if base == "sudo" and len(cmd_parts) > 1: base = cmd_parts[1] - + return base.lower() - + def _analyze_command(self, command: str) -> str | None: """Analyze a command and return a brief description using LLM or patterns.""" cmd_lower = command.lower() - + # First try pattern matching for speed patterns = [ (r"docker run", "Starting a Docker container"), @@ -2287,28 +2463,28 @@ def _analyze_command(self, command: str) -> str | None: (r"chmod", "Changing file permissions"), (r"chown", "Changing file ownership"), ] - + for pattern, description in patterns: if re.search(pattern, cmd_lower): return description - + # Use LLM for unknown commands if self._llm and self._use_llm and self._llm.is_available(): return self._llm_analyze_command(command) - + return None - + def _llm_analyze_command(self, command: str) -> str | None: """Use local LLM to analyze a command.""" if not self._llm: return None - + prompt = f"""Analyze this Linux command and respond with ONLY a brief description (max 10 words) of what it does: Command: {command} Brief description:""" - + try: result = self._llm.analyze(prompt, max_tokens=30, timeout=5) if result: @@ -2322,20 +2498,20 @@ def _llm_analyze_command(self, command: str) -> str | None: return result except Exception: pass - + return None - + def _check_command_issues(self, command: str) -> str | None: """Check if a command has potential issues and return a warning.""" issues = [] - + if any(p in command for p in ["/etc/", "/var/", "/usr/"]): if not command.startswith("sudo") and not command.startswith("cat"): issues.append("May need sudo for system files") - + if "rm -rf /" in command: issues.append("DANGER: Destructive command detected!") - + typo_checks = { "sudp": "sudo", "suod": "sudo", @@ -2346,6 +2522,5 @@ def _check_command_issues(self, command: str) -> str | None: for typo, correct in typo_checks.items(): if 
command.startswith(typo + " "): issues.append(f"Typo? Did you mean '{correct}'?") - - return "; ".join(issues) if issues else None + return "; ".join(issues) if issues else None diff --git a/cortex/do_runner/verification.py b/cortex/do_runner/verification.py index c13a3c040..f179c1ed5 100644 --- a/cortex/do_runner/verification.py +++ b/cortex/do_runner/verification.py @@ -15,13 +15,15 @@ class ConflictDetector: """Detects conflicts with existing configurations.""" - - def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + + def _execute_command( + self, cmd: str, needs_sudo: bool = False, timeout: int = 120 + ) -> tuple[bool, str, str]: """Execute a single command.""" try: if needs_sudo and not cmd.strip().startswith("sudo"): cmd = f"sudo {cmd}" - + result = subprocess.run( ["sudo", "bash", "-c", cmd] if needs_sudo else cmd, shell=not needs_sudo, @@ -34,7 +36,7 @@ def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 12 return False, "", f"Command timed out after {timeout} seconds" except Exception as e: return False, "", str(e) - + def check_for_conflicts( self, cmd: str, @@ -42,7 +44,7 @@ def check_for_conflicts( ) -> dict[str, Any]: """ Check if the command might conflict with existing resources. - + This is a GENERAL conflict detector that works for: - Docker containers - Services (systemd) @@ -53,7 +55,7 @@ def check_for_conflicts( - Ports - Virtual environments - And more... - + Returns: Dict with conflict info, alternatives, and cleanup commands. 
""" @@ -69,12 +71,12 @@ def check_for_conflicts( self._check_database_conflict, self._check_cron_conflict, ] - + for checker in checkers: result = checker(cmd, purpose) if result["has_conflict"]: return result - + # Default: no conflict return { "has_conflict": False, @@ -85,7 +87,7 @@ def check_for_conflicts( "cleanup_commands": [], "alternative_actions": [], } - + def _create_conflict_result( self, resource_type: str, @@ -96,7 +98,7 @@ def _create_conflict_result( alternative_actions: list[dict] | None = None, ) -> dict[str, Any]: """Create a standardized conflict result with alternatives.""" - + # Generate standard alternative actions based on resource type and state if alternative_actions is None: if is_active: @@ -130,7 +132,7 @@ def _create_conflict_result( "commands": self._get_remove_commands(resource_type, resource_name), }, ] - + return { "has_conflict": True, "conflict_type": conflict_type, @@ -142,7 +144,7 @@ def _create_conflict_result( "cleanup_commands": [], "use_existing": is_active, } - + def _get_restart_commands(self, resource_type: str, name: str) -> list[str]: """Get restart commands for a resource type.""" commands = { @@ -152,7 +154,7 @@ def _get_restart_commands(self, resource_type: str, name: str) -> list[str]: "webserver": [f"sudo systemctl restart {name}"], } return commands.get(resource_type, []) - + def _get_start_commands(self, resource_type: str, name: str) -> list[str]: """Get start commands for a resource type.""" commands = { @@ -162,7 +164,7 @@ def _get_start_commands(self, resource_type: str, name: str) -> list[str]: "webserver": [f"sudo systemctl start {name}"], } return commands.get(resource_type, []) - + def _get_remove_commands(self, resource_type: str, name: str) -> list[str]: """Get remove/cleanup commands for a resource type.""" commands = { @@ -176,35 +178,36 @@ def _get_remove_commands(self, resource_type: str, name: str) -> list[str]: "database": [], # Don't auto-remove databases } return commands.get(resource_type, []) 
- + def _check_docker_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for Docker container/compose conflicts.""" result = {"has_conflict": False} - + # Docker run with --name if "docker run" in cmd.lower(): - name_match = re.search(r'--name\s+([^\s]+)', cmd) + name_match = re.search(r"--name\s+([^\s]+)", cmd) if name_match: container_name = name_match.group(1) - + # Check if container exists success, container_id, _ = self._execute_command( f"docker ps -aq --filter name=^{container_name}$", needs_sudo=False ) - + if success and container_id.strip(): # Check if running running_success, running_id, _ = self._execute_command( f"docker ps -q --filter name=^{container_name}$", needs_sudo=False ) is_running = running_success and running_id.strip() - + # Get image info _, image_info, _ = self._execute_command( - f"docker inspect --format '{{{{.Config.Image}}}}' {container_name}", needs_sudo=False + f"docker inspect --format '{{{{.Config.Image}}}}' {container_name}", + needs_sudo=False, ) image = image_info.strip() if image_info else "unknown" - + status = "running" if is_running else "stopped" return self._create_conflict_result( resource_type="container", @@ -213,11 +216,13 @@ def _check_docker_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Container '{container_name}' already exists ({status}, image: {image})", is_active=is_running, ) - + # Docker compose if "docker-compose" in cmd.lower() or "docker compose" in cmd.lower(): if "up" in cmd: - success, services, _ = self._execute_command("docker compose ps -q 2>/dev/null", needs_sudo=False) + success, services, _ = self._execute_command( + "docker compose ps -q 2>/dev/null", needs_sudo=False + ) if success and services.strip(): return self._create_conflict_result( resource_type="compose", @@ -226,29 +231,41 @@ def _check_docker_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion="Docker Compose services are already running", is_active=True, alternative_actions=[ 
- {"action": "use_existing", "description": "Keep existing services", "commands": []}, - {"action": "restart", "description": "Restart services", "commands": ["docker compose restart"]}, - {"action": "recreate", "description": "Recreate services", "commands": ["docker compose down", "docker compose up -d"]}, - ] + { + "action": "use_existing", + "description": "Keep existing services", + "commands": [], + }, + { + "action": "restart", + "description": "Restart services", + "commands": ["docker compose restart"], + }, + { + "action": "recreate", + "description": "Recreate services", + "commands": ["docker compose down", "docker compose up -d"], + }, + ], ) - + return result - + def _check_service_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for systemd service conflicts.""" result = {"has_conflict": False} - + # systemctl start/enable if "systemctl" in cmd: - service_match = re.search(r'systemctl\s+(start|enable|restart)\s+([^\s]+)', cmd) + service_match = re.search(r"systemctl\s+(start|enable|restart)\s+([^\s]+)", cmd) if service_match: action = service_match.group(1) - service = service_match.group(2).replace('.service', '') - + service = service_match.group(2).replace(".service", "") + success, status, _ = self._execute_command( f"systemctl is-active {service} 2>/dev/null", needs_sudo=False ) - + if action in ["start", "enable"] and status.strip() == "active": return self._create_conflict_result( resource_type="service", @@ -257,10 +274,10 @@ def _check_service_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Service '{service}' is already running", is_active=True, ) - + # service command if cmd.startswith("service ") or " service " in cmd: - service_match = re.search(r'service\s+(\S+)\s+(start|restart)', cmd) + service_match = re.search(r"service\s+(\S+)\s+(start|restart)", cmd) if service_match: service = service_match.group(1) success, status, _ = self._execute_command( @@ -274,26 +291,28 @@ def 
_check_service_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Service '{service}' is already running", is_active=True, ) - + return result - + def _check_file_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for file/directory conflicts.""" result = {"has_conflict": False} - paths_in_cmd = re.findall(r'(/[^\s>|]+)', cmd) - + paths_in_cmd = re.findall(r"(/[^\s>|]+)", cmd) + for path in paths_in_cmd: # Skip common read paths if path in ["/dev/null", "/etc/os-release", "/proc/", "/sys/"]: continue - + # Check for file creation/modification commands - is_write_cmd = any(p in cmd for p in [">" , "tee ", "cp ", "mv ", "touch ", "mkdir ", "echo "]) - + is_write_cmd = any( + p in cmd for p in [">", "tee ", "cp ", "mv ", "touch ", "mkdir ", "echo "] + ) + if is_write_cmd and os.path.exists(path): is_dir = os.path.isdir(path) resource_type = "directory" if is_dir else "file" - + return self._create_conflict_result( resource_type=resource_type, resource_name=path, @@ -301,29 +320,45 @@ def _check_file_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"{resource_type.title()} '{path}' already exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Keep existing {resource_type}", "commands": []}, - {"action": "backup", "description": f"Backup and overwrite", "commands": [f"sudo cp -r {path} {path}.cortex.bak"]}, - {"action": "recreate", "description": f"Remove and recreate", "commands": [f"sudo rm -rf {path}" if is_dir else f"sudo rm -f {path}"]}, - ] + { + "action": "use_existing", + "description": f"Keep existing {resource_type}", + "commands": [], + }, + { + "action": "backup", + "description": "Backup and overwrite", + "commands": [f"sudo cp -r {path} {path}.cortex.bak"], + }, + { + "action": "recreate", + "description": "Remove and recreate", + "commands": [f"sudo rm -rf {path}" if is_dir else f"sudo rm -f {path}"], + }, + ], ) - + return result - + def 
_check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for package installation conflicts.""" result = {"has_conflict": False} - + # apt install if "apt install" in cmd or "apt-get install" in cmd: - pkg_match = re.search(r'(?:apt|apt-get)\s+install\s+(?:-y\s+)?(\S+)', cmd) + pkg_match = re.search(r"(?:apt|apt-get)\s+install\s+(?:-y\s+)?(\S+)", cmd) if pkg_match: package = pkg_match.group(1) - success, _, _ = self._execute_command(f"dpkg -l {package} 2>/dev/null | grep -q '^ii'", needs_sudo=False) + success, _, _ = self._execute_command( + f"dpkg -l {package} 2>/dev/null | grep -q '^ii'", needs_sudo=False + ) if success: # Get version - _, version_out, _ = self._execute_command(f"dpkg -l {package} | grep '^ii' | awk '{{print $3}}'", needs_sudo=False) + _, version_out, _ = self._execute_command( + f"dpkg -l {package} | grep '^ii' | awk '{{print $3}}'", needs_sudo=False + ) version = version_out.strip() if version_out else "unknown" - + return self._create_conflict_result( resource_type="package", resource_name=package, @@ -331,18 +366,32 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Package '{package}' is already installed (version: {version})", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Keep current version ({version})", "commands": []}, - {"action": "upgrade", "description": "Upgrade to latest version", "commands": [f"sudo apt install --only-upgrade -y {package}"]}, - {"action": "reinstall", "description": "Reinstall package", "commands": [f"sudo apt install --reinstall -y {package}"]}, - ] + { + "action": "use_existing", + "description": f"Keep current version ({version})", + "commands": [], + }, + { + "action": "upgrade", + "description": "Upgrade to latest version", + "commands": [f"sudo apt install --only-upgrade -y {package}"], + }, + { + "action": "reinstall", + "description": "Reinstall package", + "commands": [f"sudo apt install --reinstall -y 
{package}"], + }, + ], ) - + # pip install if "pip install" in cmd or "pip3 install" in cmd: - pkg_match = re.search(r'pip3?\s+install\s+(?:-[^\s]+\s+)*(\S+)', cmd) + pkg_match = re.search(r"pip3?\s+install\s+(?:-[^\s]+\s+)*(\S+)", cmd) if pkg_match: package = pkg_match.group(1) - success, version_out, _ = self._execute_command(f"pip3 show {package} 2>/dev/null | grep Version", needs_sudo=False) + success, version_out, _ = self._execute_command( + f"pip3 show {package} 2>/dev/null | grep Version", needs_sudo=False + ) if success and version_out: version = version_out.replace("Version:", "").strip() return self._create_conflict_result( @@ -352,18 +401,32 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Python package '{package}' is already installed (version: {version})", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Keep current version ({version})", "commands": []}, - {"action": "upgrade", "description": "Upgrade to latest", "commands": [f"pip3 install --upgrade {package}"]}, - {"action": "reinstall", "description": "Reinstall package", "commands": [f"pip3 install --force-reinstall {package}"]}, - ] + { + "action": "use_existing", + "description": f"Keep current version ({version})", + "commands": [], + }, + { + "action": "upgrade", + "description": "Upgrade to latest", + "commands": [f"pip3 install --upgrade {package}"], + }, + { + "action": "reinstall", + "description": "Reinstall package", + "commands": [f"pip3 install --force-reinstall {package}"], + }, + ], ) - + # npm install -g if "npm install -g" in cmd or "npm i -g" in cmd: - pkg_match = re.search(r'npm\s+(?:install|i)\s+-g\s+(\S+)', cmd) + pkg_match = re.search(r"npm\s+(?:install|i)\s+-g\s+(\S+)", cmd) if pkg_match: package = pkg_match.group(1) - success, version_out, _ = self._execute_command(f"npm list -g {package} 2>/dev/null | grep {package}", needs_sudo=False) + success, version_out, _ = self._execute_command( + f"npm 
list -g {package} 2>/dev/null | grep {package}", needs_sudo=False + ) if success and version_out: return self._create_conflict_result( resource_type="npm_package", @@ -372,11 +435,19 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"npm package '{package}' is already installed globally", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": "Keep current version", "commands": []}, - {"action": "upgrade", "description": "Update to latest", "commands": [f"npm update -g {package}"]}, - ] + { + "action": "use_existing", + "description": "Keep current version", + "commands": [], + }, + { + "action": "upgrade", + "description": "Update to latest", + "commands": [f"npm update -g {package}"], + }, + ], ) - + # snap install - check if snap is available and package is installed if "snap install" in cmd: # First check if snap is available @@ -389,15 +460,25 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion="Snap package manager is not installed. 
Installing snap first.", is_active=False, alternative_actions=[ - {"action": "install_first", "description": "Install snapd first", "commands": ["sudo apt update", "sudo apt install -y snapd"]}, - {"action": "use_apt", "description": "Use apt instead of snap", "commands": []}, - ] + { + "action": "install_first", + "description": "Install snapd first", + "commands": ["sudo apt update", "sudo apt install -y snapd"], + }, + { + "action": "use_apt", + "description": "Use apt instead of snap", + "commands": [], + }, + ], ) - - pkg_match = re.search(r'snap\s+install\s+(\S+)', cmd) + + pkg_match = re.search(r"snap\s+install\s+(\S+)", cmd) if pkg_match: package = pkg_match.group(1) - success, version_out, _ = self._execute_command(f"snap list {package} 2>/dev/null | grep {package}", needs_sudo=False) + success, version_out, _ = self._execute_command( + f"snap list {package} 2>/dev/null | grep {package}", needs_sudo=False + ) if success and version_out: return self._create_conflict_result( resource_type="snap_package", @@ -406,11 +487,19 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Snap package '{package}' is already installed", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": "Keep current version", "commands": []}, - {"action": "refresh", "description": "Refresh to latest", "commands": [f"sudo snap refresh {package}"]}, - ] + { + "action": "use_existing", + "description": "Keep current version", + "commands": [], + }, + { + "action": "refresh", + "description": "Refresh to latest", + "commands": [f"sudo snap refresh {package}"], + }, + ], ) - + # flatpak install - check if flatpak is available and package is installed if "flatpak install" in cmd: # First check if flatpak is available @@ -423,15 +512,25 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion="Flatpak is not installed. 
Installing flatpak first.", is_active=False, alternative_actions=[ - {"action": "install_first", "description": "Install flatpak first", "commands": ["sudo apt update", "sudo apt install -y flatpak"]}, - {"action": "use_apt", "description": "Use apt instead of flatpak", "commands": []}, - ] + { + "action": "install_first", + "description": "Install flatpak first", + "commands": ["sudo apt update", "sudo apt install -y flatpak"], + }, + { + "action": "use_apt", + "description": "Use apt instead of flatpak", + "commands": [], + }, + ], ) - - pkg_match = re.search(r'flatpak\s+install\s+(?:-y\s+)?(\S+)', cmd) + + pkg_match = re.search(r"flatpak\s+install\s+(?:-y\s+)?(\S+)", cmd) if pkg_match: package = pkg_match.group(1) - success, version_out, _ = self._execute_command(f"flatpak list | grep -i {package}", needs_sudo=False) + success, version_out, _ = self._execute_command( + f"flatpak list | grep -i {package}", needs_sudo=False + ) if success and version_out: return self._create_conflict_result( resource_type="flatpak_package", @@ -440,44 +539,54 @@ def _check_package_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Flatpak application '{package}' is already installed", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": "Keep current version", "commands": []}, - {"action": "upgrade", "description": "Update to latest", "commands": [f"flatpak update -y {package}"]}, - ] + { + "action": "use_existing", + "description": "Keep current version", + "commands": [], + }, + { + "action": "upgrade", + "description": "Update to latest", + "commands": [f"flatpak update -y {package}"], + }, + ], ) - + return result - + def _check_tool_available(self, tool: str) -> bool: """Check if a command-line tool is available.""" success, output, _ = self._execute_command(f"which {tool} 2>/dev/null", needs_sudo=False) return success and bool(output.strip()) - + def _check_port_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: 
"""Check for port binding conflicts.""" result = {"has_conflict": False} - + # Look for port mappings port_patterns = [ - r'-p\s+(\d+):\d+', # docker -p 8080:80 - r'--port[=\s]+(\d+)', # --port 8080 - r':(\d+)\s', # :8080 - r'listen\s+(\d+)', # nginx listen 80 + r"-p\s+(\d+):\d+", # docker -p 8080:80 + r"--port[=\s]+(\d+)", # --port 8080 + r":(\d+)\s", # :8080 + r"listen\s+(\d+)", # nginx listen 80 ] - + for pattern in port_patterns: match = re.search(pattern, cmd) if match: port = match.group(1) - + # Check if port is in use - success, output, _ = self._execute_command(f"ss -tlnp | grep ':{port} '", needs_sudo=True) + success, output, _ = self._execute_command( + f"ss -tlnp | grep ':{port} '", needs_sudo=True + ) if success and output: # Get process using the port process = "unknown" proc_match = re.search(r'users:\(\("([^"]+)"', output) if proc_match: process = proc_match.group(1) - + return self._create_conflict_result( resource_type="port", resource_name=port, @@ -485,23 +594,33 @@ def _check_port_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Port {port} is already in use by '{process}'", is_active=True, alternative_actions=[ - {"action": "use_different", "description": f"Use a different port", "commands": []}, - {"action": "stop_existing", "description": f"Stop process using port {port}", "commands": [f"sudo fuser -k {port}/tcp"]}, - ] + { + "action": "use_different", + "description": "Use a different port", + "commands": [], + }, + { + "action": "stop_existing", + "description": f"Stop process using port {port}", + "commands": [f"sudo fuser -k {port}/tcp"], + }, + ], ) - + return result - + def _check_user_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for user/group creation conflicts.""" result = {"has_conflict": False} - + # useradd / adduser if "useradd" in cmd or "adduser" in cmd: - user_match = re.search(r'(?:useradd|adduser)\s+(?:[^\s]+\s+)*(\S+)$', cmd) + user_match = 
re.search(r"(?:useradd|adduser)\s+(?:[^\s]+\s+)*(\S+)$", cmd) if user_match: username = user_match.group(1) - success, _, _ = self._execute_command(f"id {username} 2>/dev/null", needs_sudo=False) + success, _, _ = self._execute_command( + f"id {username} 2>/dev/null", needs_sudo=False + ) if success: return self._create_conflict_result( resource_type="user", @@ -510,17 +629,27 @@ def _check_user_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"User '{username}' already exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Use existing user '{username}'", "commands": []}, - {"action": "modify", "description": f"Modify existing user", "commands": []}, - ] + { + "action": "use_existing", + "description": f"Use existing user '{username}'", + "commands": [], + }, + { + "action": "modify", + "description": "Modify existing user", + "commands": [], + }, + ], ) - + # groupadd / addgroup if "groupadd" in cmd or "addgroup" in cmd: - group_match = re.search(r'(?:groupadd|addgroup)\s+(\S+)$', cmd) + group_match = re.search(r"(?:groupadd|addgroup)\s+(\S+)$", cmd) if group_match: groupname = group_match.group(1) - success, _, _ = self._execute_command(f"getent group {groupname} 2>/dev/null", needs_sudo=False) + success, _, _ = self._execute_command( + f"getent group {groupname} 2>/dev/null", needs_sudo=False + ) if success: return self._create_conflict_result( resource_type="group", @@ -529,22 +658,28 @@ def _check_user_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Group '{groupname}' already exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Use existing group '{groupname}'", "commands": []}, - ] + { + "action": "use_existing", + "description": f"Use existing group '{groupname}'", + "commands": [], + }, + ], ) - + return result - + def _check_venv_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for virtual environment conflicts.""" 
result = {"has_conflict": False} - + # python -m venv / virtualenv if "python" in cmd and "venv" in cmd: - venv_match = re.search(r'(?:venv|virtualenv)\s+(\S+)', cmd) + venv_match = re.search(r"(?:venv|virtualenv)\s+(\S+)", cmd) if venv_match: venv_path = venv_match.group(1) - if os.path.exists(venv_path) and os.path.exists(os.path.join(venv_path, "bin", "python")): + if os.path.exists(venv_path) and os.path.exists( + os.path.join(venv_path, "bin", "python") + ): return self._create_conflict_result( resource_type="venv", resource_name=venv_path, @@ -552,22 +687,32 @@ def _check_venv_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Virtual environment '{venv_path}' already exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Use existing venv", "commands": []}, - {"action": "recreate", "description": "Delete and recreate", "commands": [f"rm -rf {venv_path}"]}, - ] + { + "action": "use_existing", + "description": "Use existing venv", + "commands": [], + }, + { + "action": "recreate", + "description": "Delete and recreate", + "commands": [f"rm -rf {venv_path}"], + }, + ], ) - + return result - + def _check_database_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for database creation conflicts.""" result = {"has_conflict": False} - + # MySQL/MariaDB create database if "mysql" in cmd.lower() and "create database" in cmd.lower(): - db_match = re.search(r'create\s+database\s+(?:if\s+not\s+exists\s+)?(\S+)', cmd, re.IGNORECASE) + db_match = re.search( + r"create\s+database\s+(?:if\s+not\s+exists\s+)?(\S+)", cmd, re.IGNORECASE + ) if db_match: - dbname = db_match.group(1).strip('`"\'') + dbname = db_match.group(1).strip("`\"'") success, output, _ = self._execute_command( f"mysql -e \"SHOW DATABASES LIKE '{dbname}'\" 2>/dev/null", needs_sudo=False ) @@ -579,16 +724,24 @@ def _check_database_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"MySQL database '{dbname}' already 
exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Use existing database", "commands": []}, - {"action": "recreate", "description": "Drop and recreate", "commands": [f"mysql -e 'DROP DATABASE {dbname}'"]}, - ] + { + "action": "use_existing", + "description": "Use existing database", + "commands": [], + }, + { + "action": "recreate", + "description": "Drop and recreate", + "commands": [f"mysql -e 'DROP DATABASE {dbname}'"], + }, + ], ) - + # PostgreSQL create database if "createdb" in cmd or ("psql" in cmd and "create database" in cmd.lower()): - db_match = re.search(r'(?:createdb|create\s+database)\s+(\S+)', cmd, re.IGNORECASE) + db_match = re.search(r"(?:createdb|create\s+database)\s+(\S+)", cmd, re.IGNORECASE) if db_match: - dbname = db_match.group(1).strip('"\'') + dbname = db_match.group(1).strip("\"'") success, _, _ = self._execute_command( f"psql -lqt 2>/dev/null | cut -d \\| -f 1 | grep -qw {dbname}", needs_sudo=False ) @@ -600,17 +753,25 @@ def _check_database_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"PostgreSQL database '{dbname}' already exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": f"Use existing database", "commands": []}, - {"action": "recreate", "description": "Drop and recreate", "commands": [f"dropdb {dbname}"]}, - ] + { + "action": "use_existing", + "description": "Use existing database", + "commands": [], + }, + { + "action": "recreate", + "description": "Drop and recreate", + "commands": [f"dropdb {dbname}"], + }, + ], ) - + return result - + def _check_cron_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: """Check for cron job conflicts.""" result = {"has_conflict": False} - + # crontab entries if "crontab" in cmd or "/etc/cron" in cmd: # Check if similar cron job exists @@ -620,7 +781,9 @@ def _check_cron_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: if job_match: job_content = job_match.group(1) # Check 
existing crontab - success, crontab, _ = self._execute_command("crontab -l 2>/dev/null", needs_sudo=False) + success, crontab, _ = self._execute_command( + "crontab -l 2>/dev/null", needs_sudo=False + ) if success and crontab: # Check if similar job exists job_cmd = job_content.split()[-1] if job_content else "" @@ -632,23 +795,33 @@ def _check_cron_conflict(self, cmd: str, purpose: str) -> dict[str, Any]: suggestion=f"Similar cron job for '{job_cmd}' already exists", is_active=True, alternative_actions=[ - {"action": "use_existing", "description": "Keep existing cron job", "commands": []}, - {"action": "replace", "description": "Replace existing job", "commands": []}, - ] + { + "action": "use_existing", + "description": "Keep existing cron job", + "commands": [], + }, + { + "action": "replace", + "description": "Replace existing job", + "commands": [], + }, + ], ) - + return result class VerificationRunner: """Runs verification tests after command execution.""" - - def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + + def _execute_command( + self, cmd: str, needs_sudo: bool = False, timeout: int = 120 + ) -> tuple[bool, str, str]: """Execute a single command.""" try: if needs_sudo and not cmd.strip().startswith("sudo"): cmd = f"sudo {cmd}" - + result = subprocess.run( ["sudo", "bash", "-c", cmd] if needs_sudo else cmd, shell=not needs_sudo, @@ -661,7 +834,7 @@ def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 12 return False, "", f"Command timed out after {timeout} seconds" except Exception as e: return False, "", str(e) - + def run_verification_tests( self, commands_executed: list[CommandLog], @@ -669,67 +842,73 @@ def run_verification_tests( ) -> tuple[bool, list[dict[str, Any]]]: """ Run verification tests after all commands have been executed. 
- + Returns: Tuple of (all_passed, test_results) """ console.print() console.print("[bold cyan]🧪 Running verification tests...[/bold cyan]") - + test_results = [] services_to_check = set() configs_to_check = set() files_to_check = set() - + for cmd_log in commands_executed: cmd = cmd_log.command.lower() - + if "systemctl" in cmd or "service " in cmd: - svc_match = re.search(r'(?:systemctl|service)\s+\w+\s+([^\s]+)', cmd) + svc_match = re.search(r"(?:systemctl|service)\s+\w+\s+([^\s]+)", cmd) if svc_match: - services_to_check.add(svc_match.group(1).replace('.service', '')) - + services_to_check.add(svc_match.group(1).replace(".service", "")) + if "nginx" in cmd: configs_to_check.add("nginx") if "apache" in cmd or "a2ensite" in cmd: configs_to_check.add("apache") - - paths = re.findall(r'(/[^\s>|&]+)', cmd_log.command) + + paths = re.findall(r"(/[^\s>|&]+)", cmd_log.command) for path in paths: - if any(x in path for x in ['/etc/', '/var/', '/opt/']): + if any(x in path for x in ["/etc/", "/var/", "/opt/"]): files_to_check.add(path) - + all_passed = True - + # Config tests if "nginx" in configs_to_check: console.print("[dim] Testing nginx configuration...[/dim]") success, stdout, stderr = self._execute_command("nginx -t", needs_sudo=True) - test_results.append({ - "test": "nginx -t", - "passed": success, - "output": stdout if success else stderr, - }) + test_results.append( + { + "test": "nginx -t", + "passed": success, + "output": stdout if success else stderr, + } + ) if success: console.print("[green] ✓ Nginx configuration is valid[/green]") else: console.print(f"[red] ✗ Nginx config test failed: {stderr[:100]}[/red]") all_passed = False - + if "apache" in configs_to_check: console.print("[dim] Testing Apache configuration...[/dim]") - success, stdout, stderr = self._execute_command("apache2ctl configtest", needs_sudo=True) - test_results.append({ - "test": "apache2ctl configtest", - "passed": success, - "output": stdout if success else stderr, - }) + success, 
stdout, stderr = self._execute_command( + "apache2ctl configtest", needs_sudo=True + ) + test_results.append( + { + "test": "apache2ctl configtest", + "passed": success, + "output": stdout if success else stderr, + } + ) if success: console.print("[green] ✓ Apache configuration is valid[/green]") else: console.print(f"[red] ✗ Apache config test failed: {stderr[:100]}[/red]") all_passed = False - + # Service status tests for service in services_to_check: console.print(f"[dim] Checking service {service}...[/dim]") @@ -737,76 +916,92 @@ def run_verification_tests( f"systemctl is-active {service}", needs_sudo=False ) is_active = stdout.strip() == "active" - test_results.append({ - "test": f"systemctl is-active {service}", - "passed": is_active, - "output": stdout, - }) + test_results.append( + { + "test": f"systemctl is-active {service}", + "passed": is_active, + "output": stdout, + } + ) if is_active: console.print(f"[green] ✓ Service {service} is running[/green]") else: console.print(f"[yellow] ⚠ Service {service} status: {stdout.strip()}[/yellow]") - + # File existence tests for file_path in list(files_to_check)[:5]: if os.path.exists(file_path): success, _, _ = self._execute_command(f"test -r {file_path}", needs_sudo=True) - test_results.append({ - "test": f"file exists: {file_path}", - "passed": True, - "output": "File exists and is readable", - }) + test_results.append( + { + "test": f"file exists: {file_path}", + "passed": True, + "output": "File exists and is readable", + } + ) else: - test_results.append({ - "test": f"file exists: {file_path}", - "passed": False, - "output": "File does not exist", - }) + test_results.append( + { + "test": f"file exists: {file_path}", + "passed": False, + "output": "File does not exist", + } + ) console.print(f"[yellow] ⚠ File not found: {file_path}[/yellow]") - + # Connectivity tests query_lower = user_query.lower() if any(x in query_lower for x in ["proxy", "forward", "port", "listen"]): - port_match = 
re.search(r'port\s*(\d+)|:(\d+)', user_query) + port_match = re.search(r"port\s*(\d+)|:(\d+)", user_query) if port_match: port = port_match.group(1) or port_match.group(2) console.print(f"[dim] Testing connectivity on port {port}...[/dim]") success, stdout, stderr = self._execute_command( f"curl -s -o /dev/null -w '%{{http_code}}' http://localhost:{port}/ 2>/dev/null || echo 'failed'", - needs_sudo=False + needs_sudo=False, ) if stdout.strip() not in ["failed", "000", ""]: - console.print(f"[green] ✓ Port {port} responding (HTTP {stdout.strip()})[/green]") - test_results.append({ - "test": f"curl localhost:{port}", - "passed": True, - "output": f"HTTP {stdout.strip()}", - }) + console.print( + f"[green] ✓ Port {port} responding (HTTP {stdout.strip()})[/green]" + ) + test_results.append( + { + "test": f"curl localhost:{port}", + "passed": True, + "output": f"HTTP {stdout.strip()}", + } + ) else: - console.print(f"[yellow] ⚠ Port {port} not responding (may be expected)[/yellow]") - + console.print( + f"[yellow] ⚠ Port {port} not responding (may be expected)[/yellow]" + ) + # Summary passed = sum(1 for t in test_results if t["passed"]) total = len(test_results) - + console.print() if all_passed: console.print(f"[bold green]✓ All tests passed ({passed}/{total})[/bold green]") else: - console.print(f"[bold yellow]⚠ Some tests failed ({passed}/{total} passed)[/bold yellow]") - + console.print( + f"[bold yellow]⚠ Some tests failed ({passed}/{total} passed)[/bold yellow]" + ) + return all_passed, test_results class FileUsefulnessAnalyzer: """Analyzes file content usefulness for modifications.""" - - def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 120) -> tuple[bool, str, str]: + + def _execute_command( + self, cmd: str, needs_sudo: bool = False, timeout: int = 120 + ) -> tuple[bool, str, str]: """Execute a single command.""" try: if needs_sudo and not cmd.strip().startswith("sudo"): cmd = f"sudo {cmd}" - + result = subprocess.run( ["sudo", 
"bash", "-c", cmd] if needs_sudo else cmd, shell=not needs_sudo, @@ -819,7 +1014,7 @@ def _execute_command(self, cmd: str, needs_sudo: bool = False, timeout: int = 12 return False, "", f"Command timed out after {timeout} seconds" except Exception as e: return False, "", str(e) - + def check_file_exists_and_usefulness( self, cmd: str, @@ -834,78 +1029,92 @@ def check_file_exists_and_usefulness( "recommendations": [], "modified_command": cmd, } - + file_creation_patterns = [ - (r'(?:echo|printf)\s+.*?>\s*([^\s;|&]+)', 'write'), - (r'(?:echo|printf)\s+.*?>>\s*([^\s;|&]+)', 'append'), - (r'tee\s+(?:-a\s+)?([^\s;|&]+)', 'write'), - (r'cp\s+[^\s]+\s+([^\s;|&]+)', 'copy'), - (r'touch\s+([^\s;|&]+)', 'create'), - (r'cat\s+.*?>\s*([^\s;|&]+)', 'write'), - (r'sed\s+-i[^\s]*\s+.*?\s+([^\s;|&]+)$', 'modify'), - (r'mv\s+[^\s]+\s+([^\s;|&]+)', 'move'), + (r"(?:echo|printf)\s+.*?>\s*([^\s;|&]+)", "write"), + (r"(?:echo|printf)\s+.*?>>\s*([^\s;|&]+)", "append"), + (r"tee\s+(?:-a\s+)?([^\s;|&]+)", "write"), + (r"cp\s+[^\s]+\s+([^\s;|&]+)", "copy"), + (r"touch\s+([^\s;|&]+)", "create"), + (r"cat\s+.*?>\s*([^\s;|&]+)", "write"), + (r"sed\s+-i[^\s]*\s+.*?\s+([^\s;|&]+)$", "modify"), + (r"mv\s+[^\s]+\s+([^\s;|&]+)", "move"), ] - + target_files = [] operation_type = None - + for pattern, op_type in file_creation_patterns: matches = re.findall(pattern, cmd) for match in matches: - if match.startswith('/') or match.startswith('~'): + if match.startswith("/") or match.startswith("~"): target_files.append(match) operation_type = op_type - + result["files_checked"] = target_files - + for file_path in target_files: - if file_path.startswith('~'): + if file_path.startswith("~"): file_path = os.path.expanduser(file_path) - + if os.path.exists(file_path): result["existing_files"].append(file_path) console.print(f"[yellow]📁 File exists: {file_path}[/yellow]") - - success, content, _ = self._execute_command(f"cat '{file_path}' 2>/dev/null", needs_sudo=True) - + + success, content, _ = 
self._execute_command( + f"cat '{file_path}' 2>/dev/null", needs_sudo=True + ) + if success and content: useful_parts = self.analyze_file_usefulness(content, purpose, user_query) - + if useful_parts["is_useful"]: result["useful_content"][file_path] = useful_parts - console.print(f"[cyan] ✓ Contains useful content: {useful_parts['summary']}[/cyan]") - + console.print( + f"[cyan] ✓ Contains useful content: {useful_parts['summary']}[/cyan]" + ) + if useful_parts["action"] == "merge": - result["recommendations"].append({ - "file": file_path, - "action": "merge", - "reason": useful_parts["reason"], - "keep_sections": useful_parts.get("keep_sections", []), - }) + result["recommendations"].append( + { + "file": file_path, + "action": "merge", + "reason": useful_parts["reason"], + "keep_sections": useful_parts.get("keep_sections", []), + } + ) elif useful_parts["action"] == "modify": - result["recommendations"].append({ - "file": file_path, - "action": "modify", - "reason": useful_parts["reason"], - }) + result["recommendations"].append( + { + "file": file_path, + "action": "modify", + "reason": useful_parts["reason"], + } + ) else: - result["recommendations"].append({ - "file": file_path, - "action": "backup_and_replace", - "reason": "Existing content not relevant", - }) - elif operation_type in ['write', 'copy', 'create']: + result["recommendations"].append( + { + "file": file_path, + "action": "backup_and_replace", + "reason": "Existing content not relevant", + } + ) + elif operation_type in ["write", "copy", "create"]: parent_dir = os.path.dirname(file_path) if parent_dir and not os.path.exists(parent_dir): - console.print(f"[yellow]📁 Parent directory doesn't exist: {parent_dir}[/yellow]") - result["recommendations"].append({ - "file": file_path, - "action": "create_parent", - "reason": f"Need to create {parent_dir} first", - }) - + console.print( + f"[yellow]📁 Parent directory doesn't exist: {parent_dir}[/yellow]" + ) + result["recommendations"].append( + { + "file": 
file_path, + "action": "create_parent", + "reason": f"Need to create {parent_dir} first", + } + ) + return result - + def analyze_file_usefulness( self, content: str, @@ -920,20 +1129,22 @@ def analyze_file_usefulness( "reason": "", "keep_sections": [], } - + content_lower = content.lower() purpose_lower = purpose.lower() query_lower = user_query.lower() - + # Nginx configuration - if any(x in content_lower for x in ["server {", "location", "nginx", "proxy_pass", "listen"]): + if any( + x in content_lower for x in ["server {", "location", "nginx", "proxy_pass", "listen"] + ): result["is_useful"] = True - + has_server_block = "server {" in content_lower or "server{" in content_lower has_location = "location" in content_lower has_proxy = "proxy_pass" in content_lower has_ssl = "ssl" in content_lower or "443" in content - + summary_parts = [] if has_server_block: summary_parts.append("server block") @@ -943,12 +1154,12 @@ def analyze_file_usefulness( summary_parts.append("proxy settings") if has_ssl: summary_parts.append("SSL config") - + result["summary"] = "Has " + ", ".join(summary_parts) - + if "proxy" in query_lower or "forward" in query_lower: if has_proxy: - existing_proxy = re.search(r'proxy_pass\s+([^;]+)', content) + existing_proxy = re.search(r"proxy_pass\s+([^;]+)", content) if existing_proxy: result["action"] = "modify" result["reason"] = f"Existing proxy to {existing_proxy.group(1).strip()}" @@ -966,70 +1177,72 @@ def analyze_file_usefulness( else: result["action"] = "merge" result["reason"] = "Preserve existing configuration" - + # Apache configuration - elif any(x in content_lower for x in [" 2: result["is_useful"] = True result["summary"] = f"Related content ({len(overlap)} keyword matches)" result["action"] = "backup_and_replace" result["reason"] = "Content partially relevant, backing up" - + return result - + def apply_file_recommendations( self, recommendations: list[dict[str, Any]], ) -> list[str]: """Apply recommendations for existing files.""" 
commands_executed = [] - + for rec in recommendations: file_path = rec["file"] action = rec["action"] - + if action == "backup_and_replace": backup_path = f"{file_path}.cortex.bak.{int(time.time())}" backup_cmd = f"sudo cp '{file_path}' '{backup_path}'" @@ -1037,7 +1250,7 @@ def apply_file_recommendations( if success: console.print(f"[dim] ✓ Backed up to {backup_path}[/dim]") commands_executed.append(backup_cmd) - + elif action == "create_parent": parent = os.path.dirname(file_path) mkdir_cmd = f"sudo mkdir -p '{parent}'" @@ -1045,6 +1258,5 @@ def apply_file_recommendations( if success: console.print(f"[dim] ✓ Created directory {parent}[/dim]") commands_executed.append(mkdir_cmd) - - return commands_executed + return commands_executed diff --git a/cortex/semantic_cache.py b/cortex/semantic_cache.py index 89a42f518..1d01b370d 100644 --- a/cortex/semantic_cache.py +++ b/cortex/semantic_cache.py @@ -94,8 +94,7 @@ def _init_database(self) -> None: with self._pool.get_connection() as conn: cur = conn.cursor() - cur.execute( - """ + cur.execute(""" CREATE TABLE IF NOT EXISTS llm_cache_entries ( id INTEGER PRIMARY KEY AUTOINCREMENT, provider TEXT NOT NULL, @@ -109,29 +108,22 @@ def _init_database(self) -> None: last_accessed TEXT NOT NULL, hit_count INTEGER NOT NULL DEFAULT 0 ) - """ - ) - cur.execute( - """ + """) + cur.execute(""" CREATE UNIQUE INDEX IF NOT EXISTS idx_llm_cache_unique ON llm_cache_entries(provider, model, system_hash, prompt_hash) - """ - ) - cur.execute( - """ + """) + cur.execute(""" CREATE INDEX IF NOT EXISTS idx_llm_cache_lru ON llm_cache_entries(last_accessed) - """ - ) - cur.execute( - """ + """) + cur.execute(""" CREATE TABLE IF NOT EXISTS llm_cache_stats ( id INTEGER PRIMARY KEY CHECK (id = 1), hits INTEGER NOT NULL DEFAULT 0, misses INTEGER NOT NULL DEFAULT 0 ) - """ - ) + """) cur.execute("INSERT OR IGNORE INTO llm_cache_stats(id, hits, misses) VALUES (1, 0, 0)") conn.commit() diff --git a/cortex/system_info_generator.py 
b/cortex/system_info_generator.py index 5b6f1000b..d2dd4b750 100644 --- a/cortex/system_info_generator.py +++ b/cortex/system_info_generator.py @@ -6,13 +6,13 @@ Usage: generator = SystemInfoGenerator(api_key="...", provider="claude") - + # Simple info queries result = generator.get_info("What version of Python is installed?") - + # Application-specific queries result = generator.get_app_info("nginx", "What's the current nginx configuration?") - + # Structured info retrieval info = generator.get_structured_info("hardware", ["cpu", "memory", "disk"]) """ @@ -36,6 +36,7 @@ class InfoCategory(str, Enum): """Categories of system information.""" + HARDWARE = "hardware" SOFTWARE = "software" NETWORK = "network" @@ -55,6 +56,7 @@ class InfoCategory(str, Enum): @dataclass class InfoCommand: """A single read-only command for gathering information.""" + command: str purpose: str category: InfoCategory = InfoCategory.CUSTOM @@ -64,6 +66,7 @@ class InfoCommand: @dataclass class InfoResult: """Result of executing an info command.""" + command: str success: bool output: str @@ -74,6 +77,7 @@ class InfoResult: @dataclass class SystemInfoResult: """Complete result of a system info query.""" + query: str answer: str commands_executed: list[InfoResult] = field(default_factory=list) @@ -92,17 +96,22 @@ class SystemInfoResult: ], "memory": [ InfoCommand("free -h", "Get memory usage in human-readable format", InfoCategory.HARDWARE), - InfoCommand("head -20 /proc/meminfo", "Get detailed memory information", InfoCategory.HARDWARE), + InfoCommand( + "head -20 /proc/meminfo", "Get detailed memory information", InfoCategory.HARDWARE + ), ], "disk": [ InfoCommand("df -h", "Get disk space usage", InfoCategory.STORAGE), InfoCommand("lsblk", "List block devices", InfoCategory.STORAGE), ], "gpu": [ - InfoCommand("nvidia-smi --query-gpu=name,memory.total,driver_version --format=csv,noheader", "Get NVIDIA GPU info", InfoCategory.HARDWARE), + InfoCommand( + "nvidia-smi 
--query-gpu=name,memory.total,driver_version --format=csv,noheader", + "Get NVIDIA GPU info", + InfoCategory.HARDWARE, + ), InfoCommand("lspci", "List PCI devices including VGA", InfoCategory.HARDWARE), ], - # OS Information "os": [ InfoCommand("cat /etc/os-release", "Get OS release information", InfoCategory.SOFTWARE), @@ -113,7 +122,6 @@ class SystemInfoResult: InfoCommand("uname -r", "Get kernel version", InfoCategory.SOFTWARE), InfoCommand("cat /proc/version", "Get detailed kernel version", InfoCategory.SOFTWARE), ], - # Network Information "network": [ InfoCommand("ip addr show", "List network interfaces", InfoCategory.NETWORK), @@ -124,26 +132,32 @@ class SystemInfoResult: InfoCommand("cat /etc/resolv.conf", "Get DNS configuration", InfoCategory.NETWORK), InfoCommand("host google.com", "Test DNS resolution", InfoCategory.NETWORK), ], - # Services "services": [ - InfoCommand("systemctl list-units --type=service --state=running --no-pager", "List running services", InfoCategory.SERVICES), - InfoCommand("systemctl list-units --type=service --state=failed --no-pager", "List failed services", InfoCategory.SERVICES), + InfoCommand( + "systemctl list-units --type=service --state=running --no-pager", + "List running services", + InfoCategory.SERVICES, + ), + InfoCommand( + "systemctl list-units --type=service --state=failed --no-pager", + "List failed services", + InfoCategory.SERVICES, + ), ], - # Security "security": [ InfoCommand("ufw status", "Check firewall status", InfoCategory.SECURITY), InfoCommand("aa-status", "Check AppArmor status", InfoCategory.SECURITY), InfoCommand("wc -l /etc/passwd", "Count system users", InfoCategory.SECURITY), ], - # Processes "processes": [ - InfoCommand("ps aux --sort=-%mem", "Top memory-consuming processes", InfoCategory.PROCESSES), + InfoCommand( + "ps aux --sort=-%mem", "Top memory-consuming processes", InfoCategory.PROCESSES + ), InfoCommand("ps aux --sort=-%cpu", "Top CPU-consuming processes", InfoCategory.PROCESSES), ], - # 
Environment "environment": [ InfoCommand("env", "List environment variables", InfoCategory.CONFIGURATION), @@ -157,16 +171,28 @@ class SystemInfoResult: APP_INFO_TEMPLATES: dict[str, dict[str, list[InfoCommand]]] = { "nginx": { "status": [ - InfoCommand("systemctl status nginx --no-pager", "Check nginx service status", InfoCategory.SERVICES), + InfoCommand( + "systemctl status nginx --no-pager", + "Check nginx service status", + InfoCategory.SERVICES, + ), InfoCommand("nginx -v", "Get nginx version", InfoCategory.SOFTWARE), ], "config": [ - InfoCommand("cat /etc/nginx/nginx.conf", "Get nginx configuration", InfoCategory.CONFIGURATION), - InfoCommand("ls -la /etc/nginx/sites-enabled/", "List enabled sites", InfoCategory.CONFIGURATION), + InfoCommand( + "cat /etc/nginx/nginx.conf", "Get nginx configuration", InfoCategory.CONFIGURATION + ), + InfoCommand( + "ls -la /etc/nginx/sites-enabled/", "List enabled sites", InfoCategory.CONFIGURATION + ), ], "logs": [ - InfoCommand("tail -50 /var/log/nginx/access.log", "Recent access logs", InfoCategory.LOGS), - InfoCommand("tail -50 /var/log/nginx/error.log", "Recent error logs", InfoCategory.LOGS), + InfoCommand( + "tail -50 /var/log/nginx/access.log", "Recent access logs", InfoCategory.LOGS + ), + InfoCommand( + "tail -50 /var/log/nginx/error.log", "Recent error logs", InfoCategory.LOGS + ), ], }, "docker": { @@ -179,27 +205,43 @@ class SystemInfoResult: InfoCommand("docker images", "List images", InfoCategory.APPLICATION), ], "resources": [ - InfoCommand("docker stats --no-stream", "Container resource usage", InfoCategory.PERFORMANCE), + InfoCommand( + "docker stats --no-stream", "Container resource usage", InfoCategory.PERFORMANCE + ), ], }, "postgresql": { "status": [ - InfoCommand("systemctl status postgresql --no-pager", "Check PostgreSQL service", InfoCategory.SERVICES), + InfoCommand( + "systemctl status postgresql --no-pager", + "Check PostgreSQL service", + InfoCategory.SERVICES, + ), InfoCommand("psql --version", 
"Get PostgreSQL version", InfoCategory.SOFTWARE), ], "config": [ - InfoCommand("head -50 /etc/postgresql/14/main/postgresql.conf", "PostgreSQL config", InfoCategory.CONFIGURATION), + InfoCommand( + "head -50 /etc/postgresql/14/main/postgresql.conf", + "PostgreSQL config", + InfoCategory.CONFIGURATION, + ), ], }, "mysql": { "status": [ - InfoCommand("systemctl status mysql --no-pager", "Check MySQL status", InfoCategory.SERVICES), + InfoCommand( + "systemctl status mysql --no-pager", "Check MySQL status", InfoCategory.SERVICES + ), InfoCommand("mysql --version", "Get MySQL version", InfoCategory.SOFTWARE), ], }, "redis": { "status": [ - InfoCommand("systemctl status redis-server --no-pager", "Check Redis status", InfoCategory.SERVICES), + InfoCommand( + "systemctl status redis-server --no-pager", + "Check Redis status", + InfoCategory.SERVICES, + ), InfoCommand("redis-cli --version", "Get Redis version", InfoCategory.SOFTWARE), ], "info": [ @@ -212,10 +254,14 @@ class SystemInfoResult: InfoCommand("which python3", "Find Python executable", InfoCategory.SOFTWARE), ], "packages": [ - InfoCommand("pip3 list --format=freeze", "List installed packages", InfoCategory.PACKAGES), + InfoCommand( + "pip3 list --format=freeze", "List installed packages", InfoCategory.PACKAGES + ), ], "venv": [ - InfoCommand("echo $VIRTUAL_ENV", "Check active virtual environment", InfoCategory.CONFIGURATION), + InfoCommand( + "echo $VIRTUAL_ENV", "Check active virtual environment", InfoCategory.CONFIGURATION + ), ], }, "nodejs": { @@ -232,24 +278,36 @@ class SystemInfoResult: InfoCommand("git --version", "Get Git version", InfoCategory.SOFTWARE), ], "config": [ - InfoCommand("git config --global --list", "Git global config", InfoCategory.CONFIGURATION), + InfoCommand( + "git config --global --list", "Git global config", InfoCategory.CONFIGURATION + ), ], }, "ssh": { "status": [ - InfoCommand("systemctl status ssh --no-pager", "Check SSH service", InfoCategory.SERVICES), + InfoCommand( + 
"systemctl status ssh --no-pager", "Check SSH service", InfoCategory.SERVICES + ), ], "config": [ - InfoCommand("head -50 /etc/ssh/sshd_config", "SSH server config", InfoCategory.CONFIGURATION), + InfoCommand( + "head -50 /etc/ssh/sshd_config", "SSH server config", InfoCategory.CONFIGURATION + ), ], }, "systemd": { "status": [ InfoCommand("systemctl --version", "Get systemd version", InfoCategory.SOFTWARE), - InfoCommand("systemctl list-units --state=failed --no-pager", "Failed units", InfoCategory.SERVICES), + InfoCommand( + "systemctl list-units --state=failed --no-pager", + "Failed units", + InfoCategory.SERVICES, + ), ], "timers": [ - InfoCommand("systemctl list-timers --no-pager", "List active timers", InfoCategory.SERVICES), + InfoCommand( + "systemctl list-timers --no-pager", "List active timers", InfoCategory.SERVICES + ), ], }, } @@ -258,7 +316,7 @@ class SystemInfoResult: class SystemInfoGenerator: """ Generates read-only commands to retrieve system and application information. - + Uses LLM to generate appropriate commands based on natural language queries, while enforcing read-only access through CommandValidator. """ @@ -275,20 +333,22 @@ def __init__( ): """ Initialize the system info generator. 
- + Args: api_key: API key for LLM provider (defaults to env var) provider: LLM provider ("claude", "openai", "ollama") model: Optional model override debug: Enable debug output """ - self.api_key = api_key or os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + self.api_key = ( + api_key or os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") + ) self.provider = provider.lower() self.model = model or self._default_model() self.debug = debug - + self._initialize_client() - + def _default_model(self) -> str: if self.provider == "openai": return "gpt-4o" @@ -297,18 +357,20 @@ def _default_model(self) -> str: elif self.provider == "ollama": return "llama3.2" return "gpt-4o" - + def _initialize_client(self): """Initialize the LLM client.""" if self.provider == "openai": try: from openai import OpenAI + self.client = OpenAI(api_key=self.api_key) except ImportError: raise ImportError("OpenAI package not installed. Run: pip install openai") elif self.provider == "claude": try: from anthropic import Anthropic + self.client = Anthropic(api_key=self.api_key) except ImportError: raise ImportError("Anthropic package not installed. Run: pip install anthropic") @@ -322,7 +384,7 @@ def _get_system_prompt(self, context: str = "") -> str: """Get the system prompt for info command generation.""" app_list = ", ".join(sorted(APP_INFO_TEMPLATES.keys())) category_list = ", ".join([c.value for c in InfoCategory]) - + prompt = f"""You are a Linux system information assistant that generates READ-ONLY shell commands. Your task is to generate shell commands that gather system information to answer the user's query. 
@@ -386,8 +448,9 @@ def _truncate_output(self, output: str) -> str: def _execute_command(self, command: str, timeout: int = 30) -> InfoResult: """Execute a validated read-only command.""" import time + start_time = time.time() - + # Validate command first is_valid, error = CommandValidator.validate_command(command) if not is_valid: @@ -398,7 +461,7 @@ def _execute_command(self, command: str, timeout: int = 30) -> InfoResult: error=f"Command blocked: {error}", execution_time=time.time() - start_time, ) - + try: result = subprocess.run( command, @@ -454,6 +517,7 @@ def _call_llm(self, system_prompt: str, user_prompt: str) -> dict[str, Any]: content = response.choices[0].message.content elif self.provider == "ollama": import httpx + response = httpx.post( f"{self.ollama_url}/api/chat", json={ @@ -470,17 +534,21 @@ def _call_llm(self, system_prompt: str, user_prompt: str) -> dict[str, Any]: content = response.json()["message"]["content"] else: raise ValueError(f"Unsupported provider: {self.provider}") - + # Parse JSON from response json_match = re.search(r"\{[\s\S]*\}", content) if json_match: return json.loads(json_match.group()) raise ValueError("No JSON found in response") - + except json.JSONDecodeError as e: if self.debug: console.print(f"[red]JSON parse error: {e}[/red]") - return {"response_type": "answer", "answer": f"Error parsing LLM response: {e}", "reasoning": ""} + return { + "response_type": "answer", + "answer": f"Error parsing LLM response: {e}", + "reasoning": "", + } except Exception as e: if self.debug: console.print(f"[red]LLM error: {e}[/red]") @@ -489,30 +557,30 @@ def _call_llm(self, system_prompt: str, user_prompt: str) -> dict[str, Any]: def get_info(self, query: str, context: str = "") -> SystemInfoResult: """ Get system information based on a natural language query. - + Uses an agentic loop to: 1. Generate commands to gather information 2. Execute commands (read-only only) 3. Analyze results 4. 
Either generate more commands or provide final answer - + Args: query: Natural language question about the system context: Optional additional context for the LLM - + Returns: SystemInfoResult with answer and command execution details """ system_prompt = self._get_system_prompt(context) commands_executed: list[InfoResult] = [] history: list[dict[str, str]] = [] - + user_prompt = f"Query: {query}" - + for iteration in range(self.MAX_ITERATIONS): if self.debug: console.print(f"[dim]Iteration {iteration + 1}/{self.MAX_ITERATIONS}[/dim]") - + # Build prompt with history full_prompt = user_prompt if history: @@ -520,15 +588,15 @@ def get_info(self, query: str, context: str = "") -> SystemInfoResult: for i, entry in enumerate(history, 1): full_prompt += f"\n--- Command {i} ---\n" full_prompt += f"Command: {entry['command']}\n" - if entry['success']: + if entry["success"]: full_prompt += f"Output:\n{self._truncate_output(entry['output'])}\n" else: full_prompt += f"Error: {entry['error']}\n" full_prompt += "\nBased on these results, either run another command or provide the final answer.\n" - + # Call LLM response = self._call_llm(system_prompt, full_prompt) - + if response.get("response_type") == "answer": # Final answer return SystemInfoResult( @@ -537,31 +605,33 @@ def get_info(self, query: str, context: str = "") -> SystemInfoResult: commands_executed=commands_executed, raw_data={h["command"]: h["output"] for h in history if h.get("success")}, ) - + elif response.get("response_type") == "command": command = response.get("command", "") if not command: continue - + if self.debug: console.print(f"[cyan]Executing:[/cyan] {command}") - + result = self._execute_command(command) commands_executed.append(result) - - history.append({ - "command": command, - "success": result.success, - "output": result.output, - "error": result.error, - }) - + + history.append( + { + "command": command, + "success": result.success, + "output": result.output, + "error": result.error, + } + ) + 
if self.debug: if result.success: - console.print(f"[green]✓ Success[/green]") + console.print("[green]✓ Success[/green]") else: console.print(f"[red]✗ Failed: {result.error}[/red]") - + # Max iterations reached return SystemInfoResult( query=query, @@ -571,31 +641,31 @@ def get_info(self, query: str, context: str = "") -> SystemInfoResult: ) def get_app_info( - self, - app_name: str, + self, + app_name: str, query: str | None = None, aspects: list[str] | None = None, ) -> SystemInfoResult: """ Get information about a specific application. - + Args: app_name: Application name (nginx, docker, postgresql, etc.) query: Optional natural language query about the app aspects: Optional list of aspects to check (status, config, logs, etc.) - + Returns: SystemInfoResult with application information """ app_lower = app_name.lower() commands_executed: list[InfoResult] = [] raw_data: dict[str, Any] = {} - + # Check if we have predefined commands for this app if app_lower in APP_INFO_TEMPLATES: templates = APP_INFO_TEMPLATES[app_lower] aspects_to_check = aspects or list(templates.keys()) - + for aspect in aspects_to_check: if aspect in templates: for cmd_info in templates[aspect]: @@ -603,7 +673,7 @@ def get_app_info( commands_executed.append(result) if result.success and result.output: raw_data[f"{aspect}:{cmd_info.purpose}"] = result.output - + # If there's a specific query, use LLM to analyze if query: context = f"""Application: {app_name} @@ -611,18 +681,20 @@ def get_app_info( {json.dumps(raw_data, indent=2)[:2000]} Now answer the specific question about this application.""" - + result = self.get_info(query, context) result.commands_executed = commands_executed + result.commands_executed result.raw_data.update(raw_data) return result - + # Generate summary answer from raw data answer_parts = [f"**{app_name.title()} Information**\n"] for key, value in raw_data.items(): aspect, desc = key.split(":", 1) - answer_parts.append(f"\n**{aspect.title()}** 
({desc}):\n```\n{value[:500]}{'...' if len(value) > 500 else ''}\n```") - + answer_parts.append( + f"\n**{aspect.title()}** ({desc}):\n```\n{value[:500]}{'...' if len(value) > 500 else ''}\n```" + ) + return SystemInfoResult( query=query or f"Get information about {app_name}", answer="\n".join(answer_parts) if raw_data else f"No information found for {app_name}", @@ -638,11 +710,11 @@ def get_structured_info( ) -> SystemInfoResult: """ Get structured system information for a category. - + Args: category: Info category (hardware, network, services, etc.) aspects: Optional specific aspects (cpu, memory, disk for hardware, etc.) - + Returns: SystemInfoResult with structured information """ @@ -650,10 +722,10 @@ def get_structured_info( category = category.lower() else: category = category.value - + commands_executed: list[InfoResult] = [] raw_data: dict[str, Any] = {} - + # Map categories to common commands category_mapping = { "hardware": ["cpu", "memory", "disk", "gpu"], @@ -666,9 +738,9 @@ def get_structured_info( "performance": ["cpu", "memory", "processes"], "configuration": ["environment"], } - + aspects_to_check = aspects or category_mapping.get(category, []) - + for aspect in aspects_to_check: if aspect in COMMON_INFO_COMMANDS: for cmd_info in COMMON_INFO_COMMANDS[aspect]: @@ -676,33 +748,39 @@ def get_structured_info( commands_executed.append(result) if result.success and result.output: raw_data[f"{aspect}:{cmd_info.purpose}"] = result.output - + # Generate structured answer answer_parts = [f"**{category.title()} Information**\n"] for key, value in raw_data.items(): aspect, desc = key.split(":", 1) - answer_parts.append(f"\n**{aspect.upper()}** ({desc}):\n```\n{value[:800]}{'...' if len(value) > 800 else ''}\n```") - + answer_parts.append( + f"\n**{aspect.upper()}** ({desc}):\n```\n{value[:800]}{'...' 
if len(value) > 800 else ''}\n```" + ) + return SystemInfoResult( query=f"Get {category} information", answer="\n".join(answer_parts) if raw_data else f"No {category} information found", commands_executed=commands_executed, raw_data=raw_data, - category=InfoCategory(category) if category in [c.value for c in InfoCategory] else InfoCategory.CUSTOM, + category=( + InfoCategory(category) + if category in [c.value for c in InfoCategory] + else InfoCategory.CUSTOM + ), ) def quick_info(self, info_type: str) -> str: """ Quick lookup for common system information. - + Args: info_type: Type of info (cpu, memory, disk, os, network, etc.) - + Returns: String with the requested information """ info_lower = info_type.lower() - + if info_lower in COMMON_INFO_COMMANDS: outputs = [] for cmd_info in COMMON_INFO_COMMANDS[info_lower]: @@ -710,13 +788,15 @@ def quick_info(self, info_type: str) -> str: if result.success and result.output: outputs.append(result.output) return "\n\n".join(outputs) if outputs else f"No {info_type} information available" - + # Try as app info if info_lower in APP_INFO_TEMPLATES: result = self.get_app_info(info_lower, aspects=["status", "version"]) return result.answer - - return f"Unknown info type: {info_type}. Available: {', '.join(COMMON_INFO_COMMANDS.keys())}" + + return ( + f"Unknown info type: {info_type}. Available: {', '.join(COMMON_INFO_COMMANDS.keys())}" + ) def list_available_info(self) -> dict[str, list[str]]: """List all available pre-defined info types and applications.""" @@ -733,57 +813,57 @@ def get_system_info_generator( ) -> SystemInfoGenerator: """ Factory function to create a SystemInfoGenerator with default configuration. - + Args: provider: LLM provider to use debug: Enable debug output - + Returns: Configured SystemInfoGenerator instance """ api_key = os.environ.get("ANTHROPIC_API_KEY") or os.environ.get("OPENAI_API_KEY") if not api_key: raise ValueError("No API key found. 
Set ANTHROPIC_API_KEY or OPENAI_API_KEY") - + return SystemInfoGenerator(api_key=api_key, provider=provider, debug=debug) # CLI helper for quick testing if __name__ == "__main__": import sys - + if len(sys.argv) < 2: print("Usage: python system_info_generator.py ") print(" python system_info_generator.py --quick ") print(" python system_info_generator.py --app [query]") print(" python system_info_generator.py --list") sys.exit(1) - + try: generator = get_system_info_generator(debug=True) - + if sys.argv[1] == "--list": available = generator.list_available_info() console.print("\n[bold]Available Information Types:[/bold]") console.print(f"System: {', '.join(available['system_info'])}") console.print(f"Apps: {', '.join(available['applications'])}") console.print(f"Categories: {', '.join(available['categories'])}") - + elif sys.argv[1] == "--quick" and len(sys.argv) > 2: info = generator.quick_info(sys.argv[2]) console.print(Panel(info, title=f"{sys.argv[2].title()} Info")) - + elif sys.argv[1] == "--app" and len(sys.argv) > 2: app_name = sys.argv[2] query = " ".join(sys.argv[3:]) if len(sys.argv) > 3 else None result = generator.get_app_info(app_name, query) console.print(Panel(result.answer, title=f"{app_name.title()} Info")) - + else: query = " ".join(sys.argv[1:]) result = generator.get_info(query) console.print(Panel(result.answer, title="System Info")) - + if result.commands_executed: table = Table(title="Commands Executed") table.add_column("Command", style="cyan") @@ -793,8 +873,7 @@ def get_system_info_generator( status = "✓" if cmd.success else "✗" table.add_row(cmd.command[:60], status, f"{cmd.execution_time:.2f}s") console.print(table) - + except ValueError as e: console.print(f"[red]Error: {e}[/red]") sys.exit(1) - diff --git a/cortex/watch_service.py b/cortex/watch_service.py index 83681a6cf..899ec1835 100644 --- a/cortex/watch_service.py +++ b/cortex/watch_service.py @@ -29,7 +29,7 @@ class CortexWatchDaemon: """Background daemon that monitors terminal 
activity.""" - + def __init__(self): self.running = False self.cortex_dir = Path.home() / ".cortex" @@ -37,41 +37,41 @@ def __init__(self): self.terminals_dir = self.cortex_dir / "terminals" self.pid_file = self.cortex_dir / "watch_service.pid" self.state_file = self.cortex_dir / "watch_state.json" - + # Terminal tracking self.terminals: dict[str, dict[str, Any]] = {} self.terminal_counter = 0 - + # Track commands seen from watch_hook to avoid duplicates with bash_history self._watch_hook_commands: set[str] = set() self._recent_commands: list[str] = [] # Last 100 commands for dedup - + # Ensure directories exist self.cortex_dir.mkdir(parents=True, exist_ok=True) self.terminals_dir.mkdir(parents=True, exist_ok=True) - + # Setup signal handlers signal.signal(signal.SIGTERM, self._handle_signal) signal.signal(signal.SIGINT, self._handle_signal) signal.signal(signal.SIGHUP, self._handle_reload) - + def _handle_signal(self, signum, frame): """Handle shutdown signals.""" self.log(f"Received signal {signum}, shutting down...") self.running = False - + def _handle_reload(self, signum, frame): """Handle reload signal (SIGHUP).""" self.log("Received SIGHUP, reloading configuration...") self._load_state() - + def log(self, message: str): """Log a message to the service log.""" log_file = self.cortex_dir / "watch_service.log" timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") with open(log_file, "a") as f: f.write(f"[{timestamp}] {message}\n") - + def _load_state(self): """Load saved state from file.""" if self.state_file.exists(): @@ -82,7 +82,7 @@ def _load_state(self): self.terminals = state.get("terminals", {}) except Exception as e: self.log(f"Error loading state: {e}") - + def _save_state(self): """Save current state to file.""" try: @@ -95,15 +95,15 @@ def _save_state(self): json.dump(state, f, indent=2) except Exception as e: self.log(f"Error saving state: {e}") - + def _get_terminal_id(self, pts: str) -> str: """Generate or retrieve a unique terminal 
ID.""" if pts in self.terminals: return self.terminals[pts]["id"] - + self.terminal_counter += 1 terminal_id = f"term_{self.terminal_counter:04d}" - + self.terminals[pts] = { "id": terminal_id, "pts": pts, @@ -111,10 +111,10 @@ def _get_terminal_id(self, pts: str) -> str: "is_cortex": False, "command_count": 0, } - + self._save_state() return terminal_id - + def _is_cortex_terminal(self, pid: int) -> bool: """Check if a process is a Cortex terminal.""" try: @@ -124,7 +124,7 @@ def _is_cortex_terminal(self, pid: int) -> bool: environ = environ_file.read_bytes() if b"CORTEX_TERMINAL=1" in environ: return True - + # Check command line cmdline_file = Path(f"/proc/{pid}/cmdline") if cmdline_file.exists(): @@ -133,13 +133,13 @@ def _is_cortex_terminal(self, pid: int) -> bool: return True except (PermissionError, FileNotFoundError, ProcessLookupError): pass - + return False - + def _get_active_terminals(self) -> list[dict]: """Get list of active terminal processes.""" terminals = [] - + try: # Find all pts (pseudo-terminal) devices pts_dir = Path("/dev/pts") @@ -147,15 +147,12 @@ def _get_active_terminals(self) -> list[dict]: for pts_file in pts_dir.iterdir(): if pts_file.name.isdigit(): pts_path = str(pts_file) - + # Find process using this pts result = subprocess.run( - ["fuser", pts_path], - capture_output=True, - text=True, - timeout=2 + ["fuser", pts_path], capture_output=True, text=True, timeout=2 ) - + if result.stdout.strip(): pids = result.stdout.strip().split() for pid_str in pids: @@ -163,35 +160,37 @@ def _get_active_terminals(self) -> list[dict]: pid = int(pid_str) is_cortex = self._is_cortex_terminal(pid) terminal_id = self._get_terminal_id(pts_path) - + # Update cortex flag if pts_path in self.terminals: self.terminals[pts_path]["is_cortex"] = is_cortex - - terminals.append({ - "pts": pts_path, - "pid": pid, - "id": terminal_id, - "is_cortex": is_cortex, - }) + + terminals.append( + { + "pts": pts_path, + "pid": pid, + "id": terminal_id, + "is_cortex": 
is_cortex, + } + ) except ValueError: continue - + except Exception as e: self.log(f"Error getting terminals: {e}") - + return terminals - + def _monitor_bash_history(self): """Monitor bash history for new commands using inotify if available.""" history_files = [ Path.home() / ".bash_history", Path.home() / ".zsh_history", ] - + positions: dict[str, int] = {} last_commands: dict[str, str] = {} # Track last command per file to avoid duplicates - + # Initialize positions to current end of file for hist_file in history_files: if hist_file.exists(): @@ -204,63 +203,63 @@ def _monitor_bash_history(self): last_commands[str(hist_file)] = lines[-1].strip() except Exception: pass - + # Try to use inotify for more efficient monitoring try: + import ctypes import select import struct - import ctypes - + # Check if inotify is available libc = ctypes.CDLL("libc.so.6") inotify_init = libc.inotify_init inotify_add_watch = libc.inotify_add_watch - + IN_MODIFY = 0x00000002 IN_CLOSE_WRITE = 0x00000008 - + fd = inotify_init() if fd < 0: raise OSError("Failed to initialize inotify") - + watches = {} for hist_file in history_files: if hist_file.exists(): wd = inotify_add_watch(fd, str(hist_file).encode(), IN_MODIFY | IN_CLOSE_WRITE) if wd >= 0: watches[wd] = hist_file - + self.log(f"Using inotify to monitor {len(watches)} history files") - + while self.running: # Wait for inotify event with timeout r, _, _ = select.select([fd], [], [], 1.0) if not r: continue - + data = os.read(fd, 4096) # Process inotify events for hist_file in history_files: key = str(hist_file) if not hist_file.exists(): continue - + try: current_size = hist_file.stat().st_size - + if key not in positions: positions[key] = current_size continue - + if current_size < positions[key]: positions[key] = current_size continue - + if current_size > positions[key]: with open(hist_file) as f: f.seek(positions[key]) new_content = f.read() - + for line in new_content.split("\n"): line = line.strip() # Skip empty, short, or 
duplicate commands @@ -268,117 +267,117 @@ def _monitor_bash_history(self): if last_commands.get(key) != line: self._log_command(line, "history") last_commands[key] = line - + positions[key] = current_size except Exception as e: self.log(f"Error reading {hist_file}: {e}") - + os.close(fd) return - + except Exception as e: self.log(f"Inotify not available, using polling: {e}") - + # Fallback to polling while self.running: for hist_file in history_files: if not hist_file.exists(): continue - + key = str(hist_file) try: current_size = hist_file.stat().st_size - + if key not in positions: positions[key] = current_size continue - + if current_size < positions[key]: # File was truncated positions[key] = current_size continue - + if current_size > positions[key]: with open(hist_file) as f: f.seek(positions[key]) new_content = f.read() - + for line in new_content.split("\n"): line = line.strip() if line and len(line) > 1: if last_commands.get(key) != line: self._log_command(line, "history") last_commands[key] = line - + positions[key] = current_size - + except Exception as e: self.log(f"Error reading {hist_file}: {e}") - + time.sleep(0.3) - + def _monitor_watch_hook(self): """Monitor the watch hook log file and sync to terminal_commands.json.""" position = 0 - + while self.running: try: if not self.watch_log.exists(): time.sleep(0.5) continue - + current_size = self.watch_log.stat().st_size - + if current_size < position: position = 0 - + if current_size > position: with open(self.watch_log) as f: f.seek(position) new_content = f.read() - + for line in new_content.split("\n"): line = line.strip() if not line or len(line) < 2: continue - + # Parse format: TTY|COMMAND (new format from updated hook) # Skip lines that don't have the TTY| prefix or have "shared|" if "|" not in line: continue - + parts = line.split("|", 1) terminal_id = parts[0] - + # Skip "shared" entries (those come from bash_history monitor) if terminal_id == "shared": continue - + # Must have valid TTY 
format (pts_X, tty_X, etc.) if not terminal_id or terminal_id == "unknown": continue - + command = parts[1] if len(parts) > 1 else "" if not command: continue - + # Skip duplicates if self._is_duplicate(command): continue - + # Mark this command as seen from watch_hook self._watch_hook_commands.add(command) - + # Log to terminal_commands.json only self._log_to_json(command, "watch_hook", terminal_id) - + position = current_size - + except Exception as e: self.log(f"Error monitoring watch hook: {e}") - + time.sleep(0.2) - + def _log_to_json(self, command: str, source: str, terminal_id: str): """Log a command only to terminal_commands.json.""" try: @@ -389,24 +388,24 @@ def _log_to_json(self, command: str, source: str, terminal_id: str): "source": source, "terminal_id": terminal_id, } - + with open(detailed_log, "a") as f: f.write(json.dumps(entry) + "\n") except Exception as e: self.log(f"Error logging to JSON: {e}") - + def _is_duplicate(self, command: str) -> bool: """Check if command was recently logged to avoid duplicates.""" if command in self._recent_commands: return True - + # Keep last 100 commands self._recent_commands.append(command) if len(self._recent_commands) > 100: self._recent_commands.pop(0) - + return False - + def _log_command(self, command: str, source: str = "unknown", terminal_id: str | None = None): """Log a command from bash_history (watch_hook uses _log_to_json directly).""" # Skip cortex commands @@ -416,31 +415,31 @@ def _log_command(self, command: str, source: str = "unknown", terminal_id: str | return if command.startswith("source ") and ".cortex" in command: return - + # Skip if this command was already logged by watch_hook if command in self._watch_hook_commands: self._watch_hook_commands.discard(command) # Clear it for next time return - + # Skip duplicates if self._is_duplicate(command): return - + # For bash_history source, we can't know which terminal - use "shared" if terminal_id is None: terminal_id = "shared" - + try: # Write to 
watch_log with format TTY|COMMAND with open(self.watch_log, "a") as f: f.write(f"{terminal_id}|{command}\n") - + # Log to JSON self._log_to_json(command, source, terminal_id) - + except Exception as e: self.log(f"Error logging command: {e}") - + def _cleanup_stale_terminals(self): """Remove stale terminal entries.""" while self.running: @@ -451,20 +450,20 @@ def _cleanup_stale_terminals(self): for pts_file in pts_dir.iterdir(): if pts_file.name.isdigit(): active_pts.add(str(pts_file)) - + # Remove stale entries stale = [pts for pts in self.terminals if pts not in active_pts] for pts in stale: del self.terminals[pts] - + if stale: self._save_state() - + except Exception as e: self.log(f"Error cleaning up terminals: {e}") - + time.sleep(30) # Check every 30 seconds - + def start(self): """Start the watch daemon.""" # Check if already running @@ -477,55 +476,55 @@ def start(self): except (ProcessLookupError, ValueError): # Stale PID file self.pid_file.unlink() - + # Write PID file self.pid_file.write_text(str(os.getpid())) - + self.running = True self._load_state() - + self.log("Cortex Watch Service starting...") - + # Start monitor threads threads = [ threading.Thread(target=self._monitor_bash_history, daemon=True), threading.Thread(target=self._monitor_watch_hook, daemon=True), threading.Thread(target=self._cleanup_stale_terminals, daemon=True), ] - + for t in threads: t.start() - + self.log(f"Cortex Watch Service started (PID: {os.getpid()})") - + # Main loop - just keep alive and handle signals try: while self.running: time.sleep(1) finally: self._shutdown() - + return True - + def _shutdown(self): """Clean shutdown.""" self.log("Shutting down...") self._save_state() - + if self.pid_file.exists(): self.pid_file.unlink() - + self.log("Cortex Watch Service stopped") - + def stop(self): """Stop the running daemon.""" if not self.pid_file.exists(): return False, "Service not running" - + try: pid = int(self.pid_file.read_text().strip()) os.kill(pid, signal.SIGTERM) - 
+ # Wait for process to exit for _ in range(10): try: @@ -533,15 +532,15 @@ def stop(self): time.sleep(0.5) except ProcessLookupError: break - + return True, f"Service stopped (PID: {pid})" - + except ProcessLookupError: self.pid_file.unlink() return True, "Service was not running" except Exception as e: return False, f"Error stopping service: {e}" - + def status(self) -> dict: """Get service status.""" status = { @@ -550,7 +549,7 @@ def status(self) -> dict: "terminals": 0, "commands_logged": 0, } - + if self.pid_file.exists(): try: pid = int(self.pid_file.read_text().strip()) @@ -559,17 +558,17 @@ def status(self) -> dict: status["pid"] = pid except (ProcessLookupError, ValueError): pass - + if self.watch_log.exists(): try: content = self.watch_log.read_text() status["commands_logged"] = len([l for l in content.split("\n") if l.strip()]) except Exception: pass - + self._load_state() status["terminals"] = len(self.terminals) - + return status @@ -577,7 +576,7 @@ def get_systemd_service_content() -> str: """Generate systemd service file content.""" python_path = sys.executable service_script = Path(__file__).resolve() - + return f"""[Unit] Description=Cortex Terminal Watch Service Documentation=https://github.com/cortexlinux/cortex @@ -606,25 +605,27 @@ def install_service() -> tuple[bool, str]: """Install the systemd user service.""" service_dir = Path.home() / ".config" / "systemd" / "user" service_file = service_dir / "cortex-watch.service" - + try: # Create directory service_dir.mkdir(parents=True, exist_ok=True) - + # Write service file service_file.write_text(get_systemd_service_content()) - + # Reload systemd subprocess.run(["systemctl", "--user", "daemon-reload"], check=True) - + # Enable and start service subprocess.run(["systemctl", "--user", "enable", "cortex-watch.service"], check=True) subprocess.run(["systemctl", "--user", "start", "cortex-watch.service"], check=True) - + # Enable lingering so service runs even when not logged in 
subprocess.run(["loginctl", "enable-linger", os.getenv("USER", "")], capture_output=True) - - return True, f"""✓ Cortex Watch Service installed and started! + + return ( + True, + f"""✓ Cortex Watch Service installed and started! Service file: {service_file} @@ -638,7 +639,8 @@ def install_service() -> tuple[bool, str]: systemctl --user restart cortex-watch # Restart systemctl --user stop cortex-watch # Stop journalctl --user -u cortex-watch # View logs -""" +""", + ) except subprocess.CalledProcessError as e: return False, f"Failed to install service: {e}" except Exception as e: @@ -648,19 +650,21 @@ def install_service() -> tuple[bool, str]: def uninstall_service() -> tuple[bool, str]: """Uninstall the systemd user service.""" service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" - + try: # Stop and disable service subprocess.run(["systemctl", "--user", "stop", "cortex-watch.service"], capture_output=True) - subprocess.run(["systemctl", "--user", "disable", "cortex-watch.service"], capture_output=True) - + subprocess.run( + ["systemctl", "--user", "disable", "cortex-watch.service"], capture_output=True + ) + # Remove service file if service_file.exists(): service_file.unlink() - + # Reload systemd subprocess.run(["systemctl", "--user", "daemon-reload"], check=True) - + return True, "✓ Cortex Watch Service uninstalled" except Exception as e: return False, f"Error: {e}" @@ -669,42 +673,42 @@ def uninstall_service() -> tuple[bool, str]: def main(): """Main entry point.""" import argparse - + parser = argparse.ArgumentParser(description="Cortex Watch Service") parser.add_argument("--daemon", action="store_true", help="Run as daemon") parser.add_argument("--stop", action="store_true", help="Stop the daemon") parser.add_argument("--status", action="store_true", help="Show status") parser.add_argument("--install", action="store_true", help="Install systemd service") parser.add_argument("--uninstall", action="store_true", help="Uninstall 
systemd service") - + args = parser.parse_args() - + daemon = CortexWatchDaemon() - + if args.install: success, msg = install_service() print(msg) sys.exit(0 if success else 1) - + if args.uninstall: success, msg = uninstall_service() print(msg) sys.exit(0 if success else 1) - + if args.status: status = daemon.status() print(f"Running: {status['running']}") - if status['pid']: + if status["pid"]: print(f"PID: {status['pid']}") print(f"Terminals tracked: {status['terminals']}") print(f"Commands logged: {status['commands_logged']}") sys.exit(0) - + if args.stop: success, msg = daemon.stop() print(msg) sys.exit(0 if success else 1) - + if args.daemon: daemon.start() else: @@ -713,4 +717,3 @@ def main(): if __name__ == "__main__": main() - diff --git a/scripts/setup_ask_do.py b/scripts/setup_ask_do.py index e593a2e08..dd40807ce 100755 --- a/scripts/setup_ask_do.py +++ b/scripts/setup_ask_do.py @@ -29,15 +29,15 @@ # ANSI colors class Colors: - HEADER = '\033[95m' - BLUE = '\033[94m' - CYAN = '\033[96m' - GREEN = '\033[92m' - YELLOW = '\033[93m' - RED = '\033[91m' - BOLD = '\033[1m' - DIM = '\033[2m' - END = '\033[0m' + HEADER = "\033[95m" + BLUE = "\033[94m" + CYAN = "\033[96m" + GREEN = "\033[92m" + YELLOW = "\033[93m" + RED = "\033[91m" + BOLD = "\033[1m" + DIM = "\033[2m" + END = "\033[0m" def print_header(text: str): @@ -67,15 +67,13 @@ def print_error(text: str): print(f"{Colors.RED}✗{Colors.END} {text}") -def run_cmd(cmd: list[str], check: bool = True, capture: bool = False, timeout: int = 300) -> subprocess.CompletedProcess: +def run_cmd( + cmd: list[str], check: bool = True, capture: bool = False, timeout: int = 300 +) -> subprocess.CompletedProcess: """Run a command and return the result.""" try: result = subprocess.run( - cmd, - check=check, - capture_output=capture, - text=True, - timeout=timeout + cmd, check=check, capture_output=capture, text=True, timeout=timeout ) return result except subprocess.CalledProcessError as e: @@ -100,18 +98,18 @@ def 
check_docker() -> bool: def check_ollama_container() -> tuple[bool, bool]: """Check if Ollama container exists and is running. - + Returns: (exists, running) """ try: result = run_cmd( ["docker", "ps", "-a", "--filter", "name=ollama", "--format", "{{.Status}}"], capture=True, - check=False + check=False, ) if result.returncode != 0 or not result.stdout.strip(): return False, False - + status = result.stdout.strip().lower() running = "up" in status return True, running @@ -122,7 +120,7 @@ def check_ollama_container() -> tuple[bool, bool]: def setup_ollama(model: str = "mistral") -> bool: """Set up Ollama Docker container and pull a model.""" print_header("Setting up Ollama (Local LLM)") - + # Check Docker print_step("Checking Docker...") if not check_docker(): @@ -131,10 +129,10 @@ def setup_ollama(model: str = "mistral") -> bool: print(f" {Colors.DIM}Then run: sudo systemctl start docker{Colors.END}") return False print_success("Docker is available") - + # Check existing container exists, running = check_ollama_container() - + if exists and running: print_success("Ollama container is already running") elif exists and not running: @@ -146,66 +144,70 @@ def setup_ollama(model: str = "mistral") -> bool: print_step("Pulling Ollama Docker image...") run_cmd(["docker", "pull", "ollama/ollama"]) print_success("Ollama image pulled") - + print_step("Starting Ollama container...") - run_cmd([ - "docker", "run", "-d", - "--name", "ollama", - "-p", "11434:11434", - "-v", "ollama:/root/.ollama", - "--restart", "unless-stopped", - "ollama/ollama" - ]) + run_cmd( + [ + "docker", + "run", + "-d", + "--name", + "ollama", + "-p", + "11434:11434", + "-v", + "ollama:/root/.ollama", + "--restart", + "unless-stopped", + "ollama/ollama", + ] + ) print_success("Ollama container started") - + # Wait for container to be ready print_step("Waiting for Ollama to initialize...") time.sleep(5) - + # Check if model exists print_step(f"Checking for {model} model...") try: - result = run_cmd( - 
["docker", "exec", "ollama", "ollama", "list"], - capture=True, - check=False - ) + result = run_cmd(["docker", "exec", "ollama", "ollama", "list"], capture=True, check=False) if model in result.stdout: print_success(f"Model {model} is already installed") return True except Exception: pass - + # Pull model print_step(f"Pulling {model} model (this may take a few minutes)...") print(f" {Colors.DIM}Model size: ~4GB for mistral, ~2GB for phi{Colors.END}") - + try: # Use subprocess directly for streaming output process = subprocess.Popen( ["docker", "exec", "ollama", "ollama", "pull", model], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, - text=True + text=True, ) - + for line in process.stdout: line = line.strip() if line: # Show progress if "pulling" in line.lower() or "%" in line: print(f"\r {Colors.DIM}{line[:70]}{Colors.END}", end="", flush=True) - + process.wait() print() # New line after progress - + if process.returncode == 0: print_success(f"Model {model} installed successfully") return True else: print_error(f"Failed to pull model {model}") return False - + except Exception as e: print_error(f"Error pulling model: {e}") return False @@ -214,16 +216,14 @@ def setup_ollama(model: str = "mistral") -> bool: def setup_watch_service() -> bool: """Install and start the Cortex Watch service.""" print_header("Setting up Cortex Watch Service") - + # Check if service is already installed service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" - + if service_file.exists(): print_step("Watch service is already installed, checking status...") result = run_cmd( - ["systemctl", "--user", "is-active", "cortex-watch.service"], - capture=True, - check=False + ["systemctl", "--user", "is-active", "cortex-watch.service"], capture=True, check=False ) if result.stdout.strip() == "active": print_success("Cortex Watch service is running") @@ -234,39 +234,40 @@ def setup_watch_service() -> bool: else: # Install the service print_step("Installing Cortex 
Watch service...") - + try: # Import and run the installation from cortex.watch_service import install_service + success, msg = install_service() - + if success: print_success("Watch service installed and started") - print(f" {Colors.DIM}{msg[:200]}...{Colors.END}" if len(msg) > 200 else f" {Colors.DIM}{msg}{Colors.END}") + print( + f" {Colors.DIM}{msg[:200]}...{Colors.END}" + if len(msg) > 200 + else f" {Colors.DIM}{msg}{Colors.END}" + ) else: print_error(f"Failed to install watch service: {msg}") return False - + except ImportError: print_warning("Could not import watch_service module") print_step("Installing via CLI...") - + result = run_cmd( - ["cortex", "watch", "--install", "--service"], - capture=True, - check=False + ["cortex", "watch", "--install", "--service"], capture=True, check=False ) if result.returncode == 0: print_success("Watch service installed via CLI") else: print_error("Failed to install watch service") return False - + # Verify service is running result = run_cmd( - ["systemctl", "--user", "is-active", "cortex-watch.service"], - capture=True, - check=False + ["systemctl", "--user", "is-active", "cortex-watch.service"], capture=True, check=False ) if result.stdout.strip() == "active": print_success("Watch service is active and monitoring terminals") @@ -279,13 +280,13 @@ def setup_watch_service() -> bool: def setup_shell_hooks() -> bool: """Set up shell hooks for terminal monitoring.""" print_header("Setting up Shell Hooks") - + cortex_dir = Path.home() / ".cortex" cortex_dir.mkdir(parents=True, exist_ok=True) - + # Create watch hook script hook_file = cortex_dir / "watch_hook.sh" - hook_content = '''#!/bin/bash + hook_content = """#!/bin/bash # Cortex Terminal Watch Hook # This hook logs commands for Cortex to monitor during manual intervention @@ -294,72 +295,72 @@ def setup_shell_hooks() -> bool: local histnum="$(history 1 | awk '{print $1}')" [[ "$histnum" == "$__cortex_last_histnum" ]] && return __cortex_last_histnum="$histnum" - + local 
cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" [[ -z "${cmd// /}" ]] && return [[ "$cmd" == cortex* ]] && return [[ "$cmd" == *"source"*".cortex"* ]] && return [[ "$cmd" == *"watch_hook"* ]] && return [[ -n "$CORTEX_TERMINAL" ]] && return - + # Include terminal ID (TTY) in the log local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" echo "${tty_name:-unknown}|$cmd" >> ~/.cortex/terminal_watch.log } export PROMPT_COMMAND='history -a; __cortex_log_cmd' echo "✓ Cortex is now watching this terminal" -''' - +""" + print_step("Creating watch hook script...") hook_file.write_text(hook_content) hook_file.chmod(0o755) print_success(f"Created {hook_file}") - + # Add to .bashrc if not already present bashrc = Path.home() / ".bashrc" marker = "# Cortex Terminal Watch Hook" - + if bashrc.exists(): content = bashrc.read_text() if marker not in content: print_step("Adding hook to .bashrc...") - - bashrc_addition = f''' + + bashrc_addition = f""" {marker} __cortex_last_histnum="" __cortex_log_cmd() {{ local histnum="$(history 1 | awk '{{print $1}}')" [[ "$histnum" == "$__cortex_last_histnum" ]] && return __cortex_last_histnum="$histnum" - + local cmd="$(history 1 | sed "s/^[ ]*[0-9]*[ ]*//")" [[ -z "${{cmd// /}}" ]] && return [[ "$cmd" == cortex* ]] && return [[ "$cmd" == *"source"*".cortex"* ]] && return [[ "$cmd" == *"watch_hook"* ]] && return [[ -n "$CORTEX_TERMINAL" ]] && return - + local tty_name="$(tty 2>/dev/null | sed 's|/dev/||' | tr '/' '_')" echo "${{tty_name:-unknown}}|$cmd" >> ~/.cortex/terminal_watch.log }} export PROMPT_COMMAND='history -a; __cortex_log_cmd' alias cw="source ~/.cortex/watch_hook.sh" -''' +""" with open(bashrc, "a") as f: f.write(bashrc_addition) print_success("Hook added to .bashrc") else: print_success("Hook already in .bashrc") - + # Add to .zshrc if it exists zshrc = Path.home() / ".zshrc" if zshrc.exists(): content = zshrc.read_text() if marker not in content: print_step("Adding hook to .zshrc...") - - zshrc_addition = f''' + + 
zshrc_addition = f""" {marker} typeset -g __cortex_last_cmd="" cortex_watch_hook() {{ @@ -374,31 +375,31 @@ def setup_shell_hooks() -> bool: echo "${{tty_name:-unknown}}|$cmd" >> ~/.cortex/terminal_watch.log }} precmd_functions+=(cortex_watch_hook) -''' +""" with open(zshrc, "a") as f: f.write(zshrc_addition) print_success("Hook added to .zshrc") else: print_success("Hook already in .zshrc") - + return True def check_api_keys() -> dict[str, bool]: """Check for available API keys.""" print_header("Checking API Keys") - + keys = { "ANTHROPIC_API_KEY": False, "OPENAI_API_KEY": False, } - + # Check environment variables for key in keys: if os.environ.get(key): keys[key] = True print_success(f"{key} found in environment") - + # Check .env file env_file = Path.cwd() / ".env" if env_file.exists(): @@ -407,22 +408,22 @@ def check_api_keys() -> dict[str, bool]: if key in content and not keys[key]: keys[key] = True print_success(f"{key} found in .env file") - + # Report missing keys if not any(keys.values()): print_warning("No API keys found") print(f" {Colors.DIM}For cloud LLM, set ANTHROPIC_API_KEY or OPENAI_API_KEY{Colors.END}") print(f" {Colors.DIM}Or use local Ollama (--no-docker to skip){Colors.END}") - + return keys def verify_installation() -> bool: """Verify the installation is working.""" print_header("Verifying Installation") - + all_good = True - + # Check cortex command print_step("Checking cortex command...") result = run_cmd(["cortex", "--version"], capture=True, check=False) @@ -431,65 +432,61 @@ def verify_installation() -> bool: else: print_error("Cortex command not found") all_good = False - + # Check watch service print_step("Checking watch service...") result = run_cmd( - ["systemctl", "--user", "is-active", "cortex-watch.service"], - capture=True, - check=False + ["systemctl", "--user", "is-active", "cortex-watch.service"], capture=True, check=False ) if result.stdout.strip() == "active": print_success("Watch service is running") else: 
print_warning("Watch service is not running") - + # Check Ollama print_step("Checking Ollama...") exists, running = check_ollama_container() if running: print_success("Ollama container is running") - + # Check if model is available - result = run_cmd( - ["docker", "exec", "ollama", "ollama", "list"], - capture=True, - check=False - ) + result = run_cmd(["docker", "exec", "ollama", "ollama", "list"], capture=True, check=False) if result.returncode == 0 and result.stdout.strip(): - models = [line.split()[0] for line in result.stdout.strip().split('\n')[1:] if line.strip()] + models = [ + line.split()[0] for line in result.stdout.strip().split("\n")[1:] if line.strip() + ] if models: print_success(f"Models available: {', '.join(models[:3])}") elif exists: print_warning("Ollama container exists but not running") else: print_warning("Ollama not installed (will use cloud LLM)") - + # Check API keys api_keys = check_api_keys() has_llm = any(api_keys.values()) or running - + if not has_llm: print_error("No LLM available (need API key or Ollama)") all_good = False - + return all_good def uninstall() -> bool: """Remove all ask --do components.""" print_header("Uninstalling Cortex ask --do Components") - + # Stop and remove watch service print_step("Removing watch service...") run_cmd(["systemctl", "--user", "stop", "cortex-watch.service"], check=False) run_cmd(["systemctl", "--user", "disable", "cortex-watch.service"], check=False) - + service_file = Path.home() / ".config" / "systemd" / "user" / "cortex-watch.service" if service_file.exists(): service_file.unlink() print_success("Watch service removed") - + # Remove shell hooks from .bashrc and .zshrc marker = "# Cortex Terminal Watch Hook" for rc_file in [Path.home() / ".bashrc", Path.home() / ".zshrc"]: @@ -497,20 +494,20 @@ def uninstall() -> bool: content = rc_file.read_text() if marker in content: print_step(f"Removing hook from {rc_file.name}...") - lines = content.split('\n') + lines = content.split("\n") new_lines = 
[] skip = False for line in lines: if marker in line: skip = True - elif skip and line.strip() == '': + elif skip and line.strip() == "": skip = False continue elif not skip: new_lines.append(line) - rc_file.write_text('\n'.join(new_lines)) + rc_file.write_text("\n".join(new_lines)) print_success(f"Hook removed from {rc_file.name}") - + # Remove cortex directory files (but keep config) cortex_dir = Path.home() / ".cortex" files_to_remove = [ @@ -526,20 +523,20 @@ def uninstall() -> bool: if filepath.exists(): filepath.unlink() print_success("Cortex watch files removed") - + # Optionally remove Ollama container exists, _ = check_ollama_container() if exists: print_step("Ollama container found") response = input(" Remove Ollama container and data? [y/N]: ").strip().lower() - if response == 'y': + if response == "y": run_cmd(["docker", "stop", "ollama"], check=False) run_cmd(["docker", "rm", "ollama"], check=False) run_cmd(["docker", "volume", "rm", "ollama"], check=False) print_success("Ollama container and data removed") else: print(f" {Colors.DIM}Keeping Ollama container{Colors.END}") - + print_success("Uninstallation complete") return True @@ -554,15 +551,17 @@ def main(): python scripts/setup_ask_do.py --no-docker # Skip Docker/Ollama setup python scripts/setup_ask_do.py --model phi # Use smaller phi model python scripts/setup_ask_do.py --uninstall # Remove all components -""" +""", ) parser.add_argument("--no-docker", action="store_true", help="Skip Docker/Ollama setup") - parser.add_argument("--model", default="mistral", help="Ollama model to install (default: mistral)") + parser.add_argument( + "--model", default="mistral", help="Ollama model to install (default: mistral)" + ) parser.add_argument("--skip-watch", action="store_true", help="Skip watch service installation") parser.add_argument("--uninstall", action="store_true", help="Remove all ask --do components") - + args = parser.parse_args() - + print(f"\n{Colors.BOLD}{Colors.CYAN}") print(" ██████╗ 
██████╗ ██████╗ ████████╗███████╗██╗ ██╗") print(" ██╔════╝██╔═══██╗██╔══██╗╚══██╔══╝██╔════╝╚██╗██╔╝") @@ -572,15 +571,15 @@ def main(): print(" ╚═════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝ ╚══════╝╚═╝ ╚═╝") print(f"{Colors.END}") print(f" {Colors.DIM}ask --do Setup Wizard{Colors.END}\n") - + if args.uninstall: return 0 if uninstall() else 1 - + success = True - + # Step 1: Check API keys api_keys = check_api_keys() - + # Step 2: Setup Ollama (unless skipped) if not args.no_docker: if not setup_ollama(args.model): @@ -591,17 +590,17 @@ def main(): print_warning("Skipping Docker/Ollama setup (--no-docker)") if not any(api_keys.values()): print_warning("No API keys found - you'll need to set one up") - + # Step 3: Setup watch service if not args.skip_watch: if not setup_watch_service(): print_warning("Watch service setup had issues") else: print_warning("Skipping watch service (--skip-watch)") - + # Step 4: Setup shell hooks setup_shell_hooks() - + # Step 5: Verify installation if verify_installation(): print_header("Setup Complete! 🎉") @@ -610,7 +609,7 @@ def main(): {Colors.BOLD}To use Cortex ask --do:{Colors.END} cortex ask --do - + {Colors.BOLD}To start an interactive session:{Colors.END} cortex ask --do "install nginx and configure it" @@ -634,4 +633,3 @@ def main(): if __name__ == "__main__": sys.exit(main()) -