"""Tests for CLI module.

Covers three areas:
  * option enums and their case-insensitive resolver helpers,
  * the Typer ``scan``/``version``/``languages`` commands via ``CliRunner``,
  * ``OutputFormatter`` score-color and severity-style mappings.

NOTE(review): several tests use a ``test_files`` fixture that is not defined
here — presumably provided by ``conftest.py``; verify it yields a path to a
directory of sample sources.
"""

import pytest
from typer.testing import CliRunner

from src.cli.commands import app
from src.cli.options import (
    LanguageType,
    OutputFormat,
    SeverityLevel,
    resolve_language,
    resolve_output_format,
    resolve_severity,
)


class TestCLIOptions:
    """Tests for CLI options and enums."""

    def test_output_format_enum_values(self):
        """Test OutputFormat enum values."""
        assert OutputFormat.TERMINAL.value == "terminal"
        assert OutputFormat.JSON.value == "json"
        assert OutputFormat.MARKDOWN.value == "markdown"

    def test_severity_level_enum_values(self):
        """Test SeverityLevel enum values."""
        assert SeverityLevel.LOW.value == "low"
        assert SeverityLevel.MEDIUM.value == "medium"
        assert SeverityLevel.HIGH.value == "high"
        assert SeverityLevel.CRITICAL.value == "critical"

    def test_language_type_enum_values(self):
        """Test LanguageType enum values."""
        assert LanguageType.PYTHON.value == "python"
        assert LanguageType.JAVASCRIPT.value == "javascript"
        assert LanguageType.TYPESCRIPT.value == "typescript"

    def test_resolve_output_format_terminal(self):
        """Test resolving terminal format (case-insensitive; None defaults to terminal)."""
        assert resolve_output_format("terminal") == OutputFormat.TERMINAL
        assert resolve_output_format("TERMINAL") == OutputFormat.TERMINAL
        assert resolve_output_format(None) == OutputFormat.TERMINAL

    def test_resolve_output_format_json(self):
        """Test resolving JSON format."""
        assert resolve_output_format("json") == OutputFormat.JSON
        assert resolve_output_format("JSON") == OutputFormat.JSON

    def test_resolve_output_format_markdown(self):
        """Test resolving Markdown format."""
        assert resolve_output_format("markdown") == OutputFormat.MARKDOWN
        assert resolve_output_format("MARKDOWN") == OutputFormat.MARKDOWN

    def test_resolve_output_format_invalid(self):
        """Test resolving invalid format returns terminal."""
        assert resolve_output_format("invalid") == OutputFormat.TERMINAL

    def test_resolve_severity(self):
        """Test resolving severity levels (case-insensitive; None/unknown -> None)."""
        assert resolve_severity("low") == SeverityLevel.LOW
        assert resolve_severity("medium") == SeverityLevel.MEDIUM
        assert resolve_severity("high") == SeverityLevel.HIGH
        assert resolve_severity("critical") == SeverityLevel.CRITICAL
        assert resolve_severity("MEDIUM") == SeverityLevel.MEDIUM
        assert resolve_severity(None) is None
        assert resolve_severity("invalid") is None

    def test_resolve_language(self):
        """Test resolving language types (case-insensitive; None/unknown -> None)."""
        assert resolve_language("python") == LanguageType.PYTHON
        assert resolve_language("javascript") == LanguageType.JAVASCRIPT
        assert resolve_language("typescript") == LanguageType.TYPESCRIPT
        assert resolve_language("PYTHON") == LanguageType.PYTHON
        assert resolve_language(None) is None
        assert resolve_language("invalid") is None


class TestCLIScanCommand:
    """Tests for CLI scan command."""

    @pytest.fixture
    def runner(self):
        """Create CLI runner."""
        return CliRunner()

    def test_scan_command_help(self, runner):
        """Test scan command help output."""
        result = runner.invoke(app, ["scan", "--help"])
        assert result.exit_code == 0
        assert "Scan code for issues" in result.output

    def test_scan_command_version(self, runner):
        """Test version command output."""
        result = runner.invoke(app, ["version"])
        assert result.exit_code == 0
        assert "AI Code Audit CLI" in result.output

    def test_scan_command_languages(self, runner):
        """Test languages command output."""
        result = runner.invoke(app, ["languages"])
        assert result.exit_code == 0
        assert "Python" in result.output
        assert "JavaScript" in result.output
        assert "TypeScript" in result.output

    def test_scan_nonexistent_file(self, runner):
        """Test scanning nonexistent file exits with an error."""
        result = runner.invoke(app, ["scan", "/nonexistent/file.py"])
        assert result.exit_code == 1
        assert "Error" in result.output

    def test_scan_directory(self, runner, test_files):
        """Test scanning a directory."""
        result = runner.invoke(app, ["scan", str(test_files)])
        assert result.exit_code == 0
        assert "AI Code Audit Results" in result.output or "Issues Found" in result.output

    def test_scan_with_verbose(self, runner, test_files):
        """Test scanning with verbose option."""
        result = runner.invoke(app, ["scan", str(test_files), "--verbose"])
        assert result.exit_code == 0

    def test_scan_with_output_format(self, runner, test_files):
        """Test scanning with output format option."""
        result = runner.invoke(app, ["scan", str(test_files), "--format", "json"])
        assert result.exit_code == 0

    def test_scan_with_quiet_mode(self, runner, test_files):
        """Test scanning with quiet mode still reports the confidence score."""
        result = runner.invoke(app, ["scan", str(test_files), "--quiet"])
        assert result.exit_code == 0
        assert "Confidence Score" in result.output

    def test_scan_with_language_filter(self, runner, test_files):
        """Test scanning with language filter."""
        result = runner.invoke(app, ["scan", str(test_files), "--language", "python"])
        assert result.exit_code == 0

    def test_scan_with_severity_filter(self, runner, test_files):
        """Test scanning with severity filter."""
        result = runner.invoke(app, ["scan", str(test_files), "--severity", "high"])
        assert result.exit_code == 0

    def test_scan_with_no_color(self, runner, test_files):
        """Test scanning with no color option."""
        result = runner.invoke(app, ["scan", str(test_files), "--no-color"])
        assert result.exit_code == 0


class TestOutputFormatter:
    """Tests for output formatting."""

    @pytest.fixture
    def formatter(self):
        """Build an OutputFormatter with default ScanOptions.

        Imports stay local (as in the original tests) so collection does not
        pull in the output module until these tests actually run.
        """
        from src.cli.options import ScanOptions
        from src.cli.output import OutputFormatter

        return OutputFormatter(ScanOptions())

    def test_get_score_color_green(self, formatter):
        """Test score color for high scores."""
        assert formatter._get_score_color(95) == "green"

    def test_get_score_color_yellow(self, formatter):
        """Test score color for medium scores."""
        assert formatter._get_score_color(75) == "yellow"

    def test_get_score_color_orange(self, formatter):
        """Test score color for low-medium scores."""
        assert formatter._get_score_color(55) == "orange1"

    def test_get_score_color_red(self, formatter):
        """Test score color for low scores."""
        assert formatter._get_score_color(25) == "red"

    def test_get_severity_style(self, formatter):
        """Test severity style mapping."""
        assert formatter._get_severity_style(SeverityLevel.CRITICAL) == "red bold"
        assert formatter._get_severity_style(SeverityLevel.HIGH) == "orange1"
        assert formatter._get_severity_style(SeverityLevel.MEDIUM) == "yellow"
        assert formatter._get_severity_style(SeverityLevel.LOW) == "blue"