Add integration tests and fixtures
tests/integration/test_cli.py (new file, 210 lines)
@@ -0,0 +1,210 @@
"""Integration tests for CLI commands."""

import json

import pytest
from click.testing import CliRunner

from vibeguard.cli.main import main
from vibeguard.utils.config import Config


def get_runner_with_config(config: Config | None = None):
    """Create a CLI runner with a custom config.

    NOTE: ``config`` is accepted but not applied yet; CliRunner has no
    config hook of its own, so the wiring is left to a follow-up.
    """
    runner = CliRunner()
    return runner
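

# Sketch of a shared fixture (illustrative addition: the commit title mentions
# fixtures, and the tests below could accept ``runner`` as an argument instead
# of constructing CliRunner() inline). Uses only pytest and click.testing.
@pytest.fixture
def runner():
    """Provide a fresh CliRunner for each test."""
    return CliRunner()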


class TestAnalyzeCommand:
    """Tests for the analyze command."""

    def test_analyze_no_files(self, tmp_path):
        """Test analyze with no files to scan."""
        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path)])

        assert result.exit_code == 0
        assert "No files found" in result.output

    def test_analyze_single_file(self, tmp_path):
        """Test analyzing a single Python file."""
        test_file = tmp_path / "test.py"
        test_file.write_text('print("hello")')

        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path)])

        assert result.exit_code == 0
        assert "Found" in result.output

    def test_analyze_with_json_output(self, tmp_path):
        """Test analyze with JSON output."""
        test_file = tmp_path / "test.py"
        test_file.write_text('print("hello")')

        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path), "--json"])

        assert result.exit_code == 0

    def test_analyze_with_severity_filter(self, tmp_path):
        """Test analyze with a severity filter."""
        test_file = tmp_path / "test.py"
        test_file.write_text('print("hello")')

        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path), "--severity", "critical"])

        assert result.exit_code == 0

    def test_analyze_exit_zero(self, tmp_path):
        """Test analyze with the --exit-zero flag."""
        test_file = tmp_path / "test.py"
        test_file.write_text('print("hello")')

        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path), "--exit-zero"])

        assert result.exit_code == 0

    def test_analyze_verbose(self, tmp_path):
        """Test analyze with the verbose flag."""
        test_file = tmp_path / "test.py"
        test_file.write_text('print("hello")')

        runner = CliRunner()
        result = runner.invoke(main, ["--verbose", "analyze", str(tmp_path)])

        assert result.exit_code == 0
        assert "VibeGuard" in result.output

    def test_analyze_file_with_anti_patterns(self, tmp_path):
        """Test analyzing a file that contains anti-patterns."""
        test_file = tmp_path / "test.py"
        test_file.write_text('''
MAGIC_VALUE = "this is a very long magic string that should be detected as anti-pattern"
MAX_RETRIES = 12345

def process():
    pass

try:
    x = 1
except:
    pass
''')

        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path)])

        assert result.exit_code == 1
        assert "Issues found" in result.output

    def test_analyze_html_output(self, tmp_path):
        """Test analyze with HTML output."""
        test_file = tmp_path / "test.py"
        test_file.write_text('print("hello")')

        output_file = tmp_path / "report.html"

        runner = CliRunner()
        result = runner.invoke(main, ["analyze", str(tmp_path), "--html", str(output_file)])

        assert result.exit_code == 0
        assert output_file.exists()
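

# Illustrative sketch (an addition, not one of the original tests): the three
# flag tests above are identical smoke tests, so a single parametrized test
# could cover them. Uses only pytest and flags already exercised above.
@pytest.mark.parametrize(
    "extra_args",
    [["--json"], ["--severity", "critical"], ["--exit-zero"]],
)
def test_analyze_flag_smoke(tmp_path, extra_args):
    """Smoke-test each analyze flag against a clean file."""
    (tmp_path / "test.py").write_text('print("hello")')

    runner = CliRunner()
    result = runner.invoke(main, ["analyze", str(tmp_path), *extra_args])

    assert result.exit_code == 0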


class TestInitCommand:
    """Tests for the init command."""

    def test_init_creates_config(self, tmp_path):
        """Test that init creates a config file."""
        runner = CliRunner()
        result = runner.invoke(main, ["init", "--path", str(tmp_path)])

        assert result.exit_code == 0
        assert (tmp_path / ".vibeguard.toml").exists()

    def test_init_does_not_overwrite_existing(self, tmp_path):
        """Test that init does not overwrite an existing config."""
        config_file = tmp_path / ".vibeguard.toml"
        config_file.write_text("# existing config")

        runner = CliRunner()
        result = runner.invoke(main, ["init", "--path", str(tmp_path)])

        assert result.exit_code == 0
        assert "already exists" in result.output
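

# Sketch of a data fixture (illustrative): the report tests below rebuild the
# same payload inline and could take ``sample_issues`` instead. The schema is
# exactly the one used in test_report_from_json.
@pytest.fixture
def sample_issues():
    """One representative issue in the report JSON schema."""
    return [
        {
            "pattern": "TEST001",
            "severity": "warning",
            "file": "test.py",
            "line": 1,
            "message": "Test issue",
            "suggestion": "Test suggestion",
        }
    ]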


class TestReportCommand:
    """Tests for the report command."""

    def test_report_from_json(self, tmp_path):
        """Test generating a report from a JSON file."""
        input_file = tmp_path / "input.json"
        issues = [
            {
                "pattern": "TEST001",
                "severity": "warning",
                "file": "test.py",
                "line": 1,
                "message": "Test issue",
                "suggestion": "Test suggestion",
            }
        ]
        input_file.write_text(json.dumps({"issues": issues}))

        runner = CliRunner()
        result = runner.invoke(main, ["report", str(input_file)])

        assert result.exit_code == 0

    def test_report_to_html(self, tmp_path):
        """Test generating an HTML report."""
        input_file = tmp_path / "input.json"
        input_file.write_text(json.dumps({"issues": []}))

        output_file = tmp_path / "report.html"

        runner = CliRunner()
        result = runner.invoke(main, ["report", str(input_file), "--html", str(output_file)])

        assert result.exit_code == 0
        assert output_file.exists()

    def test_report_empty_issues(self, tmp_path):
        """Test report with no issues."""
        input_file = tmp_path / "input.json"
        input_file.write_text(json.dumps({"issues": []}))

        runner = CliRunner()
        result = runner.invoke(main, ["report", str(input_file)])

        assert result.exit_code == 0
        assert "No issues" in result.output


class TestMainCommand:
    """Tests for the main command group."""

    def test_main_help(self):
        """Test the main help output."""
        runner = CliRunner()
        result = runner.invoke(main, ["--help"])

        assert result.exit_code == 0
        assert "VibeGuard" in result.output
        assert "analyze" in result.output
        assert "init" in result.output

    def test_main_version(self):
        """Test the version output."""
        runner = CliRunner()
        # Assumes main exposes click's standard --version option.
        result = runner.invoke(main, ["--version"])

        assert result.exit_code == 0
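
The commit title also mentions fixtures, but beyond the sketches above the diff
ships none. A minimal shared conftest.py could carry a reusable project tree
for the analyze tests. The sketch below is illustrative, not part of this
commit: the sample_project name is an assumption, and the snippet it writes
reuses the anti-pattern sample from test_analyze_file_with_anti_patterns.

    # tests/integration/conftest.py (sketch)
    import pytest


    @pytest.fixture
    def sample_project(tmp_path):
        """A throwaway project tree containing one file of known anti-patterns."""
        (tmp_path / "sample.py").write_text(
            'MAGIC_VALUE = "this is a very long magic string that should be detected as anti-pattern"\n'
            "try:\n"
            "    x = 1\n"
            "except:\n"
            "    pass\n"
        )
        return tmp_path

A test would then take sample_project instead of building files inline, e.g.
runner.invoke(main, ["analyze", str(sample_project)]).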