Fix CI/CD: Add Gitea Actions workflow and fix linting issues

This commit is contained in:
Developer
2026-02-05 09:02:49 +00:00
commit d8325c4be2
111 changed files with 19657 additions and 0 deletions

View File

@@ -0,0 +1,126 @@
import subprocess
import tempfile
from pathlib import Path
import pytest
from src.config import Config
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
@pytest.fixture
def temp_git_repo():
    """Yield a disposable Git repository rooted in a temporary directory.

    The repo gets a dummy identity so commits made by tests never depend
    on the host's global git configuration.  Everything is removed when
    the fixture finalises.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        repo = Path(tmpdir)
        # check=False: a failed git call should surface later as a test
        # failure, not as an error while building the fixture.
        for cmd in (
            ["git", "init"],
            ["git", "config", "user.email", "test@test.com"],
            ["git", "config", "user.name", "Test"],
        ):
            subprocess.run(cmd, cwd=repo, capture_output=True, check=False)
        yield repo
@pytest.fixture
def sample_python_file(temp_git_repo):
    """Write a small Python source file into the temp repo and stage it."""
    path = temp_git_repo / "test.py"
    path.write_text('def hello():\n print("Hello, World!")\n return True\n')
    subprocess.run(
        ["git", "add", "test.py"],
        cwd=temp_git_repo,
        capture_output=True,
        check=False,
    )
    return path
@pytest.fixture
def sample_js_file(temp_git_repo):
    """Write a small JavaScript source file into the temp repo and stage it."""
    path = temp_git_repo / "test.js"
    path.write_text('function hello() {\n console.log("Hello, World!");\n}\n')
    subprocess.run(
        ["git", "add", "test.js"],
        cwd=temp_git_repo,
        capture_output=True,
        check=False,
    )
    return path
@pytest.fixture
def sample_diff():
"""Return a sample diff for testing."""
# Canned unified diff: one line added, one removed in test.py.
# NOTE(review): context lines inside this literal appear to have lost
# their usual leading space (possibly stripped formatting) — confirm
# exact bytes against the repository copy before relying on them.
return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
def hello():
+ print("hello")
return True
- return False
"""
@pytest.fixture
def mock_config():
    """Provide a Config instance built entirely from defaults."""
    default_config = Config()
    return default_config
class MockLLMProvider(LLMProvider):
    """Test double for LLMProvider that replies with a canned response."""

    # "All clear" review payload used when no explicit text is supplied.
    _DEFAULT_RESPONSE = (
        '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, '
        '"info_count": 0, "overall_assessment": "No issues"}}'
    )

    def __init__(self, available: bool = True, response_text: str | None = None):
        self._available = available
        # `or` (not `is None`) so an empty string also falls back.
        self._response_text = response_text or self._DEFAULT_RESPONSE

    def is_available(self) -> bool:
        return self._available

    def health_check(self) -> bool:
        # Health simply mirrors availability for this stub.
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        # Prompt and kwargs are deliberately ignored; reply is canned.
        return LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop",
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        # Async variant delegates to the synchronous implementation.
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        # Single-chunk "stream"; enough for consumers that just iterate.
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        model = ModelInfo(
            name="mock-model", size="4GB", modified="2024-01-01", digest="abc123"
        )
        return [model]
@pytest.fixture
def mock_llm_provider():
    """A mock provider that reports itself as available."""
    provider = MockLLMProvider(available=True)
    return provider


@pytest.fixture
def mock_llm_unavailable():
    """A mock provider that reports itself as unavailable."""
    provider = MockLLMProvider(available=False)
    return provider
@pytest.fixture
def mock_llm_with_issues():
"""Return a mock LLM provider that returns issues."""
# Canned JSON payload: one warning-level style issue plus a summary that
# matches it, as a raw string for the review engine to parse.
# NOTE(review): the literal's internal indentation appears stripped by
# formatting — confirm exact bytes against the repository copy if any
# consumer compares the raw string rather than the parsed JSON.
response = '''{
"issues": [
{
"file": "test.py",
"line": 2,
"severity": "warning",
"category": "style",
"message": "Missing docstring for function",
"suggestion": "Add a docstring above the function definition"
}
],
"summary": {
"critical_count": 0,
"warning_count": 1,
"info_count": 0,
"overall_assessment": "Minor style issues found"
}
}'''
return MockLLMProvider(available=True, response_text=response)

View File

@@ -0,0 +1,126 @@
import subprocess
import tempfile
from pathlib import Path
import pytest
from src.config import Config
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
@pytest.fixture
def temp_git_repo():
    """Yield a throwaway Git repository in a temporary directory.

    A dummy committer identity is configured so tests never rely on the
    host's global git settings; the directory is removed on teardown.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        repo = Path(tmpdir)
        for cmd in (
            ["git", "init"],
            ["git", "config", "user.email", "test@test.com"],
            ["git", "config", "user.name", "Test"],
        ):
            # check=False: let later assertions surface git failures.
            subprocess.run(cmd, cwd=repo, capture_output=True, check=False)
        yield repo


def _write_and_stage(repo, name, contents):
    """Write *contents* to ``repo/name`` and ``git add`` it; return the path."""
    path = repo / name
    path.write_text(contents)
    subprocess.run(["git", "add", name], cwd=repo, capture_output=True, check=False)
    return path


@pytest.fixture
def sample_python_file(temp_git_repo):
    """Create and stage a small Python source file."""
    return _write_and_stage(
        temp_git_repo,
        "test.py",
        'def hello():\n print("Hello, World!")\n return True\n',
    )


@pytest.fixture
def sample_js_file(temp_git_repo):
    """Create and stage a small JavaScript source file."""
    return _write_and_stage(
        temp_git_repo,
        "test.js",
        'function hello() {\n console.log("Hello, World!");\n}\n',
    )
@pytest.fixture
def sample_diff():
"""Return a sample diff for testing."""
# Canned unified diff: one line added, one removed in test.py.
# NOTE(review): context lines inside this literal appear to have lost
# their usual leading space (possibly stripped formatting) — confirm
# exact bytes against the repository copy before relying on them.
return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
def hello():
+ print("hello")
return True
- return False
"""
@pytest.fixture
def mock_config():
    """Provide a Config built purely from its declared defaults."""
    return Config()
class MockLLMProvider(LLMProvider):
    """Configurable LLMProvider stub used across the test suite."""

    def __init__(self, available: bool = True, response_text: str | None = None):
        self._available = available
        # Default to an "all clear" review; `or` also covers the
        # empty-string case, matching callers that pass "".
        self._response_text = response_text or (
            '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, '
            '"info_count": 0, "overall_assessment": "No issues"}}'
        )

    def is_available(self) -> bool:
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        # Every call returns the same canned reply; prompt is ignored.
        canned = LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop",
        )
        return canned

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        # Async path reuses the synchronous implementation.
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        # Minimal one-chunk stream.
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        return [
            ModelInfo(name="mock-model", size="4GB", modified="2024-01-01", digest="abc123")
        ]

    def health_check(self) -> bool:
        # Mirrors availability for this stub.
        return self._available
@pytest.fixture
def mock_llm_provider():
    """A mock provider reporting itself as available."""
    return MockLLMProvider(available=True)


@pytest.fixture
def mock_llm_unavailable():
    """A mock provider reporting itself as unavailable."""
    return MockLLMProvider(available=False)
@pytest.fixture
def mock_llm_with_issues():
"""Return a mock LLM provider that returns issues."""
# Canned JSON payload: one warning-level style issue plus a summary that
# matches it, as a raw string for the review engine to parse.
# NOTE(review): the literal's internal indentation appears stripped by
# formatting — confirm exact bytes against the repository copy if any
# consumer compares the raw string rather than the parsed JSON.
response = '''{
"issues": [
{
"file": "test.py",
"line": 2,
"severity": "warning",
"category": "style",
"message": "Missing docstring for function",
"suggestion": "Add a docstring above the function definition"
}
],
"summary": {
"critical_count": 0,
"warning_count": 1,
"info_count": 0,
"overall_assessment": "Minor style issues found"
}
}'''
return MockLLMProvider(available=True, response_text=response)

View File

@@ -0,0 +1,46 @@
from fixtures.sample_repo import MockLLMProvider
class TestReviewWorkflow:
    """End-to-end tests driving ReviewEngine against a real temp repository."""

    def test_review_with_no_staged_changes(self, temp_git_repo, mock_config):
        """An empty change list must yield the explanatory error message."""
        from src.core.review_engine import ReviewEngine  # noqa: PLC0415

        engine = ReviewEngine(config=mock_config, llm_provider=MockLLMProvider())
        engine.set_repo(temp_git_repo)
        result = engine.review_staged_changes([])
        assert result.error == "No staged changes found"

    def test_review_with_staged_file(self, temp_git_repo, mock_config, request):
        """Reviewing a staged file runs in balanced mode and yields an issue list."""
        from src.core.review_engine import ReviewEngine  # noqa: PLC0415
        from src.git import get_staged_changes  # noqa: PLC0415

        # Materialise the fixture for its side effect: file created and staged.
        request.getfixturevalue("sample_python_file")
        changes = get_staged_changes(temp_git_repo)
        engine = ReviewEngine(config=mock_config, llm_provider=MockLLMProvider())
        engine.set_repo(temp_git_repo)
        result = engine.review_staged_changes(changes)
        assert result.review_mode == "balanced"
        # BUG FIX: the previous assertion
        #   `result.error is None or len(result.issues) >= 0`
        # was a tautology (`len(x) >= 0` is always true) and could never
        # fail.  Assert something falsifiable instead.
        assert isinstance(result.issues, list)
class TestHookInstallation:
    """Tests for installing and detecting the git pre-commit hook."""

    def test_install_hook(self, temp_git_repo):
        """Installing the hook creates a script under .git/hooks mentioning the tool."""
        from src.hooks import install_pre_commit_hook  # noqa: PLC0415

        assert install_pre_commit_hook(temp_git_repo) is True
        hook_path = temp_git_repo / ".git" / "hooks" / "pre-commit"
        assert hook_path.exists()
        content = hook_path.read_text()
        assert "aicr" in content or "review" in content

    def test_check_hook_installed(self, temp_git_repo):
        """check_hook_installed flips from False to True after installation."""
        from src.hooks import check_hook_installed, install_pre_commit_hook  # noqa: PLC0415

        assert check_hook_installed(temp_git_repo) is False
        install_pre_commit_hook(temp_git_repo)
        assert check_hook_installed(temp_git_repo) is True

View File

@@ -0,0 +1,51 @@
from src.config import Config, ConfigLoader
class TestConfig:
    """Unit tests for the Config model and its nested sections."""

    def test_default_config(self):
        """Defaults: local Ollama endpoint, codellama model, balanced, hooks on."""
        config = Config()
        assert config.llm.endpoint == "http://localhost:11434"
        assert config.llm.model == "codellama"
        assert config.review.strictness == "balanced"
        assert config.hooks.enabled is True

    def test_config_from_dict(self):
        """Keyword construction overrides the nested section defaults."""
        overrides = {
            "llm": {"endpoint": "http://custom:9000", "model": "custom-model"},
            "review": {"strictness": "strict"},
        }
        config = Config(**overrides)
        assert config.llm.endpoint == "http://custom:9000"
        assert config.llm.model == "custom-model"
        assert config.review.strictness == "strict"

    def test_language_config(self):
        """Python has a language config entry and it is enabled by default."""
        py_config = Config().languages.get_language_config("python")
        assert py_config is not None
        assert py_config.enabled is True

    def test_strictness_profiles(self):
        """Profiles differ: permissive skips style, strict checks performance."""
        profiles = Config().strictness_profiles
        assert profiles.get_profile("permissive").check_style is False
        assert profiles.get_profile("strict").check_performance is True
class TestConfigLoader:
    """Tests for ConfigLoader file discovery and loading."""

    def test_load_default_config(self):
        """Loading with no explicit path still produces a Config instance."""
        config = ConfigLoader().load()
        assert isinstance(config, Config)

    def test_find_config_files_nonexistent(self):
        """A bogus explicit path resolves to no local config file."""
        loader = ConfigLoader("/nonexistent/path.yaml")
        local_path, _global_path = loader.find_config_files()
        assert local_path is None

View File

@@ -0,0 +1,40 @@
from pathlib import Path
from src.git.git import FileChange, GitRepo
class TestGitRepo:
# Unit tests for GitRepo helpers that need no actual commit history.
def test_get_file_language(self):
# Extension -> language detection, including the "unknown" fallback.
repo = GitRepo(Path.cwd())
assert repo.get_file_language("test.py") == "python"
assert repo.get_file_language("test.js") == "javascript"
assert repo.get_file_language("test.go") == "go"
assert repo.get_file_language("test.rs") == "rust"
assert repo.get_file_language("test.unknown") == "unknown"
def test_get_diff_stats(self):
# The canned diff adds one line and removes one; the assertions imply
# the +++/--- header lines are not counted as additions/deletions.
# NOTE(review): context lines in the literal appear to have lost their
# usual leading space (possibly stripped formatting) — confirm exact
# bytes against the repository copy.
repo = GitRepo(Path.cwd())
diff = """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
def hello():
+ print("hello")
return True
- return False
"""
additions, deletions = repo.get_diff_stats(diff)
assert additions == 1
assert deletions == 1
class TestFileChange:
    """Construction tests for the FileChange value object."""

    def test_file_change_creation(self):
        """The constructor stores filename, status and diff verbatim."""
        change = FileChange(filename="test.py", status="M", diff="diff content")
        assert (change.filename, change.status, change.diff) == (
            "test.py",
            "M",
            "diff content",
        )

View File

@@ -0,0 +1,52 @@
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
class MockLLMProvider(LLMProvider):
    """Minimal LLMProvider stub returning one fixed review response."""

    def __init__(self, available: bool = True):
        self._available = available
        self._models = []  # list_models() echoes this (empty by default)

    def is_available(self) -> bool:
        return self._available

    def health_check(self) -> bool:
        # Health simply mirrors availability for this stub.
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        # Prompt is deliberately ignored; every call yields the same reply.
        return LLMResponse(
            text="Mock review response",
            model="mock-model",
            tokens_used=100,
            finish_reason="stop",
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        # Async path delegates to the synchronous implementation.
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        # One-chunk "stream".
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        return self._models
class TestLLMProvider:
    """Behavioural tests exercised through the MockLLMProvider stub."""

    def test_mock_provider_is_available(self):
        """An available provider reports True."""
        assert MockLLMProvider(available=True).is_available() is True

    def test_mock_provider_not_available(self):
        """An unavailable provider reports False."""
        assert MockLLMProvider(available=False).is_available() is False

    def test_mock_generate(self):
        """generate() returns a well-formed LLMResponse with the canned text."""
        response = MockLLMProvider().generate("test prompt")
        assert isinstance(response, LLMResponse)
        assert response.text == "Mock review response"
        assert response.model == "mock-model"

    def test_mock_list_models(self):
        """list_models() always yields a list."""
        assert isinstance(MockLLMProvider().list_models(), list)

View File

@@ -0,0 +1,76 @@
from src.core.review_engine import Issue, IssueCategory, IssueSeverity, ReviewResult, ReviewSummary
class TestIssue:
    """Construction and serialisation tests for Issue."""

    def test_issue_creation(self):
        """All constructor arguments are stored on the instance."""
        issue = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.WARNING,
            category=IssueCategory.STYLE,
            message="Missing docstring",
            suggestion="Add a docstring",
        )
        assert issue.file == "test.py"
        assert issue.line == 10  # noqa: PLR2004
        assert issue.severity == IssueSeverity.WARNING

    def test_issue_to_dict(self):
        """to_dict() lowers the severity/category enums to string values."""
        data = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.CRITICAL,
            category=IssueCategory.BUG,
            message="Potential bug",
        ).to_dict()
        assert data["file"] == "test.py"
        assert data["severity"] == "critical"
        assert data["category"] == "bug"
class TestReviewResult:
    """Tests for ReviewResult issue bookkeeping and filtering."""

    @staticmethod
    def _make_issue(file, line, severity, category, message):
        """Shorthand Issue factory for the tests below."""
        return Issue(
            file=file, line=line, severity=severity, category=category, message=message
        )

    def test_review_result_no_issues(self):
        """A fresh result reports no issues of any severity."""
        result = ReviewResult()
        assert result.has_issues() is False
        assert result.has_critical_issues() is False

    def test_review_result_with_issues(self):
        """A critical security issue flips both predicates to True."""
        result = ReviewResult()
        result.issues = [
            self._make_issue(
                "test.py", 1, IssueSeverity.CRITICAL, IssueCategory.SECURITY, "SQL injection"
            )
        ]
        assert result.has_issues() is True
        assert result.has_critical_issues() is True

    def test_get_issues_by_severity(self):
        """Severity filtering returns only the matching issues."""
        result = ReviewResult()
        result.issues = [
            self._make_issue("a.py", 1, IssueSeverity.CRITICAL, IssueCategory.BUG, "Bug1"),
            self._make_issue("b.py", 2, IssueSeverity.WARNING, IssueCategory.STYLE, "Style1"),
            self._make_issue("c.py", 3, IssueSeverity.INFO, IssueCategory.DOCUMENTATION, "Doc1"),
        ]
        critical = result.get_issues_by_severity(IssueSeverity.CRITICAL)
        assert len(critical) == 1
        assert critical[0].file == "a.py"
class TestReviewSummary:
# Aggregation/serialisation test for ReviewSummary.
# NOTE(review): the source view may be truncated here (the diff header
# declares more lines than are visible) — further assertions may follow.
def test_review_summary_aggregation(self):
# Populate every counter, then check to_dict() round-trips the values.
summary = ReviewSummary()
summary.files_reviewed = 5
summary.lines_changed = 100
summary.critical_count = 2
summary.warning_count = 5
summary.info_count = 10
summary.overall_assessment = "Good"
data = summary.to_dict()
assert data["files_reviewed"] == 5  # noqa: PLR2004
assert data["critical_count"] == 2  # noqa: PLR2004