Fix CI/CD: Add Gitea Actions workflow and fix linting issues
Some checks failed — CI / test (push): failing after 13s
This commit is contained in:
51
local-ai-commit-reviewer/tests/unit/test_config.py
Normal file
51
local-ai-commit-reviewer/tests/unit/test_config.py
Normal file
@@ -0,0 +1,51 @@
|
||||
|
||||
from src.config import Config, ConfigLoader
|
||||
|
||||
|
||||
class TestConfig:
    """Behavioral checks for the Config model and its nested sections."""

    def test_default_config(self):
        """A bare Config carries the documented defaults."""
        cfg = Config()
        assert cfg.llm.endpoint == "http://localhost:11434"
        assert cfg.llm.model == "codellama"
        assert cfg.review.strictness == "balanced"
        assert cfg.hooks.enabled is True

    def test_config_from_dict(self):
        """Keyword-expanding a plain dict overrides the matching defaults."""
        overrides = {
            "llm": {
                "endpoint": "http://custom:9000",
                "model": "custom-model",
            },
            "review": {
                "strictness": "strict",
            },
        }
        cfg = Config(**overrides)
        assert cfg.llm.endpoint == "http://custom:9000"
        assert cfg.llm.model == "custom-model"
        assert cfg.review.strictness == "strict"

    def test_language_config(self):
        """Python is a known, enabled language out of the box."""
        py_cfg = Config().languages.get_language_config("python")
        assert py_cfg is not None
        assert py_cfg.enabled is True

    def test_strictness_profiles(self):
        """The built-in profiles differ in which checks they enable."""
        profiles = Config().strictness_profiles
        assert profiles.get_profile("permissive").check_style is False
        assert profiles.get_profile("strict").check_performance is True
||||
|
||||
class TestConfigLoader:
    """Tests for ConfigLoader's file discovery and loading paths."""

    def test_load_default_config(self):
        """Loading with no explicit path still yields a Config instance."""
        assert isinstance(ConfigLoader().load(), Config)

    def test_find_config_files_nonexistent(self):
        """A path that does not exist resolves to no project config."""
        loader = ConfigLoader("/nonexistent/path.yaml")
        path, _global_path = loader.find_config_files()
        assert path is None
|
||||
40
local-ai-commit-reviewer/tests/unit/test_git.py
Normal file
40
local-ai-commit-reviewer/tests/unit/test_git.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from pathlib import Path
|
||||
|
||||
from src.git.git import FileChange, GitRepo
|
||||
|
||||
|
||||
class TestGitRepo:
    """Unit tests for GitRepo helpers that need no repository history."""

    def test_get_file_language(self):
        """Known extensions map to language names; anything else is 'unknown'."""
        repo = GitRepo(Path.cwd())
        cases = {
            "test.py": "python",
            "test.js": "javascript",
            "test.go": "go",
            "test.rs": "rust",
            "test.unknown": "unknown",
        }
        for filename, expected in cases.items():
            assert repo.get_file_language(filename) == expected

    def test_get_diff_stats(self):
        """'+'/'-' content lines are counted; the asserted totals imply the
        +++/--- file headers are excluded by the implementation."""
        repo = GitRepo(Path.cwd())
        # NOTE(review): the interior whitespace of this diff literal was
        # reconstructed conventionally; only the +/- prefixes matter here.
        diff = """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
 def hello():
+    print("hello")
     return True
-    return False
"""
        added, removed = repo.get_diff_stats(diff)
        assert added == 1
        assert removed == 1
|
||||
|
||||
|
||||
class TestFileChange:
    """Construction of the FileChange value object."""

    def test_file_change_creation(self):
        """Fields passed to the constructor are stored verbatim."""
        change = FileChange(filename="test.py", status="M", diff="diff content")
        assert change.filename == "test.py"
        assert change.status == "M"
        assert change.diff == "diff content"
|
||||
52
local-ai-commit-reviewer/tests/unit/test_llm.py
Normal file
52
local-ai-commit-reviewer/tests/unit/test_llm.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """In-memory LLMProvider double with a switchable availability flag."""

    def __init__(self, available: bool = True):
        # Availability is fixed at construction; health_check mirrors it.
        self._available = available
        self._models = []

    def is_available(self) -> bool:
        """Report the availability configured at construction time."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Ignore the prompt and return a canned, deterministic response."""
        canned = LLMResponse(
            text="Mock review response",
            model="mock-model",
            tokens_used=100,
            finish_reason="stop",
        )
        return canned

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant; delegates to the synchronous path."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single canned chunk."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Expose the (empty) model list initialized in __init__."""
        return self._models

    def health_check(self) -> bool:
        """Mirror is_available(); the mock has no separate health state."""
        return self._available
|
||||
|
||||
|
||||
class TestLLMProvider:
    """Exercises the provider contract through the mock implementation."""

    def test_mock_provider_is_available(self):
        """The availability flag passes straight through when True."""
        assert MockLLMProvider(available=True).is_available() is True

    def test_mock_provider_not_available(self):
        """The availability flag passes straight through when False."""
        assert MockLLMProvider(available=False).is_available() is False

    def test_mock_generate(self):
        """generate() returns the canned LLMResponse regardless of prompt."""
        reply = MockLLMProvider().generate("test prompt")
        assert isinstance(reply, LLMResponse)
        assert reply.text == "Mock review response"
        assert reply.model == "mock-model"

    def test_mock_list_models(self):
        """list_models() always yields a list (empty for the mock)."""
        assert isinstance(MockLLMProvider().list_models(), list)
|
||||
76
local-ai-commit-reviewer/tests/unit/test_review.py
Normal file
76
local-ai-commit-reviewer/tests/unit/test_review.py
Normal file
@@ -0,0 +1,76 @@
|
||||
from src.core.review_engine import Issue, IssueCategory, IssueSeverity, ReviewResult, ReviewSummary
|
||||
|
||||
|
||||
class TestIssue:
    """Construction and serialization of Issue records."""

    def test_issue_creation(self):
        """Constructor arguments land on the matching attributes."""
        issue = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.WARNING,
            category=IssueCategory.STYLE,
            message="Missing docstring",
            suggestion="Add a docstring",
        )
        assert issue.file == "test.py"
        assert issue.line == 10  # noqa: PLR2004
        assert issue.severity == IssueSeverity.WARNING

    def test_issue_to_dict(self):
        """to_dict serializes enum members as their plain string values."""
        issue = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.CRITICAL,
            category=IssueCategory.BUG,
            message="Potential bug",
        )
        serialized = issue.to_dict()
        assert serialized["file"] == "test.py"
        assert serialized["severity"] == "critical"
        assert serialized["category"] == "bug"
|
||||
|
||||
|
||||
class TestReviewResult:
    """Aggregate queries over a ReviewResult's issue list."""

    @staticmethod
    def _make_issue(file, line, severity, category, message):
        """Build an Issue with only the fields these tests require."""
        return Issue(
            file=file,
            line=line,
            severity=severity,
            category=category,
            message=message,
        )

    def test_review_result_no_issues(self):
        """A fresh result reports no issues of any severity."""
        empty = ReviewResult()
        assert empty.has_issues() is False
        assert empty.has_critical_issues() is False

    def test_review_result_with_issues(self):
        """A single critical issue flips both predicates to True."""
        result = ReviewResult()
        result.issues = [
            self._make_issue(
                "test.py", 1, IssueSeverity.CRITICAL, IssueCategory.SECURITY, "SQL injection"
            ),
        ]
        assert result.has_issues() is True
        assert result.has_critical_issues() is True

    def test_get_issues_by_severity(self):
        """Filtering returns only the issues of the requested severity."""
        result = ReviewResult()
        result.issues = [
            self._make_issue("a.py", 1, IssueSeverity.CRITICAL, IssueCategory.BUG, "Bug1"),
            self._make_issue("b.py", 2, IssueSeverity.WARNING, IssueCategory.STYLE, "Style1"),
            self._make_issue("c.py", 3, IssueSeverity.INFO, IssueCategory.DOCUMENTATION, "Doc1"),
        ]
        critical_only = result.get_issues_by_severity(IssueSeverity.CRITICAL)
        assert len(critical_only) == 1
        assert critical_only[0].file == "a.py"
|
||||
|
||||
|
||||
class TestReviewSummary:
    """Serialization of accumulated review statistics."""

    def test_review_summary_aggregation(self):
        """Counters assigned on the summary round-trip through to_dict."""
        summary = ReviewSummary()
        summary.files_reviewed = 5
        summary.lines_changed = 100
        summary.critical_count = 2
        summary.warning_count = 5
        summary.info_count = 10
        summary.overall_assessment = "Good"

        exported = summary.to_dict()
        assert exported["files_reviewed"] == 5  # noqa: PLR2004
        assert exported["critical_count"] == 2  # noqa: PLR2004
|
||||
Reference in New Issue
Block a user