126 lines
3.6 KiB
Python
126 lines
3.6 KiB
Python
import pytest
|
|
from pathlib import Path
|
|
import subprocess
|
|
import tempfile
|
|
|
|
from src.config import Config
|
|
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
|
|
|
|
|
@pytest.fixture
def temp_git_repo():
    """Yield the Path of a throwaway Git repository, deleted on teardown."""
    with tempfile.TemporaryDirectory() as tmpdir:
        repo = Path(tmpdir)
        # Initialize the repo and give it an identity so commits made by
        # tests don't fail on missing user.name/user.email.
        setup_commands = [
            ["git", "init"],
            ["git", "config", "user.email", "test@test.com"],
            ["git", "config", "user.name", "Test"],
        ]
        for command in setup_commands:
            subprocess.run(command, cwd=repo, capture_output=True)
        yield repo
|
|
|
|
|
|
@pytest.fixture
def sample_python_file(temp_git_repo):
    """Write a tiny Python module into the temp repo and stage it with git."""
    source = 'def hello():\n print("Hello, World!")\n return True\n'
    target = temp_git_repo / "test.py"
    target.write_text(source)
    # Stage the file so diff-based tests see it as tracked content.
    subprocess.run(["git", "add", "test.py"], cwd=temp_git_repo, capture_output=True)
    return target
|
|
|
|
|
|
@pytest.fixture
def sample_js_file(temp_git_repo):
    """Write a tiny JavaScript file into the temp repo and stage it with git."""
    source = 'function hello() {\n console.log("Hello, World!");\n}\n'
    target = temp_git_repo / "test.js"
    target.write_text(source)
    # Stage the file so diff-based tests see it as tracked content.
    subprocess.run(["git", "add", "test.js"], cwd=temp_git_repo, capture_output=True)
    return target
|
|
|
|
|
|
@pytest.fixture
def sample_diff():
    """Return a sample diff for testing.

    The literal below is a unified-diff snippet for test.py. NOTE(review):
    the hunk's context lines lack the leading-space prefix a strict unified
    diff would carry — presumably intentional for lenient parsers, but
    confirm against the consumer of this fixture.
    """
    return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
def hello():
+ print("hello")
return True
- return False
"""
|
|
|
|
|
|
@pytest.fixture
def mock_config():
    """Provide a Config built entirely from its defaults."""
    config = Config()
    return config
|
|
|
|
|
|
class MockLLMProvider(LLMProvider):
    """Deterministic in-memory LLM provider for tests.

    Stands in for a real provider with no network or model dependency:
    availability and the generated text are fixed at construction time.
    """

    def __init__(self, available: bool = True, response_text: str | None = None):
        """Configure the mock.

        Args:
            available: Value reported by is_available() and health_check().
            response_text: Text returned by generate(). Defaults to a
                "no findings" JSON review payload when None or empty.
                (Fixed: the parameter was annotated ``str`` despite its
                ``None`` default — implicit Optional is invalid per PEP 484.)
        """
        self._available = available
        # Falls back on any falsy value (None or ""), not just None.
        self._response_text = response_text or '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, "info_count": 0, "overall_assessment": "No issues"}}'

    def is_available(self) -> bool:
        """Report the availability configured at construction."""
        return self._available

    def generate(self, prompt: str, **kwargs) -> LLMResponse:
        """Return the canned response, ignoring the prompt and kwargs."""
        return LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop",
        )

    async def agenerate(self, prompt: str, **kwargs) -> LLMResponse:
        """Async variant; delegates to the synchronous generate()."""
        return self.generate(prompt, **kwargs)

    def stream_generate(self, prompt: str, **kwargs):
        """Yield a single fixed chunk to emulate a streaming response."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return a single fake model entry."""
        return [
            ModelInfo(name="mock-model", size="4GB", modified="2024-01-01", digest="abc123")
        ]

    def health_check(self) -> bool:
        """Mirror is_available() for health probes."""
        return self._available
|
|
|
|
|
|
@pytest.fixture
def mock_llm_provider():
    """Provide a mock LLM provider that reports itself as available."""
    provider = MockLLMProvider(available=True)
    return provider
|
|
|
|
|
|
@pytest.fixture
def mock_llm_unavailable():
    """Provide a mock LLM provider that reports itself as unavailable."""
    provider = MockLLMProvider(available=False)
    return provider
|
|
|
|
|
|
@pytest.fixture
def mock_llm_with_issues():
    """Return a mock LLM provider whose response reports one style warning.

    Fixed: the literal previously used doubled braces ({{ ... }}) — a
    leftover from str.format templating — which made the payload invalid
    JSON, so any test that parsed the response would fail. The default
    response in MockLLMProvider uses single braces, confirming the
    intended format.
    """
    response = '''{
    "issues": [
        {
            "file": "test.py",
            "line": 2,
            "severity": "warning",
            "category": "style",
            "message": "Missing docstring for function",
            "suggestion": "Add a docstring above the function definition"
        }
    ],
    "summary": {
        "critical_count": 0,
        "warning_count": 1,
        "info_count": 0,
        "overall_assessment": "Minor style issues found"
    }
}'''
    return MockLLMProvider(available=True, response_text=response)
|