local-llm-prompt-manager/local-ai-commit-reviewer/tests/unit/test_llm.py

from src.llm.provider import LLMProvider, LLMResponse, ModelInfo


class MockLLMProvider(LLMProvider):
    """Minimal in-memory provider used to exercise the LLMProvider interface."""

    def __init__(self, available: bool = True):
        self._available = available
        self._models: list[ModelInfo] = []

    def is_available(self) -> bool:
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        return LLMResponse(
            text="Mock review response",
            model="mock-model",
            tokens_used=100,
            finish_reason="stop",
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        return self._models

    def health_check(self) -> bool:
        return self._available

class TestLLMProvider:
    def test_mock_provider_is_available(self):
        provider = MockLLMProvider(available=True)
        assert provider.is_available() is True

    def test_mock_provider_not_available(self):
        provider = MockLLMProvider(available=False)
        assert provider.is_available() is False

    def test_mock_generate(self):
        provider = MockLLMProvider()
        response = provider.generate("test prompt")
        assert isinstance(response, LLMResponse)
        assert response.text == "Mock review response"
        assert response.model == "mock-model"

    def test_mock_list_models(self):
        provider = MockLLMProvider()
        models = provider.list_models()
        assert isinstance(models, list)
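

# Sketch of additional coverage for the mock methods the class above does not
# exercise (health_check, stream_generate, agenerate). The async path is driven
# with asyncio.run so no pytest-asyncio plugin is assumed; adjust if the suite
# already configures one.
import asyncio


class TestMockProviderExtras:
    def test_mock_health_check(self):
        provider = MockLLMProvider(available=True)
        assert provider.health_check() is True

    def test_mock_stream_generate(self):
        provider = MockLLMProvider()
        chunks = list(provider.stream_generate("test prompt"))
        assert chunks == ["Mock"]

    def test_mock_agenerate(self):
        provider = MockLLMProvider()
        response = asyncio.run(provider.agenerate("test prompt"))
        assert isinstance(response, LLMResponse)
        assert response.text == "Mock review response"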