Add remaining test files: test_fixes, test_llm, test_generate, test_report, test_utils
Some checks failed
CI / test (push) Has been cancelled
CI / build (push) Has been cancelled

commit 524ea59fec
parent 1fcfab3ca3
committed 2026-01-30 18:05:16 +00:00

tests/test_llm.py (new normal file, 125 lines)

@@ -0,0 +1,125 @@
import os
import tempfile
from pathlib import Path
from unittest.mock import Mock, patch, MagicMock

import pytest

from config_auditor.llm import (
    LLMClient,
    OllamaClient,
    FallbackClient,
    LLMProvider,
    load_config,
    get_llm_config,
)
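

# TestOllamaClient exercises the HTTP-backed client: constructor wiring,
# prompt assembly from audit issues, and both the success and error paths
# of get_recommendation. requests.post is mocked throughout, so no live
# Ollama server is needed to run these tests.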
class TestOllamaClient:
    def test_initialization(self):
        client = OllamaClient(endpoint="http://localhost:11434", model="llama3")
        assert client.endpoint == "http://localhost:11434"
        assert client.model == "llama3"

    def test_build_prompt(self):
        client = OllamaClient()
        issues = [
            {"message": "Missing test script"},
            {"message": "Deprecated package found"},
        ]
        prompt = client._build_prompt("json", issues)
        assert "Missing test script" in prompt
        assert "Deprecated package found" in prompt
        assert "json" in prompt

    def test_build_prompt_empty_issues(self):
        client = OllamaClient()
        prompt = client._build_prompt("json", [])
        assert "No issues found" in prompt

    @patch("config_auditor.llm.requests.post")
    def test_get_recommendation_success(self, mock_post):
        mock_response = Mock()
        mock_response.status_code = 200
        mock_response.json.return_value = {"response": "Test recommendation"}
        mock_post.return_value = mock_response

        client = OllamaClient()
        result = client.get_recommendation("json", [])
        assert result == "Test recommendation"

    @patch("config_auditor.llm.requests.post")
    def test_get_recommendation_error(self, mock_post):
        mock_post.side_effect = Exception("Connection failed")

        client = OllamaClient()
        result = client.get_recommendation("json", [])
        assert "LLM unavailable" in result
class TestFallbackClient:
    def test_get_recommendation_for_deprecated_package(self):
        client = FallbackClient()
        issues = [{"category": "deprecated-package"}]
        result = client.get_recommendation("json", issues)
        assert "modern equivalents" in result

    def test_get_recommendation_for_missing_scripts(self):
        client = FallbackClient()
        issues = [{"category": "missing-scripts"}]
        result = client.get_recommendation("json", issues)
        assert "standard scripts" in result

    def test_get_recommendation_unknown_category(self):
        client = FallbackClient()
        issues = [{"category": "unknown-category"}]
        result = client.get_recommendation("json", issues)
        assert "best practices" in result
class TestLLMProvider:
    def test_get_client_fallback_when_unavailable(self):
        provider = LLMProvider(endpoint="http://unavailable:11434")
        with patch.object(OllamaClient, "is_available", return_value=False):
            client = provider.get_client()
            assert isinstance(client, FallbackClient)

    def test_get_recommendation_uses_client(self):
        provider = LLMProvider()
        with patch.object(FallbackClient, "get_recommendation", return_value="Test"):
            result = provider.get_recommendation("json", [])
            assert result == "Test"
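

# TestConfigLoading runs the loaders from an empty temporary directory,
# so no config file is found: load_config returns an empty dict and
# get_llm_config falls back to its defaults (local Ollama endpoint,
# llama3 model).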
class TestConfigLoading:
    def test_load_config_not_found(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            old_cwd = os.getcwd()
            os.chdir(tmpdir)
            try:
                config = load_config()
                assert config == {}
            finally:
                os.chdir(old_cwd)

    def test_get_llm_config_default(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            old_cwd = os.getcwd()
            os.chdir(tmpdir)
            try:
                config = get_llm_config()
                assert config["endpoint"] == "http://localhost:11434"
                assert config["model"] == "llama3"
            finally:
                os.chdir(old_cwd)
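
For orientation: the mocked requests.post call pattern in TestOllamaClient matches Ollama's /api/generate endpoint, which returns its completion in a "response" field. A hypothetical shape of the method under test, inferred only from these assertions and not taken from the actual config_auditor.llm source:

import requests

def get_recommendation(self, file_type, issues):
    # Hypothetical reconstruction based on the tests above, not the real
    # implementation: POST the built prompt to Ollama and surface a
    # recognizable marker string if the server cannot be reached.
    prompt = self._build_prompt(file_type, issues)
    try:
        resp = requests.post(
            f"{self.endpoint}/api/generate",
            json={"model": self.model, "prompt": prompt, "stream": False},
            timeout=30,
        )
        return resp.json()["response"]
    except Exception:
        return "LLM unavailable: using fallback guidance."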
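
Side note: the chdir/try/finally pattern in TestConfigLoading can also be written with pytest's built-in tmp_path and monkeypatch fixtures, which restore the working directory automatically when the test ends. A minimal sketch of the same test in that style:

def test_load_config_not_found(tmp_path, monkeypatch):
    # monkeypatch.chdir switches into the empty temp dir and undoes the
    # change during teardown, even if the assertion fails.
    monkeypatch.chdir(tmp_path)
    assert load_config() == {}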