fix: resolve CI test failure in output.py
- Fixed undefined 'tool' variable in display_history function
- Changed '[tool]' markup tag usage to proper Rich syntax
- All tests now pass (38/38 unit tests)
- Type checking passes with mypy --strict
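The markup change described above is roughly of the following shape. This is a hedged sketch only: output.py itself is not part of this diff, so the function body, the `entries` parameter, and the use of rich.markup.escape are illustrative assumptions rather than the actual fix.

# Hypothetical sketch of the display_history fix (not the real output.py code).
from rich.console import Console
from rich.markup import escape

console = Console()


def display_history(entries: list[str]) -> None:
    """Print history entries without Rich treating '[tool]' as a style tag."""
    for entry in entries:
        # Before the fix, a call along the lines of console.print(f"[tool] {tool}")
        # referenced an undefined 'tool' name and let Rich parse "[tool]" as markup.
        # Escaping the text renders literal square brackets instead.
        console.print(escape(entry))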
tests/unit/test_formatters.py (new file, 428 lines added)
@@ -0,0 +1,428 @@
"""Unit tests for output formatters."""

import json
from pathlib import Path

from codesnap.core.analyzer import AnalysisResult, FileAnalysis
from codesnap.core.parser import ClassInfo, FunctionInfo
from codesnap.output.json_exporter import export_json, export_json_file
from codesnap.output.llm_exporter import (
    estimate_tokens,
    export_llm_optimized,
    truncate_for_token_limit,
)
from codesnap.output.markdown_exporter import export_markdown, export_markdown_file


class TestJsonExporter:
    """Tests for JSON export functionality."""

    def create_test_result(self):
        """Create a test analysis result."""
        func = FunctionInfo(
            name="test_function",
            node_type="function_definition",
            start_line=1,
            end_line=10,
            parameters=[{"name": "x", "type": "int"}],
            return_type="str",
            is_async=False,
        )

        cls = ClassInfo(
            name="TestClass",
            start_line=1,
            end_line=20,
            bases=["BaseClass"],
            methods=[func],
        )

        file_analysis = FileAnalysis(
            path=Path("/test/project/main.py"),
            language="python",
            size=500,
            lines=50,
            functions=[func],
            classes=[cls],
        )

        result = AnalysisResult()
        result.summary = {
            "total_files": 1,
            "total_functions": 1,
            "total_classes": 1,
            "total_dependencies": 0,
            "languages": {"python": 1},
        }
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0.1
        result.error_count = 0

        return result

    def test_export_json_structure(self):
        result = self.create_test_result()
        root = Path("/test/project")

        json_output = export_json(result, root)

        data = json.loads(json_output)

        assert "metadata" in data
        assert "summary" in data
        assert "files" in data
        assert "dependencies" in data
        assert "metrics" in data

    def test_export_json_metadata(self):
        result = self.create_test_result()
        root = Path("/test/project")

        json_output = export_json(result, root)
        data = json.loads(json_output)

        assert data["metadata"]["tool"] == "CodeSnap"
        assert data["metadata"]["version"] == "0.1.0"
        assert "timestamp" in data["metadata"]
        assert data["metadata"]["root_path"] == "/test/project"

    def test_export_json_summary(self):
        result = self.create_test_result()
        root = Path("/test/project")

        json_output = export_json(result, root)
        data = json.loads(json_output)

        assert data["summary"]["total_files"] == 1
        assert data["summary"]["total_functions"] == 1
        assert data["summary"]["total_classes"] == 1

    def test_export_json_functions(self):
        result = self.create_test_result()
        root = Path("/test/project")

        json_output = export_json(result, root)
        data = json.loads(json_output)

        assert len(data["files"]) == 1
        assert len(data["files"][0]["functions"]) == 1
        assert data["files"][0]["functions"][0]["name"] == "test_function"

    def test_export_json_classes(self):
        result = self.create_test_result()
        root = Path("/test/project")

        json_output = export_json(result, root)
        data = json.loads(json_output)

        assert len(data["files"][0]["classes"]) == 1
        assert data["files"][0]["classes"][0]["name"] == "TestClass"
        assert data["files"][0]["classes"][0]["bases"] == ["BaseClass"]

    def test_export_json_file(self, tmp_path):
        result = self.create_test_result()
        root = Path("/test/project")
        output_file = tmp_path / "output.json"

        export_json_file(result, root, output_file)

        assert output_file.exists()

        data = json.loads(output_file.read_text())
        assert "metadata" in data


class TestMarkdownExporter:
    """Tests for Markdown export functionality."""

    def create_test_result(self):
        """Create a test analysis result."""
        func = FunctionInfo(
            name="process_data",
            node_type="function_definition",
            start_line=5,
            end_line=15,
            parameters=[{"name": "data"}, {"name": "options"}],
            is_async=True,
        )

        file_analysis = FileAnalysis(
            path=Path("/test/project/utils.py"),
            language="python",
            size=300,
            lines=30,
            functions=[func],
            classes=[],
        )

        result = AnalysisResult()
        result.summary = {
            "total_files": 1,
            "total_functions": 1,
            "total_classes": 0,
            "total_dependencies": 0,
            "languages": {"python": 1},
        }
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0.05
        result.error_count = 0

        return result

    def test_export_markdown_header(self):
        result = self.create_test_result()
        root = Path("/test/project")

        md_output = export_markdown(result, root)

        assert "# CodeSnap Analysis Report" in md_output

    def test_export_markdown_summary(self):
        result = self.create_test_result()
        root = Path("/test/project")

        md_output = export_markdown(result, root)

        assert "## Summary" in md_output
        assert "Total Files" in md_output
        assert "1" in md_output

    def test_export_markdown_language_breakdown(self):
        result = self.create_test_result()
        root = Path("/test/project")

        md_output = export_markdown(result, root)

        assert "### Language Breakdown" in md_output
        assert "python" in md_output.lower()

    def test_export_markdown_file_structure(self):
        result = self.create_test_result()
        root = Path("/test/project")

        md_output = export_markdown(result, root)

        assert "## File Structure" in md_output
        assert "```" in md_output

    def test_export_markdown_functions(self):
        result = self.create_test_result()
        root = Path("/test/project")

        md_output = export_markdown(result, root)

        assert "process_data" in md_output
        assert "async" in md_output.lower()

    def test_export_markdown_file(self, tmp_path):
        result = self.create_test_result()
        root = Path("/test/project")
        output_file = tmp_path / "output.md"

        export_markdown_file(result, root, output_file)

        assert output_file.exists()
        content = output_file.read_text()
        assert "# CodeSnap Analysis Report" in content

    def test_empty_result(self):
        result = AnalysisResult()
        result.summary = {}
        result.files = []
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0
        result.error_count = 0

        root = Path("/test")
        md_output = export_markdown(result, root)

        assert "# CodeSnap Analysis Report" in md_output


class TestLLMExporter:
    """Tests for LLM-optimized export functionality."""

    def test_estimate_tokens_python(self):
        text = "def hello():\n print('hello')"
        tokens = estimate_tokens(text, "python")

        assert tokens > 0
        assert tokens < len(text)

    def test_estimate_tokens_markdown(self):
        text = "# Heading\n\nSome content here."
        tokens = estimate_tokens(text, "markdown")

        assert tokens > 0

    def test_truncate_under_limit(self):
        text = "Short text"
        result = truncate_for_token_limit(text, 100, "markdown")

        assert result == text

    def test_truncate_over_limit(self):
        text = "A" * 1000
        result = truncate_for_token_limit(text, 100, "markdown")

        assert len(result) < len(text)
        assert "[Output truncated due to token limit]" in result

    def test_export_llm_optimized_structure(self):
        func = FunctionInfo(
            name="helper",
            node_type="function",
            start_line=1,
            end_line=5,
        )

        file_analysis = FileAnalysis(
            path=Path("/test/main.py"),
            language="python",
            size=100,
            lines=10,
            functions=[func],
            classes=[],
        )

        result = AnalysisResult()
        result.summary = {
            "total_files": 1,
            "total_functions": 1,
            "total_classes": 0,
            "total_dependencies": 0,
            "languages": {"python": 1},
        }
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0.01
        result.error_count = 0

        root = Path("/test")
        output = export_llm_optimized(result, root)

        assert "## CODEBASE ANALYSIS SUMMARY" in output
        assert "### STRUCTURE" in output
        assert "### KEY COMPONENTS" in output

    def test_export_llm_with_max_tokens(self):
        func = FunctionInfo(
            name="test",
            node_type="function",
            start_line=1,
            end_line=5,
        )

        file_analysis = FileAnalysis(
            path=Path("/test/main.py"),
            language="python",
            size=100,
            lines=10,
            functions=[func],
            classes=[],
        )

        result = AnalysisResult()
        result.summary = {
            "total_files": 1,
            "total_functions": 1,
            "total_classes": 0,
            "total_dependencies": 0,
            "languages": {"python": 1},
        }
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0.01
        result.error_count = 0

        root = Path("/test")
        output = export_llm_optimized(result, root, max_tokens=100)

        tokens = estimate_tokens(output, "markdown")
        assert tokens <= 100 or "[Output truncated" in output


class TestFormatterIntegration:
    """Integration tests for formatters."""

    def test_json_is_valid_json(self):
        func = FunctionInfo(name="test", node_type="func", start_line=1, end_line=10)
        file_analysis = FileAnalysis(
            path=Path("/test/main.py"),
            language="python",
            size=100,
            lines=10,
            functions=[func],
        )

        result = AnalysisResult()
        result.summary = {"total_files": 1}
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0

        root = Path("/test")

        json_output = export_json(result, root)

        data = json.loads(json_output)
        assert data is not None

    def test_markdown_is_readable(self):
        func = FunctionInfo(name="test", node_type="func", start_line=1, end_line=10)
        file_analysis = FileAnalysis(
            path=Path("/test/main.py"),
            language="python",
            size=100,
            lines=10,
            functions=[func],
        )

        result = AnalysisResult()
        result.summary = {"total_files": 1}
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0

        root = Path("/test")

        md_output = export_markdown(result, root)

        assert md_output is not None
        assert len(md_output) > 0
        assert "#" in md_output

    def test_llm_output_has_summary_first(self):
        func = FunctionInfo(name="test", node_type="func", start_line=1, end_line=10)
        file_analysis = FileAnalysis(
            path=Path("/test/main.py"),
            language="python",
            size=100,
            lines=10,
            functions=[func],
        )

        result = AnalysisResult()
        result.summary = {"total_files": 1}
        result.files = [file_analysis]
        result.dependencies = []
        result.metrics = {}
        result.analysis_time = 0

        root = Path("/test")

        output = export_llm_optimized(result, root)

        summary_pos = output.find("CODEBASE ANALYSIS SUMMARY")
        structure_pos = output.find("STRUCTURE")

        assert summary_pos < structure_pos