Add remaining test files: test_merge, test_sync, test_validate
This commit is contained in:
174
confsync/tests/test_validate.py
Normal file
174
confsync/tests/test_validate.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Tests for configuration validation system."""
|
||||
|
||||
import pytest
|
||||
|
||||
from confsync.core.validator import Validator
|
||||
from confsync.models.config_models import (
|
||||
ConfigFile,
|
||||
ConfigCategory,
|
||||
ValidationResult,
|
||||
ValidationIssue,
|
||||
Severity,
|
||||
)
|
||||
|
||||
|
||||
class TestValidator:
    """Tests for the Validator class."""

    @staticmethod
    def _make_config(path, name, category, tool, body):
        """Build a ConfigFile fixture for a single file-validation case."""
        return ConfigFile(
            path=path,
            name=name,
            category=category,
            tool_name=tool,
            content=body,
        )

    def test_validate_valid_json(self):
        """Test validating valid JSON."""
        cfg = self._make_config(
            "/test/settings.json",
            "settings.json",
            ConfigCategory.EDITOR,
            "test",
            '{"key": "value", "number": 42}',
        )

        outcome = Validator().validate_file(cfg)

        # A syntactically valid file yields a clean, single-file result.
        assert outcome.is_valid
        assert outcome.validated_files == 1
        assert len(outcome.issues) == 0

    def test_validate_invalid_json(self):
        """Test validating invalid JSON."""
        # Trailing comma makes this JSON malformed on purpose.
        cfg = self._make_config(
            "/test/settings.json",
            "settings.json",
            ConfigCategory.EDITOR,
            "test",
            '{"key": "value",}',
        )

        outcome = Validator().validate_file(cfg)

        assert not outcome.is_valid
        assert len(outcome.issues) > 0
        assert outcome.issues[0].severity == Severity.ERROR

    def test_validate_valid_yaml(self):
        """Test validating valid YAML."""
        cfg = self._make_config(
            "/test/config.yaml",
            "config.yaml",
            ConfigCategory.EDITOR,
            "test",
            'key: value\nnumber: 42',
        )

        outcome = Validator().validate_file(cfg)

        assert outcome.is_valid

    def test_validate_valid_toml(self):
        """Test validating valid TOML."""
        cfg = self._make_config(
            "/test/pyproject.toml",
            "pyproject.toml",
            ConfigCategory.EDITOR,
            "test",
            '[project]\nname = "test"\nversion = "1.0.0"',
        )

        outcome = Validator().validate_file(cfg)

        assert outcome.is_valid

    def test_validate_shell_script_syntax(self):
        """Test shell script syntax validation."""
        cfg = self._make_config(
            "/test/.bashrc",
            ".bashrc",
            ConfigCategory.SHELL,
            "bash",
            'export PATH=$PATH:/usr/local/bin\nalias ll="ls -la"',
        )

        outcome = Validator().validate_file(cfg)

        assert outcome.is_valid
|
||||
|
||||
|
||||
class TestValidationManifest:
    """Tests for manifest-level validation."""

    def test_validate_manifest_multiple_files(self):
        """Test validating multiple configuration files."""
        checker = Validator()

        # (path, name, content) triples for each file in the manifest;
        # both entries share the EDITOR category and "test" tool name.
        specs = [
            ("/test/file1.json", "file1.json", '{"key": "value"}'),
            ("/test/file2.yaml", "file2.yaml", 'key: value'),
        ]
        manifest = [
            ConfigFile(
                path=file_path,
                name=file_name,
                category=ConfigCategory.EDITOR,
                tool_name="test",
                content=body,
            )
            for file_path, file_name, body in specs
        ]

        outcome = checker.validate_manifest(manifest)

        assert outcome.is_valid
        assert outcome.validated_files == 2
|
||||
|
||||
|
||||
class TestValidationReport:
    """Tests for validation report generation."""

    def test_generate_report_no_issues(self):
        """Test generating report with no issues."""
        reporter = Validator()
        clean_result = ValidationResult(is_valid=True, validated_files=5)

        text = reporter.generate_report(clean_result)

        assert "No issues found" in text
        assert "VALID" in text

    def test_generate_report_with_issues(self):
        """Test generating report with issues."""
        reporter = Validator()
        failed_result = ValidationResult(is_valid=False, validated_files=3)

        # One ERROR and one WARNING (the latter with a suggestion), built
        # from a small table so the two add_issue calls stay symmetric.
        issue_specs = (
            dict(
                rule="test_error",
                message="Test error message",
                severity=Severity.ERROR,
                file_path="/test/file.json",
            ),
            dict(
                rule="test_warning",
                message="Test warning message",
                severity=Severity.WARNING,
                file_path="/test/file.yaml",
                suggestion="Fix this",
            ),
        )
        for spec in issue_specs:
            failed_result.add_issue(ValidationIssue(**spec))

        text = reporter.generate_report(failed_result)

        assert "ERRORS" in text
        assert "WARNINGS" in text
        assert "Test error message" in text
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run this module's tests directly with pytest in verbose mode.
    # pytest.main() returns an exit code; raise it via SystemExit so a
    # failing run exits non-zero (the original discarded the return value
    # and always exited 0, hiding failures from shells/CI).
    raise SystemExit(pytest.main([__file__, "-v"]))
|
||||
Reference in New Issue
Block a user