"""Confidence scoring engine for AI Code Audit CLI."""

from typing import Optional

from ..core.models import ScanResult, Issue, IssueCategory, SeverityLevel


class ConfidenceScorer:
    """Calculate confidence scores based on scan results.

    Scoring model: start from a base score of 100 and subtract a
    per-file-normalized deduction derived from every issue found.
    Each issue's deduction is ``category weight * severity multiplier``,
    escalated further for security issues.
    """

    # Per-category weights — security problems hurt the score the most.
    SECURITY_WEIGHT = 3.0
    ERROR_HANDLING_WEIGHT = 2.0
    CODE_QUALITY_WEIGHT = 1.5
    ANTI_PATTERN_WEIGHT = 1.2
    COMPLEXITY_WEIGHT = 1.0
    STYLE_WEIGHT = 0.5

    # Extra factor applied on top of the category weight for security issues.
    SECURITY_ESCALATION = 1.5

    SEVERITY_MULTIPLIERS = {
        SeverityLevel.CRITICAL: 5.0,
        SeverityLevel.HIGH: 3.0,
        SeverityLevel.MEDIUM: 1.5,
        SeverityLevel.LOW: 0.5,
    }

    # Hoisted from _get_category_weight so the mapping is built once,
    # not as a fresh dict literal on every call.
    CATEGORY_WEIGHTS = {
        IssueCategory.SECURITY: SECURITY_WEIGHT,
        IssueCategory.ERROR_HANDLING: ERROR_HANDLING_WEIGHT,
        IssueCategory.CODE_QUALITY: CODE_QUALITY_WEIGHT,
        IssueCategory.ANTI_PATTERN: ANTI_PATTERN_WEIGHT,
        IssueCategory.COMPLEXITY: COMPLEXITY_WEIGHT,
        IssueCategory.STYLE: STYLE_WEIGHT,
    }

    # (threshold, grade) bands for get_score_grade, highest first.
    _GRADE_BANDS = (
        (95, "A+"), (90, "A"), (85, "A-"),
        (80, "B+"), (75, "B"), (70, "B-"),
        (65, "C+"), (60, "C"), (55, "C-"),
        (50, "D+"), (45, "D"), (40, "D-"),
    )

    # (threshold, description) bands for get_score_description, highest first.
    _DESCRIPTION_BANDS = (
        (90, "Excellent - Code is well-written and secure"),
        (75, "Good - Code is generally sound with minor issues"),
        (60, "Fair - Code has some issues that should be addressed"),
        (45, "Poor - Code has significant issues requiring attention"),
        (30, "Bad - Code has serious issues and security concerns"),
    )

    def __init__(self) -> None:
        """Initialize the confidence scorer."""
        self.base_score = 100

    def calculate(self, result: ScanResult) -> int:
        """Calculate confidence score (0-100) based on scan results.

        A scan that covered no files returns a perfect 100 — there is
        nothing to deduct for.

        Args:
            result: Completed scan result with ``files_scanned`` and
                ``issues``.

        Returns:
            Integer score clamped to the inclusive range 0-100.
        """
        if result.files_scanned == 0:
            return 100

        deductions = self._calculate_deductions(result)
        # Clamp before truncating so the score never escapes 0-100.
        return int(max(0, min(100, self.base_score - deductions)))

    def _issue_deduction(self, issue: Issue) -> float:
        """Return the deduction a single issue contributes.

        ``category weight * severity multiplier``, escalated by
        SECURITY_ESCALATION for security issues. Unknown severities fall
        back to a neutral 1.0 multiplier.

        Shared by calculate() and get_score_breakdown() so both always
        agree on what an issue costs. (Bug fix: the breakdown previously
        re-implemented this computation WITHOUT the security escalation,
        so its reported deductions disagreed with the actual score.)
        """
        deduction = (
            self._get_category_weight(issue.category)
            * self.SEVERITY_MULTIPLIERS.get(issue.severity, 1.0)
        )
        if issue.category == IssueCategory.SECURITY:
            deduction *= self.SECURITY_ESCALATION
        return deduction

    def _calculate_deductions(self, result: ScanResult) -> float:
        """Calculate total deductions, normalized per scanned file."""
        total_deduction = sum(
            self._issue_deduction(issue) for issue in result.issues
        )
        # max(1, ...) guards against division by zero; callers already
        # short-circuit files_scanned == 0, but stay defensive here.
        return total_deduction / max(1, result.files_scanned)

    def _get_category_weight(self, category: IssueCategory) -> float:
        """Get weight for an issue category (1.0 for unknown categories)."""
        return self.CATEGORY_WEIGHTS.get(category, 1.0)

    def get_score_breakdown(self, result: ScanResult) -> dict:
        """Get detailed breakdown of the score calculation.

        Returns a dict with the base score, the raw (un-normalized) total
        deduction, per-category and per-severity deduction sums keyed by
        enum ``.value``, and the final score from calculate().
        """
        category_deductions: dict = {}
        severity_deductions: dict = {}

        for issue in result.issues:
            # Single source of truth — includes the security escalation
            # that calculate() applies.
            deduction = self._issue_deduction(issue)

            cat_key = issue.category.value
            category_deductions[cat_key] = (
                category_deductions.get(cat_key, 0.0) + deduction
            )

            sev_key = issue.severity.value
            severity_deductions[sev_key] = (
                severity_deductions.get(sev_key, 0.0) + deduction
            )

        return {
            "base_score": self.base_score,
            # Raw sum over all issues; calculate() divides this by the
            # number of files before applying it to the base score.
            "total_deductions": sum(category_deductions.values()),
            "issues_by_category": category_deductions,
            "issues_by_severity": severity_deductions,
            "final_score": self.calculate(result),
        }

    def get_score_grade(self, score: int) -> str:
        """Get a letter grade for the confidence score (A+ down to F)."""
        for threshold, grade in self._GRADE_BANDS:
            if score >= threshold:
                return grade
        return "F"

    def get_score_description(self, score: int) -> str:
        """Get a human-readable description for the confidence score."""
        for threshold, description in self._DESCRIPTION_BANDS:
            if score >= threshold:
                return description
        return "Critical - Code requires immediate review and fixes"