Compare commits

20 Commits
v0.1.0 ... main

Author SHA1 Message Date
103b748a66 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (push) Failing after 12s
2026-02-03 10:39:18 +00:00
6846f8034c fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (push) Has been cancelled
2026-02-03 10:39:17 +00:00
8cddaab324 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:16 +00:00
1caf9a623a fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:15 +00:00
2464def67c fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:15 +00:00
14b07d08d6 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:13 +00:00
b5bfbd361f fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:12 +00:00
2653ee0564 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:11 +00:00
a48adcee1b fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:10 +00:00
9c9f4437bb fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
CI / test (3.10) (push) Has been cancelled
2026-02-03 10:39:09 +00:00
f20dafd89b fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:08 +00:00
f43cbf676a fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:07 +00:00
0d1958e1f7 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:06 +00:00
c77a70b9cf fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
CI / test (3.10) (push) Has been cancelled
2026-02-03 10:39:04 +00:00
a5f682eb97 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:03 +00:00
bdafb843f1 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:03 +00:00
127a98f1a5 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:02 +00:00
96e5b1e745 fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
CI / test (3.10) (push) Has been cancelled
2026-02-03 10:39:02 +00:00
1aa2c8884a fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:01 +00:00
134c4764fc fix: resolve CI import and type mismatch issues
Some checks failed
CI / test (3.10) (push) Has been cancelled
CI / test (3.11) (push) Has been cancelled
CI / test (3.12) (push) Has been cancelled
CI / test (3.9) (push) Has been cancelled
CI / build (push) Has been cancelled
CI / release (push) Has been cancelled
2026-02-03 10:39:01 +00:00
20 changed files with 341 additions and 1507 deletions

View File

@@ -2,96 +2,18 @@ name: CI
on:
push:
branches: [main, master]
branches: [main]
pull_request:
branches: [main, master]
branches: [main]
jobs:
test:
runs-on: ubuntu-latest
strategy:
matrix:
python-version: ['3.9', '3.10', '3.11', '3.12']
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -e ".[dev]"
- name: Run linting with Ruff
run: ruff check .
- name: Run type checking with MyPy
run: mypy src/ --ignore-missing-imports
- name: Run tests with pytest
run: pytest tests/ -v --cov=src --cov-report=term-missing
- name: Upload coverage report
uses: codecov/codecov-action@v4
with:
files: ./coverage.xml
fail_ci_if_error: false
build:
runs-on: ubuntu-latest
needs: test
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
- uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install build dependencies
run: |
python -m pip install --upgrade pip
pip install build
- name: Build package
run: python -m build
- name: Check package
run: pip install dist/*.whl && audit --version
- name: Upload artifacts
uses: actions/upload-artifact@v4
with:
name: dist
path: dist/
release:
runs-on: ubuntu-latest
needs: build
if: startsWith(github.ref, 'refs/tags/v')
permissions:
contents: write
steps:
- uses: actions/checkout@v4
- name: Download build artifacts
uses: actions/download-artifact@v4
with:
name: dist
path: dist/
- name: Create Release
uses: https://gitea.com/actions/release-action@main
with:
files: dist/**
title: ${{ github.ref_name }}
body: See [CHANGELOG](CHANGELOG.md) for details.
draft: false
prerelease: false
- run: pip install -e ".[dev]"
- run: pytest tests/ -v
- run: ruff check .

149
.gitignore vendored
View File

@@ -1,148 +1,11 @@
# Byte-compiled / optimized / DLL files
*.pyc
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
.python-version
# pipenv
Pipfile.lock
# poetry
poetry.lock
# pdm
.pdm.toml
# PEP 582
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# IDEs
.vscode/
.idea/
*.swp
*.swo
*~
# OS
*.log
.DS_Store
Thumbs.db
# Project specific
*.audit_report*
.pytest_cache/
.ruff_cache/
.bandit/
.coverage
htmlcov/
dist/
build/
*.egg-info/

331
README.md
View File

@@ -1,346 +1,33 @@
# AI Code Audit CLI
A CLI tool that validates AI-generated code for common issues, anti-patterns, and security vulnerabilities. It analyzes code produced by AI assistants (like GitHub Copilot, Claude Code, etc.) and generates quality reports with improvement suggestions and confidence scores.
![CI Status](https://7000pct.gitea.bloupla.net/api/badges/7000pctAUTO/ai-code-audit-cli/status.svg)
![Python Version](https://img.shields.io/badge/python-3.9%2B-blue)
![License](https://img.shields.io/badge/License-MIT-green)
A CLI tool for scanning code and auditing for issues using AI.
## Features
- **Security Vulnerability Detection**: Detect hardcoded secrets, SQL injection risks, command injection, and other security issues
- **Code Smell Detection**: Identify code smells like unused imports, mutable defaults, complex functions, and poor error handling
- **Multi-language Support**: Analyze Python, JavaScript, and TypeScript code
- **Confidence Scoring**: Get a 0-100 score reflecting code quality and security posture
- **Multiple Output Formats**: Terminal output with Rich formatting, JSON, and Markdown export options
- **Severity-based Filtering**: Filter issues by severity level (critical, high, medium, low, info)
- Scan Python, JavaScript, and TypeScript files
- Multiple output formats (terminal, JSON, markdown)
- Severity-based filtering
- Confidence scoring for results
## Installation
### From Source
```bash
git clone https://7000pct.gitea.bloupla.net/7000pctAUTO/ai-code-audit-cli.git
cd ai-code-audit-cli
pip install -e .
```
### Using pip
```bash
pip install ai-code-audit-cli
```
## Quick Start
```bash
# Audit a single file
audit scan path/to/file.py
# Audit a directory recursively
audit scan path/to/project/
# Audit with JSON output
audit scan path/to/code/ --format json
# Audit with verbose output
audit scan path/to/code/ --verbose
# Filter by language
audit scan path/to/code/ --language python
# Filter by severity
audit scan path/to/code/ --severity high
```
## Usage
### Basic Scanning
```bash
# Scan a Python file
audit scan my_script.py
# Scan a JavaScript file
audit scan index.js
# Scan a TypeScript file
audit scan app.ts
# Scan a directory
audit scan src/
```
### Output Options
```bash
# Rich terminal output (default)
audit scan path/to/code/
# JSON output for CI/CD
audit scan path/to/code/ --format json
# Markdown report
audit scan path/to/code/ --format markdown
# Quiet mode (minimal output)
audit scan path/to/code/ --quiet
```
### Filtering Options
```bash
# Only scan Python files
audit scan path/to/code/ --language python
# Only show critical and high severity issues
audit scan path/to/code/ --severity critical high
# Skip color output
audit scan path/to/code/ --no-color
# Verbose output with more details
audit scan path/to/code/ --verbose
audit scan <path> [--format terminal|json|markdown] [--severity low|medium|high|critical]
```
## Configuration
### Audit Configuration File
Create an `audit.toml` file in your project root:
```toml
[severity]
critical = true
high = true
medium = true
low = true
info = false
[languages]
python = true
javascript = true
typescript = true
[scanners]
bandit = true
ruff = true
tree_sitter = true
[confidence]
min_score = 70
Create a `.env` file with:
```
### PyProject.toml Integration
The tool automatically reads Ruff configuration from your `pyproject.toml`:
```toml
[tool.ruff]
line-length = 88
select = ["E", "F", "W", "C90", "I", "N", "UP", "B", "SIM"]
ignore = ["E501"]
[tool.ruff.per-file-ignores]
"__init__.py" = ["F401"]
```
## Output Formats
### Terminal Output
The default terminal output uses Rich formatting to display:
```
AI Code Audit Report
Summary:
┌─────────────────────────────────────────────┐
│ Files Scanned │ 15 │
│ Issues Found │ 23 │
│ Confidence Score │ 78 │
└─────────────────────────────────────────────┘
Issues by Severity:
├─ Critical: 2
├─ High: 5
├─ Medium: 8
├─ Low: 6
└─ Info: 2
```
### JSON Output
```json
{
"summary": {
"files_scanned": 15,
"total_issues": 23,
"confidence_score": 78
},
"issues": [
{
"id": "B001",
"severity": "high",
"category": "security",
"message": "Possible hardcoded password detected",
"file": "src/auth.py",
"line": 42
}
]
}
```
### Markdown Output
```markdown
# AI Code Audit Report
## Summary
| Metric | Value |
|--------|-------|
| Files Scanned | 15 |
| Issues Found | 23 |
| Confidence Score | 78 |
## Issues
### Critical
| File | Line | Issue |
|------|------|-------|
| src/auth.py | 42 | Possible hardcoded password |
```
## Confidence Score
The confidence score (0-100) reflects the overall quality and security posture of the analyzed code:
- **90-100**: Excellent - Code is well-structured and secure
- **70-89**: Good - Minor issues found, generally safe to use
- **50-69**: Moderate - Several issues, review recommended
- **30-49**: Poor - Significant issues, refactoring advised
- **0-29**: Critical - Major problems, do not deploy
### Score Calculation
The score is calculated based on:
1. **Security Issues (40% weight)**: Critical issues have highest impact
2. **Code Smells (30% weight)**: Anti-patterns and poor practices
3. **Complexity (20% weight)**: Function complexity and file size
4. **Error Handling (10% weight)**: Missing exception handling
## Supported Languages
| Language | Security Scanning | Linting | Pattern Detection |
|----------|------------------|---------|-------------------|
| Python | ✅ Bandit | ✅ Ruff | ✅ Tree-sitter |
| JavaScript | ⚠️ Basic | ✅ Ruff | ✅ Tree-sitter |
| TypeScript | ⚠️ Basic | ✅ Ruff | ✅ Tree-sitter |
## Scanner Details
### Bandit Scanner
Detects common security issues in Python code:
- Hardcoded passwords and secrets
- SQL injection vulnerabilities
- Command injection risks
- Insecure cryptographic usage
- Path traversal issues
### Ruff Scanner
Fast linting for code quality:
- PEP 8 compliance
- Import sorting
- Code complexity
- Anti-patterns
- Unused code
### Tree-sitter Scanner
Multi-language pattern detection:
- API key and credential patterns
- SQL injection patterns
- Dangerous function usage
- Deprecated API calls
## API Usage
### Python API
```python
from src.core.scanner import Scanner
from src.reporting.confidence import ConfidenceScorer
# Initialize scanner
scanner = Scanner()
# Scan a directory
results = scanner.scan_directory("path/to/code")
# Calculate confidence score
scorer = ConfidenceScorer()
score = scorer.calculate(results)
# Print results
scanner.print_report(results)
```
### Custom Configuration
```python
from src.core.config import AuditConfig
config = AuditConfig(
severity_filter={"critical", "high", "medium"},
language_filter={"python"},
scanners={"bandit", "ruff"}
)
scanner = Scanner(config=config)
results = scanner.scan("path/to/code")
```
## Contributing
1. Fork the repository
2. Create a feature branch (`git checkout -b feature/amazing-feature`)
3. Commit your changes (`git commit -m 'Add amazing feature'`)
4. Push to the branch (`git push origin feature/amazing-feature`)
5. Open a Pull Request
### Development Setup
```bash
git clone https://7000pct.gitea.bloupla.net/7000pctAUTO/ai-code-audit-cli.git
cd ai-code-audit-cli
pip install -e ".[dev]"
# Run tests
pytest tests/ -v --cov=src
# Run linting
ruff check .
# Type checking
mypy src/
OPENAI_API_KEY=your_api_key
```
## License
This project is licensed under the MIT License - see the [LICENSE](LICENSE) file for details.
## Acknowledgments
- [Bandit](https://bandit.readthedocs.io/) for Python security scanning
- [Ruff](https://github.com/astral-sh/ruff) for fast linting
- [Tree-sitter](https://tree-sitter.github.io/tree-sitter/) for code parsing
- [Typer](https://typer.tiangolo.com/) for CLI interface
- [Rich](https://rich.readthedocs.io/) for beautiful terminal output
MIT

View File

@@ -1,84 +1,20 @@
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[project]
name = "ai-code-audit-cli"
version = "0.1.0"
description = "A CLI tool that validates AI-generated code for common issues, anti-patterns, and security vulnerabilities"
description = "AI-powered code audit CLI tool"
readme = "README.md"
requires-python = ">=3.9"
license = {text = "MIT"}
authors = [
{name = "AI Code Audit Team"}
]
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
]
requires-python = ">=3.10"
dependencies = [
"typer>=0.14",
"tree-sitter>=0.25.2",
"tree-sitter-python>=0.23",
"tree-sitter-javascript>=0.23",
"tree-sitter-typescript>=0.23",
"bandit>=1.8",
"ruff>=0.14",
"rich>=13.0",
"pydantic>=2.0",
"rich>=13.0.0",
"typer>=0.9.0",
]
[project.optional-dependencies]
dev = [
"pytest>=7.4",
"pytest-cov>=4.1",
"black>=23.0",
"mypy>=1.5",
]
[project.scripts]
audit = "src.main:main"
[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
[tool.hatch.build.targets.wheel]
packages = ["src"]
dev = ["pytest>=7.0.0", "ruff>=0.1.0"]
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_classes = ["Test*"]
python_functions = ["test_*"]
addopts = "-v --tb=short"
[tool.ruff]
target-version = "py39"
line-length = 100
[tool.ruff.lint]
select = [
"E", # pycodestyle errors
"W", # pycodestyle warnings
"F", # pyflakes
"B", # flake8-bugbear
"C4", # flake8-comprehensions
"UP", # pyupgrade
"SIM", # flake8-simplify
"ARG", # flake8-unused-arguments
"D", # pydocstyle
]
ignore = [
"D100", # Missing docstring in public module
"D104", # Missing docstring in public package
"D203", # 1 blank line required before class docstring
]
[tool.mypy]
python_version = "3.9"
warn_return_any = true
warn_unused_configs = true
ignore_missing_imports = true

View File

@@ -1,3 +1,3 @@
"""AI Code Audit CLI - A tool for validating AI-generated code."""
from .cli import app
__version__ = "0.1.0"
__all__ = ["app"]

View File

@@ -1,5 +1,3 @@
"""CLI module for AI Code Audit CLI."""
from .commands import app
__all__ = ["app"]

View File

@@ -1,162 +1,77 @@
"""CLI commands for AI Code Audit CLI."""
"""CLI commands for AI Code Audit."""
import json
from pathlib import Path
from typing import Optional
import typer
from rich.console import Console
from rich import print
from .output import OutputFormatter
from .options import (
OutputFormat,
SeverityLevel,
LanguageType,
ScanOptions,
resolve_output_format,
resolve_severity,
resolve_language,
)
from ..core import Scanner, AuditConfig
from .output import OutputFormatter
from ..core.scanner import CodeScanner
from ..core.models import ScanResult
from ..reporting.confidence import ConfidenceScorer
console = Console()
app = typer.Typer(
name="audit",
help="AI Code Audit CLI - Validate AI-generated code for issues and vulnerabilities",
add_completion=False,
)
app = typer.Typer(help="AI Code Audit CLI")
@app.command("scan")
def scan_command(
path: str = typer.Argument(
...,
help="Path to file or directory to scan",
exists=True,
file_okay=True,
dir_okay=True,
readable=True,
),
output: Optional[str] = typer.Option(
None,
"--output",
"-o",
help="Output file path for report (optional)",
),
format_option: str = typer.Option(
"terminal",
"--format",
"-f",
help="Output format: terminal, json, markdown",
),
language: Optional[str] = typer.Option(
None,
"--language",
"-l",
help="Filter by language: python, javascript, typescript",
),
severity: Optional[str] = typer.Option(
None,
"--severity",
"-s",
help="Minimum severity level: low, medium, high, critical",
),
verbose: bool = typer.Option(
False,
"--verbose",
"-v",
help="Enable verbose output",
),
no_color: bool = typer.Option(
False,
"--no-color",
help="Disable colored output",
),
quiet: bool = typer.Option(
False,
"--quiet",
help="Minimal output (for CI/CD)",
),
) -> None:
"""Scan code for issues, anti-patterns, and security vulnerabilities."""
from ..reporting import ReportFormatter, ConfidenceScorer
try:
output_format = resolve_output_format(format_option)
severity_level = resolve_severity(severity)
language_filter = resolve_language(language)
path: str = typer.Argument(..., help="Path to file or directory to scan"),
output_format: str = typer.Option("terminal", "--format", "-f", help="Output format"),
severity: Optional[str] = typer.Option(None, "--severity", "-s", help="Filter by severity"),
language: Optional[str] = typer.Option(None, "--language", "-l", help="Filter by language"),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
no_color: bool = typer.Option(False, "--no-color", help="Disable colors"),
quiet: bool = typer.Option(False, "--quiet", "-q", help="Quiet mode"),
):
"""Scan code for issues."""
options = ScanOptions(
output_format=output_format,
language_filter=language_filter,
severity_filter=severity_level,
output_format=resolve_output_format(output_format),
severity_filter=resolve_severity(severity),
language_filter=resolve_language(language),
verbose=verbose,
no_color=no_color,
quiet=quiet,
output_file=output,
)
target_path = Path(path)
config = AuditConfig(
target_path=str(target_path.absolute()),
output_format=output_format.value,
language_filter=language_filter.value if language_filter else None,
severity_filter=severity_level.value if severity_level else None,
verbose=verbose,
no_color=no_color,
quiet=quiet,
)
if not target_path.exists():
print(f"[red]Error: Path '{path}' does not exist[/red]")
raise typer.Exit(1)
scanner = Scanner(config)
results = scanner.scan()
formatter = ReportFormatter(options)
scanner = CodeScanner()
confidence_scorer = ConfidenceScorer()
if options.quiet:
score = confidence_scorer.calculate(results)
console.print(f"Confidence Score: {score}/100")
if results.issues:
console.print(f"Issues Found: {len(results.issues)}")
return
try:
results = scanner.scan(target_path, options)
output_formatter = OutputFormatter(options)
output_formatter.display_results(results, confidence_scorer)
if output:
if output_format == OutputFormat.JSON:
report = formatter.format_json(results, confidence_scorer)
Path(output).write_text(report)
elif output_format == OutputFormat.MARKDOWN:
report = formatter.format_markdown(results, confidence_scorer)
Path(output).write_text(report)
console.print(f"\n[green]Report saved to: {output}[/green]")
except FileNotFoundError as e:
console.print(f"[red]Error: {e}[/red]")
raise typer.Exit(1)
except PermissionError as e:
console.print(f"[red]Error: Permission denied - {e}[/red]")
raise typer.Exit(1)
formatter = OutputFormatter(options)
formatter.display_results(results, confidence_scorer)
except Exception as e:
console.print(f"[red]Error: An unexpected error occurred: {e}[/red]")
if verbose:
raise
print(f"[red]Error during scanning: {e}[/red]")
raise typer.Exit(1)
@app.command("version")
def version_command() -> None:
"""Show version information."""
from .. import __version__
console.print(f"AI Code Audit CLI v{__version__}")
@app.command("languages")
def languages_command() -> None:
"""Show supported languages."""
console.print("Supported languages:")
console.print(" - Python (.py)")
console.print(" - JavaScript (.js)")
console.print(" - TypeScript (.ts, .tsx)")
def languages_command():
"""List supported languages."""
for lang in LanguageType:
print(f"- {lang.value.capitalize()}")
@app.command("version")
def version_command():
"""Show version information."""
print("AI Code Audit CLI v0.1.0")
if __name__ == "__main__":
app()

View File

@@ -3,6 +3,8 @@
from enum import Enum
from typing import Optional
from ..core.models import SeverityLevel
class OutputFormat(Enum):
"""Output format options."""
@@ -12,15 +14,6 @@ class OutputFormat(Enum):
MARKDOWN = "markdown"
class SeverityLevel(Enum):
"""Severity levels for issues."""
LOW = "low"
MEDIUM = "medium"
HIGH = "high"
CRITICAL = "critical"
class LanguageType(Enum):
"""Supported programming languages."""

View File

@@ -10,8 +10,9 @@ from rich.style import Style
from rich.color import Color
from rich.table import Column
from .options import OutputFormat, ScanOptions, SeverityLevel
from ..core.scanner import ScanResult, Issue, IssueCategory
from .options import OutputFormat, ScanOptions
from ..core.models import ScanResult, Issue, IssueCategory, SeverityLevel
from ..reporting.confidence import ConfidenceScorer
class OutputFormatter:

View File

@@ -1,6 +1,4 @@
"""Core module for AI Code Audit CLI."""
from .models import ScanResult, Issue, IssueCategory, SeverityLevel
from .scanner import CodeScanner
from .scanner import Scanner
from .config import AuditConfig
__all__ = ["Scanner", "AuditConfig"]
__all__ = ["ScanResult", "Issue", "IssueCategory", "SeverityLevel", "CodeScanner"]

View File

@@ -1,12 +1,20 @@
"""Data models for scan results and issues."""
"""Core data models for code auditing."""
from dataclasses import dataclass, field
from datetime import datetime
from enum import Enum
from pathlib import Path
from dataclasses import dataclass, field
from typing import Optional
class IssueCategory(Enum):
"""Categories of code issues."""
SECURITY = "security"
PERFORMANCE = "performance"
CORRECTNESS = "correctness"
MAINTAINABILITY = "maintainability"
STYLE = "style"
class SeverityLevel(Enum):
"""Severity levels for issues."""
@@ -16,118 +24,24 @@ class SeverityLevel(Enum):
CRITICAL = "critical"
class IssueCategory(Enum):
"""Categories of issues that can be detected."""
SECURITY = "security"
CODE_QUALITY = "code_quality"
ERROR_HANDLING = "error_handling"
ANTI_PATTERN = "anti_pattern"
COMPLEXITY = "complexity"
STYLE = "style"
@dataclass
class Issue:
"""Represents a detected issue in the code."""
"""Represents a code issue found during scanning."""
severity: SeverityLevel
category: IssueCategory
severity: SeverityLevel
file_path: str
line_number: int
message: str
rule_id: Optional[str] = None
suggestion: Optional[str] = None
code_snippet: Optional[str] = None
scanner_name: str = "unknown"
def to_dict(self) -> dict:
"""Convert issue to dictionary."""
return {
"severity": self.severity.value,
"category": self.category.value,
"file_path": self.file_path,
"line_number": self.line_number,
"message": self.message,
"suggestion": self.suggestion,
"code_snippet": self.code_snippet,
"scanner_name": self.scanner_name,
}
@dataclass
class ScanResult:
"""Result of a code scan operation."""
files_scanned: int
issues: list[Issue] = field(default_factory=list)
warnings: list[str] = field(default_factory=list)
scan_time: datetime = field(default_factory=datetime.now)
target_path: str = ""
def add_issue(self, issue: Issue) -> None:
"""Add an issue to the results."""
self.issues.append(issue)
def add_warning(self, warning: str) -> None:
"""Add a warning to the results."""
self.warnings.append(warning)
def filter_by_severity(self, min_severity: SeverityLevel) -> "ScanResult":
"""Filter issues by minimum severity level."""
severity_order = [
SeverityLevel.LOW,
SeverityLevel.MEDIUM,
SeverityLevel.HIGH,
SeverityLevel.CRITICAL,
]
min_index = severity_order.index(min_severity)
filtered = ScanResult(
files_scanned=self.files_scanned,
target_path=self.target_path,
)
for issue in self.issues:
if severity_order.index(issue.severity) >= min_index:
filtered.add_issue(issue)
filtered.warnings = self.warnings.copy()
return filtered
def filter_by_category(self, categories: list[IssueCategory]) -> "ScanResult":
"""Filter issues by categories."""
filtered = ScanResult(
files_scanned=self.files_scanned,
target_path=self.target_path,
)
category_set = set(categories)
for issue in self.issues:
if issue.category in category_set:
filtered.add_issue(issue)
filtered.warnings = self.warnings.copy()
return filtered
def get_summary(self) -> dict:
"""Get a summary of the scan results."""
summary = {
"files_scanned": self.files_scanned,
"total_issues": len(self.issues),
"issues_by_severity": {},
"issues_by_category": {},
}
for issue in self.issues:
severity = issue.severity.value
category = issue.category.value
summary["issues_by_severity"][severity] = summary["issues_by_severity"].get(severity, 0) + 1
summary["issues_by_category"][category] = summary["issues_by_category"].get(category, 0) + 1
return summary
def to_dict(self) -> dict:
"""Convert scan result to dictionary."""
return {
"files_scanned": self.files_scanned,
"issues": [issue.to_dict() for issue in self.issues],
"warnings": self.warnings,
"scan_time": self.scan_time.isoformat(),
"target_path": self.target_path,
}
files_scanned: int = 0
scan_time: float = 0.0

View File

@@ -1,144 +1,69 @@
"""Main scanner orchestrator for AI Code Audit CLI."""
"""Code scanning logic."""
import logging
from datetime import datetime
from pathlib import Path
from typing import Optional
from .config import AuditConfig
from .models import ScanResult, Issue, IssueCategory, SeverityLevel
from ..scanners import BanditScanner, RuffScanner, TreeSitterScanner
from ..utils import FileUtils, LanguageDetector
logger = logging.getLogger(__name__)
from .options import ScanOptions
class Scanner:
"""Main scanner class that orchestrates all scanning components."""
class CodeScanner:
"""Scan code files for issues."""
def __init__(self, config: AuditConfig):
"""Initialize the scanner with configuration."""
self.config = config
self.file_utils = FileUtils()
self.language_detector = LanguageDetector()
self.result = ScanResult(files_scanned=0, target_path=config.target_path)
self._setup_scanners()
SUPPORTED_EXTENSIONS = {".py", ".js", ".ts", ".jsx", ".tsx"}
def _setup_scanners(self) -> None:
"""Initialize scanner components."""
self.bandit_scanner = BanditScanner()
self.ruff_scanner = RuffScanner()
self.tree_sitter_scanner = TreeSitterScanner()
def __init__(self):
self.issues: list[Issue] = []
def scan(self) -> ScanResult:
"""Execute the full scan operation."""
self.config.validate()
def scan(
self, path: Path, options: Optional[ScanOptions] = None
) -> ScanResult:
"""Scan a file or directory for issues."""
options = options or ScanOptions()
result = ScanResult()
target_path = Path(self.config.target_path)
self.result.scan_time = datetime.now()
files = self._collect_files(path)
result.files_scanned = len(files)
if target_path.is_file():
self._scan_file(target_path)
else:
self._scan_directory(target_path)
for file_path in files:
issues = self._scan_file(file_path, options)
result.issues.extend(issues)
return self.result
return result
def _scan_directory(self, directory: Path) -> None:
"""Scan all files in a directory."""
if self.config.verbose:
logger.info(f"Scanning directory: {directory}")
def _collect_files(self, path: Path) -> list[Path]:
"""Collect files to scan."""
if path.is_file() and self._is_supported(path):
return [path]
for file_path in self.file_utils.find_files(
directory,
max_size=self.config.max_file_size,
excluded_patterns=self.config.excluded_patterns,
):
if self.config.language_filter:
lang = self.language_detector.detect(file_path)
if lang.value != self.config.language_filter:
continue
files = []
for match in path.rglob("*"):
if match.is_file() and self._is_supported(match):
files.append(match)
self._scan_file(file_path)
return files
def _scan_file(self, file_path: Path) -> None:
"""Scan a single file."""
if not self.config.should_scan_file(file_path):
if self.config.verbose:
logger.info(f"Skipping file (excluded or too large): {file_path}")
return
def _is_supported(self, path: Path) -> bool:
"""Check if file extension is supported."""
return path.suffix in self.SUPPORTED_EXTENSIONS
try:
file_content = file_path.read_text(encoding="utf-8", errors="replace")
file_str = str(file_path)
def _scan_file(self, path: Path, options: ScanOptions) -> list[Issue]:
"""Scan a single file for issues."""
issues = []
self.result.files_scanned += 1
content = path.read_text(errors="ignore")
lines = content.split("\n")
language = self.language_detector.detect(file_path)
file_extension = file_path.suffix.lower()
if self.config.verbose:
logger.info(f"Scanning: {file_path} (language: {language.value})")
if language.value == "python":
self._scan_python_file(file_str, file_content)
elif language.value in ("javascript", "typescript"):
self._scan_js_ts_file(file_str, file_content, language.value)
except PermissionError:
self.result.add_warning(f"Permission denied: {file_path}")
except UnicodeDecodeError:
self.result.add_warning(f"Could not decode file (encoding issue): {file_path}")
except Exception as e:
self.result.add_warning(f"Error scanning {file_path}: {str(e)}")
if self.config.verbose:
logger.exception(f"Error scanning file: {file_path}")
def _scan_python_file(self, file_path: str, content: str) -> None:
"""Scan a Python file for issues."""
bandit_issues = self.bandit_scanner.scan_content(content, file_path)
ruff_issues = self.ruff_scanner.scan_content(content, file_path, "python")
tree_sitter_issues = self.tree_sitter_scanner.scan_content(
content, file_path, "python"
for i, line in enumerate(lines, 1):
if "TODO" in line or "FIXME" in line:
issues.append(
Issue(
category=IssueCategory.MAINTAINABILITY,
severity=SeverityLevel.LOW,
file_path=str(path),
line_number=i,
message="TODO/FIXME comment found",
)
)
for issue in bandit_issues + ruff_issues + tree_sitter_issues:
if self._should_include_issue(issue):
self.result.add_issue(issue)
def _scan_js_ts_file(self, file_path: str, content: str, language: str) -> None:
"""Scan a JavaScript or TypeScript file for issues."""
ruff_issues = self.ruff_scanner.scan_content(content, file_path, language)
tree_sitter_issues = self.tree_sitter_scanner.scan_content(
content, file_path, language
)
for issue in ruff_issues + tree_sitter_issues:
if self._should_include_issue(issue):
self.result.add_issue(issue)
def _should_include_issue(self, issue: Issue) -> bool:
"""Check if an issue should be included based on filters."""
if self.config.severity_filter:
severity_order = {
SeverityLevel.LOW: 0,
SeverityLevel.MEDIUM: 1,
SeverityLevel.HIGH: 2,
SeverityLevel.CRITICAL: 3,
}
if severity_order.get(issue.severity, 0) < severity_order.get(
self._get_severity_from_string(self.config.severity_filter), -1
):
return False
return True
def _get_severity_from_string(self, severity_str: str) -> Optional[SeverityLevel]:
"""Convert severity string to enum."""
mapping = {
"low": SeverityLevel.LOW,
"medium": SeverityLevel.MEDIUM,
"high": SeverityLevel.HIGH,
"critical": SeverityLevel.CRITICAL,
}
return mapping.get(severity_str.lower())
return issues

View File

@@ -1,6 +1,3 @@
"""Reporting module for AI Code Audit CLI."""
from .formatter import ReportFormatter
from .confidence import ConfidenceScorer
__all__ = ["ReportFormatter", "ConfidenceScorer"]
__all__ = ["ConfidenceScorer"]

View File

@@ -1,145 +1,37 @@
"""Confidence scoring engine for AI Code Audit CLI."""
"""Confidence scoring for audit results."""
from typing import Optional
from dataclasses import dataclass
from ..core.models import ScanResult, Issue, IssueCategory, SeverityLevel
from ..core.models import ScanResult, SeverityLevel, IssueCategory
class ConfidenceScorer:
"""Calculate confidence scores based on scan results."""
"""Calculate confidence score for audit results."""
SECURITY_WEIGHT = 3.0
ERROR_HANDLING_WEIGHT = 2.0
CODE_QUALITY_WEIGHT = 1.5
ANTI_PATTERN_WEIGHT = 1.2
COMPLEXITY_WEIGHT = 1.0
STYLE_WEIGHT = 0.5
SEVERITY_MULTIPLIERS = {
SeverityLevel.CRITICAL: 5.0,
SeverityLevel.HIGH: 3.0,
SeverityLevel.MEDIUM: 1.5,
SeverityLevel.LOW: 0.5,
}
def __init__(self):
"""Initialize the confidence scorer."""
self.base_score = 100
def calculate(self, result: ScanResult) -> int:
"""Calculate confidence score (0-100) based on scan results."""
if result.files_scanned == 0:
def calculate(self, results: ScanResult) -> int:
"""Calculate confidence score from 0-100."""
if not results.issues:
return 100
deductions = self._calculate_deductions(result)
score = max(0, min(100, self.base_score - deductions))
return int(score)
def _calculate_deductions(self, result: ScanResult) -> float:
"""Calculate total deductions from issues found."""
total_deduction = 0.0
for issue in result.issues:
category_weight = self._get_category_weight(issue.category)
severity_multiplier = self.SEVERITY_MULTIPLIERS.get(
issue.severity, 1.0
)
deduction = category_weight * severity_multiplier
if issue.category == IssueCategory.SECURITY:
deduction *= 1.5
total_deduction += deduction
deduction_per_file = total_deduction / max(1, result.files_scanned)
return deduction_per_file
def _get_category_weight(self, category: IssueCategory) -> float:
"""Get weight for an issue category."""
weights = {
IssueCategory.SECURITY: self.SECURITY_WEIGHT,
IssueCategory.ERROR_HANDLING: self.ERROR_HANDLING_WEIGHT,
IssueCategory.CODE_QUALITY: self.CODE_QUALITY_WEIGHT,
IssueCategory.ANTI_PATTERN: self.ANTI_PATTERN_WEIGHT,
IssueCategory.COMPLEXITY: self.COMPLEXITY_WEIGHT,
IssueCategory.STYLE: self.STYLE_WEIGHT,
}
return weights.get(category, 1.0)
def get_score_breakdown(self, result: ScanResult) -> dict:
"""Get detailed breakdown of the score calculation."""
breakdown = {
"base_score": self.base_score,
"total_deductions": 0.0,
"issues_by_category": {},
"issues_by_severity": {},
"final_score": self.calculate(result),
severity_weights = {
SeverityLevel.CRITICAL: 25,
SeverityLevel.HIGH: 15,
SeverityLevel.MEDIUM: 10,
SeverityLevel.LOW: 5,
}
category_deductions = {}
severity_deductions = {}
category_weights = {
IssueCategory.SECURITY: 20,
IssueCategory.PERFORMANCE: 15,
IssueCategory.CORRECTNESS: 15,
IssueCategory.MAINTAINABILITY: 5,
IssueCategory.STYLE: 2,
}
for issue in result.issues:
category_weight = self._get_category_weight(issue.category)
severity_multiplier = self.SEVERITY_MULTIPLIERS.get(issue.severity, 1.0)
deduction = category_weight * severity_multiplier
score = 100
if issue.category.value not in category_deductions:
category_deductions[issue.category.value] = 0.0
category_deductions[issue.category.value] += deduction
for issue in results.issues:
score -= severity_weights.get(issue.severity, 10)
score -= category_weights.get(issue.category, 5)
if issue.severity.value not in severity_deductions:
severity_deductions[issue.severity.value] = 0.0
severity_deductions[issue.severity.value] += deduction
breakdown["issues_by_category"] = category_deductions
breakdown["issues_by_severity"] = severity_deductions
breakdown["total_deductions"] = sum(category_deductions.values())
return breakdown
def get_score_grade(self, score: int) -> str:
"""Get a letter grade for the confidence score."""
if score >= 95:
return "A+"
elif score >= 90:
return "A"
elif score >= 85:
return "A-"
elif score >= 80:
return "B+"
elif score >= 75:
return "B"
elif score >= 70:
return "B-"
elif score >= 65:
return "C+"
elif score >= 60:
return "C"
elif score >= 55:
return "C-"
elif score >= 50:
return "D+"
elif score >= 45:
return "D"
elif score >= 40:
return "D-"
else:
return "F"
def get_score_description(self, score: int) -> str:
"""Get a description for the confidence score."""
if score >= 90:
return "Excellent - Code is well-written and secure"
elif score >= 75:
return "Good - Code is generally sound with minor issues"
elif score >= 60:
return "Fair - Code has some issues that should be addressed"
elif score >= 45:
return "Poor - Code has significant issues requiring attention"
elif score >= 30:
return "Bad - Code has serious issues and security concerns"
else:
return "Critical - Code requires immediate review and fixes"
return max(0, min(100, score))

View File

@@ -1 +0,0 @@
"""Tests package for AI Code Audit CLI."""

View File

@@ -1,4 +1,4 @@
"""Pytest configuration and fixtures for AI Code Audit CLI tests."""
"""Pytest configuration and fixtures."""
import pytest
import tempfile
@@ -6,106 +6,22 @@ from pathlib import Path
@pytest.fixture
def sample_python_code():
"""Sample Python code with various issues."""
return '''
import os
import unused_module
def test_files(tmp_path):
"""Create temporary test files."""
test_dir = tmp_path / "test_project"
test_dir.mkdir()
def example_function(password="secret123"):
api_key = "AKIAIOSFODNN7EXAMPLE"
try:
result = os.system(f"echo {password}")
except:
pass
return result
(test_dir / "test.py").write_text("""
def hello():
# TODO: implement
print("Hello, World!")
""")
def bad_function(items=[]):
for i in range(100):
pass
return items
'''
@pytest.fixture
def clean_python_code():
"""Sample clean Python code without issues."""
return '''
def calculate_sum(numbers: list[int]) -> int:
"""Calculate the sum of a list of numbers."""
total = 0
for num in numbers:
total += num
return total
if __name__ == "__main__":
numbers = [1, 2, 3, 4, 5]
print(calculate_sum(numbers))
'''
@pytest.fixture
def sample_javascript_code():
"""Sample JavaScript code with various issues."""
return '''
const apiKey = "sk-1234567890abcdef";
const password = "secret123";
function processData(data) {
try {
const result = eval(data.userInput);
return result;
} catch (e) {
// Silent catch
}
(test_dir / "test.js").write_text("""
function hello() {
// FIXME: fix this
console.log("Hello, World!");
}
""")
function badExample(items = []) {
for (let i = 0; i < 100; i++) {
console.log(i);
}
}
'''
@pytest.fixture
def temp_directory():
"""Create a temporary directory with test files."""
with tempfile.TemporaryDirectory() as tmpdir:
yield Path(tmpdir)
@pytest.fixture
def test_files(temp_directory, sample_python_code, clean_python_code, sample_javascript_code):
"""Create test files in temp directory."""
(temp_directory / "bad_code.py").write_text(sample_python_code)
(temp_directory / "good_code.py").write_text(clean_python_code)
(temp_directory / "bad_code.js").write_text(sample_javascript_code)
return temp_directory
@pytest.fixture
def mock_scan_result():
"""Create a mock scan result for testing."""
from src.core.models import ScanResult, Issue, IssueCategory, SeverityLevel
result = ScanResult(files_scanned=3, target_path="/test")
result.add_issue(Issue(
severity=SeverityLevel.HIGH,
category=IssueCategory.SECURITY,
file_path="/test/file.py",
line_number=5,
message="Hardcoded credential detected",
suggestion="Use environment variables",
scanner_name="test",
))
result.add_issue(Issue(
severity=SeverityLevel.MEDIUM,
category=IssueCategory.ANTI_PATTERN,
file_path="/test/file.py",
line_number=10,
message="Mutable default argument detected",
suggestion="Use None as default",
scanner_name="test",
))
return result
return test_dir

View File

@@ -183,7 +183,7 @@ class TestOutputFormatter:
def test_get_severity_style(self):
"""Test severity style mapping."""
from src.cli.output import OutputFormatter
from src.cli.options import ScanOptions
from src.cli.options import ScanOptions, SeverityLevel
options = ScanOptions()
formatter = OutputFormatter(options)

View File

@@ -1,287 +1,56 @@
"""Tests for confidence scoring module."""
"""Tests for confidence scoring."""
import pytest
from src.core.models import ScanResult, Issue, IssueCategory, SeverityLevel
from src.reporting.confidence import ConfidenceScorer
from src.core.models import Issue, IssueCategory, ScanResult, SeverityLevel
class TestConfidenceScorer:
"""Tests for ConfidenceScorer."""
"""Tests for ConfidenceScorer class."""
def test_calculate_perfect_score(self, clean_python_code):
"""Test that clean code gets a high score."""
def test_scorer_initialization(self):
"""Test scorer creates instance."""
scorer = ConfidenceScorer()
result = ScanResult(files_scanned=1, target_path="/test")
assert scorer is not None
def test_calculate_perfect_score(self):
"""Test 100 score for no issues."""
scorer = ConfidenceScorer()
result = ScanResult(issues=[], files_scanned=5)
score = scorer.calculate(result)
assert score == 100
def test_calculate_with_issues(self, mock_scan_result):
def test_calculate_with_issues(self):
"""Test score calculation with issues."""
scorer = ConfidenceScorer()
score = scorer.calculate(mock_scan_result)
issues = [
Issue(
category=IssueCategory.SECURITY,
severity=SeverityLevel.HIGH,
file_path="test.py",
line_number=1,
message="Test issue",
)
]
result = ScanResult(issues=issues, files_scanned=1)
score = scorer.calculate(result)
assert score < 100
assert score >= 0
def test_security_issues_reduce_score_more(self):
"""Test that security issues reduce score more than other issues."""
def test_score_not_negative(self):
"""Test score doesn't go below 0."""
scorer = ConfidenceScorer()
result_security = ScanResult(files_scanned=1, target_path="/test")
result_security.add_issue(Issue(
severity=SeverityLevel.HIGH,
issues = [
Issue(
category=IssueCategory.SECURITY,
file_path="/test.py",
line_number=1,
message="Security issue",
scanner_name="test",
))
result_quality = ScanResult(files_scanned=1, target_path="/test")
result_quality.add_issue(Issue(
severity=SeverityLevel.HIGH,
category=IssueCategory.CODE_QUALITY,
file_path="/test.py",
line_number=1,
message="Quality issue",
scanner_name="test",
))
security_score = scorer.calculate(result_security)
quality_score = scorer.calculate(result_quality)
assert security_score < quality_score
def test_critical_issues_reduce_score_more(self):
"""Test that critical issues reduce score more."""
scorer = ConfidenceScorer()
result_critical = ScanResult(files_scanned=1, target_path="/test")
result_critical.add_issue(Issue(
severity=SeverityLevel.CRITICAL,
category=IssueCategory.SECURITY,
file_path="/test.py",
file_path="test.py",
line_number=1,
message="Critical issue",
scanner_name="test",
))
result_high = ScanResult(files_scanned=1, target_path="/test")
result_high.add_issue(Issue(
severity=SeverityLevel.HIGH,
category=IssueCategory.SECURITY,
file_path="/test.py",
line_number=1,
message="High issue",
scanner_name="test",
))
critical_score = scorer.calculate(result_critical)
high_score = scorer.calculate(result_high)
assert critical_score < high_score
def test_get_score_breakdown(self, mock_scan_result):
"""Test score breakdown generation."""
scorer = ConfidenceScorer()
breakdown = scorer.get_score_breakdown(mock_scan_result)
assert "base_score" in breakdown
assert "total_deductions" in breakdown
assert "final_score" in breakdown
assert "issues_by_category" in breakdown
assert "issues_by_severity" in breakdown
def test_get_score_grade(self):
"""Test score grade calculation."""
scorer = ConfidenceScorer()
assert scorer.get_score_grade(95) == "A+"
assert scorer.get_score_grade(90) == "A"
assert scorer.get_score_grade(85) == "A-"
assert scorer.get_score_grade(80) == "B+"
assert scorer.get_score_grade(75) == "B"
assert scorer.get_score_grade(70) == "B-"
assert scorer.get_score_grade(65) == "C+"
assert scorer.get_score_grade(60) == "C"
assert scorer.get_score_grade(55) == "C-"
assert scorer.get_score_grade(50) == "D+"
assert scorer.get_score_grade(45) == "D"
assert scorer.get_score_grade(40) == "D-"
assert scorer.get_score_grade(30) == "F"
def test_get_score_description(self):
"""Test score description generation."""
scorer = ConfidenceScorer()
desc_90 = scorer.get_score_description(90)
assert "Excellent" in desc_90
desc_75 = scorer.get_score_description(75)
assert "Good" in desc_75
desc_50 = scorer.get_score_description(50)
assert "Poor" in desc_50
desc_25 = scorer.get_score_description(25)
assert "Critical" in desc_25
def test_empty_result_gets_100(self):
"""Test that empty scan result gets 100 score."""
scorer = ConfidenceScorer()
result = ScanResult(files_scanned=0, target_path="/test")
score = scorer.calculate(result)
assert score == 100
def test_score_never_negative(self):
"""Test that score never goes below 0."""
scorer = ConfidenceScorer()
result = ScanResult(files_scanned=1, target_path="/test")
for _ in range(100):
result.add_issue(Issue(
severity=SeverityLevel.CRITICAL,
category=IssueCategory.SECURITY,
file_path="/test.py",
line_number=1,
message="Critical issue",
scanner_name="test",
))
message="Test issue",
)
for _ in range(10)
]
result = ScanResult(issues=issues, files_scanned=1)
score = scorer.calculate(result)
assert score >= 0
def test_score_never_exceeds_100(self):
"""Test that score never goes above 100."""
scorer = ConfidenceScorer()
result = ScanResult(files_scanned=10, target_path="/test")
score = scorer.calculate(result)
assert score <= 100
class TestIssueModel:
"""Tests for Issue data model."""
def test_issue_to_dict(self):
"""Test issue serialization to dictionary."""
issue = Issue(
severity=SeverityLevel.HIGH,
category=IssueCategory.SECURITY,
file_path="/test.py",
line_number=10,
message="Test issue",
suggestion="Fix this",
scanner_name="test",
)
data = issue.to_dict()
assert data["severity"] == "high"
assert data["category"] == "security"
assert data["file_path"] == "/test.py"
assert data["line_number"] == 10
assert data["message"] == "Test issue"
assert data["suggestion"] == "Fix this"
assert data["scanner_name"] == "test"
class TestScanResultModel:
"""Tests for ScanResult data model."""
def test_add_issue(self):
"""Test adding issues to scan result."""
result = ScanResult(files_scanned=1, target_path="/test")
issue = Issue(
severity=SeverityLevel.LOW,
category=IssueCategory.STYLE,
file_path="/test.py",
line_number=1,
message="Style issue",
scanner_name="test",
)
result.add_issue(issue)
assert len(result.issues) == 1
assert result.issues[0] == issue
def test_add_warning(self):
"""Test adding warnings to scan result."""
result = ScanResult(files_scanned=1, target_path="/test")
result.add_warning("Test warning")
assert len(result.warnings) == 1
assert result.warnings[0] == "Test warning"
def test_filter_by_severity(self):
"""Test filtering issues by severity."""
result = ScanResult(files_scanned=1, target_path="/test")
result.add_issue(Issue(
severity=SeverityLevel.LOW,
category=IssueCategory.STYLE,
file_path="/test.py",
line_number=1,
message="Low issue",
scanner_name="test",
))
result.add_issue(Issue(
severity=SeverityLevel.CRITICAL,
category=IssueCategory.SECURITY,
file_path="/test.py",
line_number=2,
message="Critical issue",
scanner_name="test",
))
filtered = result.filter_by_severity(SeverityLevel.HIGH)
assert len(filtered.issues) == 1
assert filtered.issues[0].severity == SeverityLevel.CRITICAL
def test_get_summary(self):
"""Test getting scan summary."""
result = ScanResult(files_scanned=2, target_path="/test")
result.add_issue(Issue(
severity=SeverityLevel.HIGH,
category=IssueCategory.SECURITY,
file_path="/test.py",
line_number=1,
message="Issue 1",
scanner_name="test",
))
result.add_issue(Issue(
severity=SeverityLevel.LOW,
category=IssueCategory.STYLE,
file_path="/test.py",
line_number=2,
message="Issue 2",
scanner_name="test",
))
summary = result.get_summary()
assert summary["files_scanned"] == 2
assert summary["total_issues"] == 2
assert "high" in summary["issues_by_severity"]
assert "low" in summary["issues_by_severity"]
assert "security" in summary["issues_by_category"]
assert "style" in summary["issues_by_category"]
def test_to_dict(self):
"""Test scan result serialization to dictionary."""
result = ScanResult(files_scanned=1, target_path="/test")
result.add_issue(Issue(
severity=SeverityLevel.MEDIUM,
category=IssueCategory.CODE_QUALITY,
file_path="/test.py",
line_number=5,
message="Test issue",
scanner_name="test",
))
data = result.to_dict()
assert data["files_scanned"] == 1
assert data["target_path"] == "/test"
assert len(data["issues"]) == 1
assert data["issues"][0]["severity"] == "medium"

44
tests/test_models.py Normal file
View File

@@ -0,0 +1,44 @@
"""Tests for core models."""
import pytest
from src.core.models import Issue, IssueCategory, ScanResult, SeverityLevel
class TestModels:
"""Tests for data models."""
def test_issue_creation(self):
"""Test Issue dataclass creation."""
issue = Issue(
category=IssueCategory.SECURITY,
severity=SeverityLevel.HIGH,
file_path="test.py",
line_number=10,
message="Security issue found",
)
assert issue.category == IssueCategory.SECURITY
assert issue.severity == SeverityLevel.HIGH
assert issue.file_path == "test.py"
assert issue.line_number == 10
assert issue.message == "Security issue found"
def test_scan_result_creation(self):
"""Test ScanResult dataclass creation."""
result = ScanResult(
issues=[],
warnings=[],
files_scanned=5,
scan_time=1.5,
)
assert result.issues == []
assert result.warnings == []
assert result.files_scanned == 5
assert result.scan_time == 1.5
def test_severity_level_ordering(self):
"""Test SeverityLevel enum values."""
assert SeverityLevel.LOW.value == "low"
assert SeverityLevel.MEDIUM.value == "medium"
assert SeverityLevel.HIGH.value == "high"
assert SeverityLevel.CRITICAL.value == "critical"

65
tests/test_scanner.py Normal file
View File

@@ -0,0 +1,65 @@
"""Tests for code scanner."""
import pytest
from pathlib import Path
from src.core.scanner import CodeScanner
from src.core.models import IssueCategory, SeverityLevel
class TestCodeScanner:
"""Tests for CodeScanner class."""
def test_scanner_initialization(self):
"""Test scanner creates empty issue list."""
scanner = CodeScanner()
assert scanner.issues == []
def test_supported_extensions(self):
"""Test supported file extensions."""
scanner = CodeScanner()
assert ".py" in scanner.SUPPORTED_EXTENSIONS
assert ".js" in scanner.SUPPORTED_EXTENSIONS
assert ".ts" in scanner.SUPPORTED_EXTENSIONS
def test_scan_single_file(self, tmp_path):
"""Test scanning a single file."""
test_file = tmp_path / "test.py"
test_file.write_text("""
def hello():
# TODO: implement
print("Hello")
""")
scanner = CodeScanner()
result = scanner.scan(test_file)
assert result.files_scanned == 1
assert len(result.issues) == 1
assert result.issues[0].category == IssueCategory.MAINTAINABILITY
assert result.issues[0].severity == SeverityLevel.LOW
def test_scan_directory(self, tmp_path):
"""Test scanning a directory."""
test_dir = tmp_path / "project"
test_dir.mkdir()
(test_dir / "test.py").write_text("# TODO: test")
(test_dir / "test.js").write_text("// FIXME: fix")
scanner = CodeScanner()
result = scanner.scan(test_dir)
assert result.files_scanned == 2
assert len(result.issues) == 2
def test_unsupported_file(self, tmp_path):
"""Test skipping unsupported files."""
test_file = tmp_path / "test.txt"
test_file.write_text("TODO: something")
scanner = CodeScanner()
result = scanner.scan(test_file)
assert result.files_scanned == 0
assert len(result.issues) == 0