Fix CI/CD: Add Gitea Actions workflow and fix linting issues
Some checks failed
CI / test (push) Failing after 13s
Some checks failed
CI / test (push) Failing after 13s
This commit is contained in:
34
local-ai-commit-reviewer/.gitea/workflows/ci.yml
Normal file
34
local-ai-commit-reviewer/.gitea/workflows/ci.yml
Normal file
@@ -0,0 +1,34 @@
|
||||
name: CI/CD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ main, master ]
|
||||
pull_request:
|
||||
branches: [ main, master ]
|
||||
|
||||
jobs:
|
||||
lint-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.10'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -e ".[dev]"
|
||||
|
||||
- name: Run Ruff linting
|
||||
run: ruff check src tests
|
||||
|
||||
- name: Run MyPy type checking
|
||||
run: mypy src
|
||||
|
||||
- name: Run pytest
|
||||
run: pytest tests/ -v --tb=short
|
||||
138
local-ai-commit-reviewer/.gitignore
vendored
Normal file
138
local-ai-commit-reviewer/.gitignore
vendored
Normal file
@@ -0,0 +1,138 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
pip-wheel-metadata/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
.python-version
|
||||
|
||||
# pipenv
|
||||
Pipfile.lock
|
||||
|
||||
# PEP 582
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# IDEs
|
||||
.vscode/
|
||||
.idea/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# macOS
|
||||
.DS_Store
|
||||
|
||||
# Project specific
|
||||
.aicr.yaml
|
||||
*.log
|
||||
19
local-ai-commit-reviewer/CHANGELOG.md
Normal file
19
local-ai-commit-reviewer/CHANGELOG.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Changelog
|
||||
|
||||
All notable changes to this project will be documented in this file.
|
||||
|
||||
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
|
||||
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
|
||||
|
||||
## [0.1.0] - 2024-01-01
|
||||
|
||||
### Added
|
||||
- Initial release of Local AI Commit Reviewer CLI
|
||||
- Support for Ollama LLM backend
|
||||
- Staged change analysis and review
|
||||
- Pre-commit hook integration
|
||||
- Multiple strictness levels (permissive, balanced, strict)
|
||||
- Multi-language support (Python, JavaScript, TypeScript, Go, Rust, Java, C, C++)
|
||||
- Rich terminal output with syntax highlighting
|
||||
- JSON and Markdown export capabilities
|
||||
- Configuration management with Pydantic validation
|
||||
21
local-ai-commit-reviewer/LICENSE
Normal file
21
local-ai-commit-reviewer/LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 Local AI Commit Reviewer Contributors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
192
local-ai-commit-reviewer/README.md
Normal file
192
local-ai-commit-reviewer/README.md
Normal file
@@ -0,0 +1,192 @@
|
||||
# Local AI Commit Reviewer CLI
|
||||
|
||||
A CLI tool that reviews Git commits locally using lightweight LLMs (Ollama/MLX) before pushing. It analyzes staged changes, provides inline suggestions, and integrates with Git workflows while preserving code privacy through local processing.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Install the tool
|
||||
pip install local-ai-commit-reviewer
|
||||
|
||||
# Review staged changes before committing
|
||||
aicr review
|
||||
|
||||
# Install pre-commit hook
|
||||
aicr hook
|
||||
|
||||
# Review a specific commit
|
||||
aicr review --commit <sha>
|
||||
```
|
||||
|
||||
## Installation
|
||||
|
||||
### From PyPI
|
||||
|
||||
```bash
|
||||
pip install local-ai-commit-reviewer
|
||||
```
|
||||
|
||||
### From Source
|
||||
|
||||
```bash
|
||||
git clone https://github.com/yourusername/local-ai-commit-reviewer.git
|
||||
cd local-ai-commit-reviewer
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Python 3.10+
|
||||
- [Ollama](https://ollama.ai/) running locally (or MLX for Apple Silicon)
|
||||
- Git
|
||||
|
||||
## Configuration
|
||||
|
||||
Create a `.aicr.yaml` file in your project root:
|
||||
|
||||
```yaml
|
||||
llm:
|
||||
endpoint: "http://localhost:11434"
|
||||
model: "codellama"
|
||||
timeout: 120
|
||||
|
||||
review:
|
||||
strictness: "balanced"
|
||||
|
||||
hooks:
|
||||
enabled: true
|
||||
fail_on_critical: true
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `AICR_LLM_ENDPOINT` | Custom LLM API endpoint | `http://localhost:11434` |
|
||||
| `AICR_MODEL` | Model name for reviews | `codellama` |
|
||||
| `AICR_CONFIG_PATH` | Path to config file | `.aicr.yaml` |
|
||||
| `AICR_NO_COLOR` | Disable colored output | `false` |
|
||||
|
||||
## Usage
|
||||
|
||||
### Review Staged Changes
|
||||
|
||||
```bash
|
||||
# Review all staged changes
|
||||
aicr review
|
||||
|
||||
# Review with strict mode
|
||||
aicr review --strictness strict
|
||||
|
||||
# Review with permissive mode (only critical issues)
|
||||
aicr review --strictness permissive
|
||||
|
||||
# Output as JSON
|
||||
aicr review --output json
|
||||
|
||||
# Output as Markdown
|
||||
aicr review --output markdown
|
||||
```
|
||||
|
||||
### Review Specific Commit
|
||||
|
||||
```bash
|
||||
aicr review --commit abc123def
|
||||
```
|
||||
|
||||
### Git Hook Integration
|
||||
|
||||
```bash
|
||||
# Install pre-commit hook in current repository
|
||||
aicr hook --local
|
||||
|
||||
# Install globally (for new repositories)
|
||||
aicr hook --global
|
||||
|
||||
# Skip the hook
|
||||
git commit --no-verify
|
||||
```
|
||||
|
||||
### Configuration Management
|
||||
|
||||
```bash
|
||||
# Show current configuration
|
||||
aicr config --list
|
||||
|
||||
# Set a configuration option
|
||||
aicr config --set llm.model "llama2"
|
||||
|
||||
# Show config file path
|
||||
aicr config --path
|
||||
```
|
||||
|
||||
### Model Management
|
||||
|
||||
```bash
|
||||
# List available models
|
||||
aicr models
|
||||
|
||||
# Check Ollama status
|
||||
aicr status
|
||||
```
|
||||
|
||||
## Supported Languages
|
||||
|
||||
- Python
|
||||
- JavaScript / TypeScript
|
||||
- Go
|
||||
- Rust
|
||||
- Java
|
||||
- C / C++
|
||||
- Ruby
|
||||
- PHP
|
||||
- Swift
|
||||
- Kotlin
|
||||
- Scala
|
||||
|
||||
## Strictness Levels
|
||||
|
||||
| Level | Description |
|
||||
|-------|-------------|
|
||||
| `permissive` | Only critical security and bug issues |
|
||||
| `balanced` | Security, bugs, and major style issues |
|
||||
| `strict` | All issues including performance and documentation |
|
||||
|
||||
## Error Resolution
|
||||
|
||||
| Error | Resolution |
|
||||
|-------|------------|
|
||||
| LLM connection refused | Start Ollama: `ollama serve` |
|
||||
| Model not found | Pull model: `ollama pull <model>` |
|
||||
| Not a Git repository | Run from within a Git repo |
|
||||
| No staged changes | Stage files: `git add <files>` |
|
||||
| Git hook permission denied | `chmod +x .git/hooks/pre-commit` |
|
||||
|
||||
## Development
|
||||
|
||||
```bash
|
||||
# Install development dependencies
|
||||
pip install -e ".[dev]"
|
||||
|
||||
# Run tests
|
||||
pytest tests/ -v
|
||||
|
||||
# Run linting
|
||||
ruff check src/
|
||||
black src/ tests/
|
||||
|
||||
# Type checking
|
||||
mypy src/
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
1. Fork the repository
|
||||
2. Create a feature branch
|
||||
3. Make your changes
|
||||
4. Run tests and linting
|
||||
5. Submit a pull request
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see LICENSE file for details.
|
||||
121
local-ai-commit-reviewer/config.yaml
Normal file
121
local-ai-commit-reviewer/config.yaml
Normal file
@@ -0,0 +1,121 @@
|
||||
# Local AI Commit Reviewer Configuration
|
||||
# This file contains default settings for the aicr CLI tool
|
||||
|
||||
# LLM Configuration
|
||||
llm:
|
||||
# Default LLM endpoint (Ollama default)
|
||||
endpoint: "http://localhost:11434"
|
||||
# Default model to use for reviews
|
||||
model: "codellama"
|
||||
# Timeout for LLM requests in seconds
|
||||
timeout: 120
|
||||
# Maximum number of tokens to generate
|
||||
max_tokens: 2048
|
||||
# Temperature for generation (0.0-1.0)
|
||||
temperature: 0.3
|
||||
|
||||
# Review Settings
|
||||
review:
|
||||
# Default strictness level: permissive, balanced, strict
|
||||
strictness: "balanced"
|
||||
# Maximum number of issues to report per file
|
||||
max_issues_per_file: 20
|
||||
# Enable syntax highlighting
|
||||
syntax_highlighting: true
|
||||
# Show line numbers in output
|
||||
show_line_numbers: true
|
||||
|
||||
# Language-specific configurations
|
||||
languages:
|
||||
python:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "pep8"
|
||||
- "type-hints"
|
||||
- "docstrings"
|
||||
max_line_length: 100
|
||||
javascript:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "airbnb"
|
||||
max_line_length: 100
|
||||
typescript:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "airbnb"
|
||||
max_line_length: 100
|
||||
go:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "golint"
|
||||
- "staticcheck"
|
||||
rust:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "clippy"
|
||||
java:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "google-java"
|
||||
c:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "cppcheck"
|
||||
cpp:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "cppcheck"
|
||||
|
||||
# Strictness Profiles
|
||||
strictness_profiles:
|
||||
permissive:
|
||||
description: "Focus on critical issues only"
|
||||
check_security: true
|
||||
check_bugs: true
|
||||
check_style: false
|
||||
check_performance: false
|
||||
check_documentation: false
|
||||
min_severity: "warning"
|
||||
balanced:
|
||||
description: "Balanced review of common issues"
|
||||
check_security: true
|
||||
check_bugs: true
|
||||
check_style: true
|
||||
check_performance: false
|
||||
check_documentation: false
|
||||
min_severity: "info"
|
||||
strict:
|
||||
description: "Comprehensive review of all issues"
|
||||
check_security: true
|
||||
check_bugs: true
|
||||
check_style: true
|
||||
check_performance: true
|
||||
check_documentation: true
|
||||
min_severity: "info"
|
||||
|
||||
# Git Hook Configuration
|
||||
hooks:
|
||||
# Enable pre-commit hook by default
|
||||
enabled: true
|
||||
# Exit with error code on critical issues
|
||||
fail_on_critical: true
|
||||
# Allow bypassing the hook with --no-verify
|
||||
allow_bypass: true
|
||||
|
||||
# Output Configuration
|
||||
output:
|
||||
# Default output format: terminal, json, markdown
|
||||
format: "terminal"
|
||||
# Colors theme: dark, light, auto
|
||||
theme: "auto"
|
||||
# Show suggestions for fixes
|
||||
show_suggestions: true
|
||||
|
||||
# Logging Configuration
|
||||
logging:
|
||||
# Log level: debug, info, warning, error
|
||||
level: "info"
|
||||
# Log file path (empty for stdout only)
|
||||
log_file: ""
|
||||
# Enable structured logging
|
||||
structured: false
|
||||
82
local-ai-commit-reviewer/examples/.aicr.yaml.example
Normal file
82
local-ai-commit-reviewer/examples/.aicr.yaml.example
Normal file
@@ -0,0 +1,82 @@
|
||||
# Local AI Commit Reviewer - Example Configuration
|
||||
# Copy this file to .aicr.yaml in your project root
|
||||
|
||||
# LLM Configuration
|
||||
llm:
|
||||
# Ollama endpoint
|
||||
endpoint: "http://localhost:11434"
|
||||
# Model to use for reviews
|
||||
model: "codellama"
|
||||
# Request timeout in seconds
|
||||
timeout: 120
|
||||
# Maximum tokens to generate
|
||||
max_tokens: 2048
|
||||
# Temperature (0.0-1.0)
|
||||
temperature: 0.3
|
||||
|
||||
# Review Settings
|
||||
review:
|
||||
# Strictness: permissive, balanced, strict
|
||||
strictness: "balanced"
|
||||
# Maximum issues per file
|
||||
max_issues_per_file: 20
|
||||
# Enable syntax highlighting
|
||||
syntax_highlighting: true
|
||||
# Show line numbers
|
||||
show_line_numbers: true
|
||||
|
||||
# Language-specific configurations
|
||||
languages:
|
||||
python:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "pep8"
|
||||
- "type-hints"
|
||||
- "docstrings"
|
||||
javascript:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "airbnb"
|
||||
typescript:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "airbnb"
|
||||
go:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "golint"
|
||||
- "staticcheck"
|
||||
rust:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "clippy"
|
||||
java:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "google-java"
|
||||
c:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "cppcheck"
|
||||
cpp:
|
||||
enabled: true
|
||||
review_rules:
|
||||
- "cppcheck"
|
||||
|
||||
# Git Hook Configuration
|
||||
hooks:
|
||||
enabled: true
|
||||
fail_on_critical: true
|
||||
allow_bypass: true
|
||||
|
||||
# Output Configuration
|
||||
output:
|
||||
format: "terminal"
|
||||
theme: "auto"
|
||||
show_suggestions: true
|
||||
|
||||
# Logging Configuration
|
||||
logging:
|
||||
level: "info"
|
||||
log_file: ""
|
||||
structured: false
|
||||
81
local-ai-commit-reviewer/pyproject.toml
Normal file
81
local-ai-commit-reviewer/pyproject.toml
Normal file
@@ -0,0 +1,81 @@
|
||||
[build-system]
|
||||
requires = ["setuptools>=61.0", "wheel"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "local-ai-commit-reviewer"
|
||||
version = "0.1.0"
|
||||
description = "A CLI tool that reviews Git commits locally using lightweight LLMs"
|
||||
readme = "README.md"
|
||||
license = {text = "MIT"}
|
||||
requires-python = ">=3.10"
|
||||
authors = [
|
||||
{name = "Local AI Commit Reviewer Contributors"}
|
||||
]
|
||||
keywords = ["git", "cli", "llm", "code-review", "ollama"]
|
||||
classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
]
|
||||
dependencies = [
|
||||
"click>=8.1.7",
|
||||
"gitpython>=3.1.43",
|
||||
"ollama>=0.3.3",
|
||||
"rich>=13.7.1",
|
||||
"pydantic>=2.6.1",
|
||||
"pyyaml>=6.0.1",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
dev = [
|
||||
"pytest>=7.4.0",
|
||||
"pytest-cov>=4.1.0",
|
||||
"pytest-mock>=3.12.0",
|
||||
"black>=23.0.0",
|
||||
"ruff>=0.1.0",
|
||||
"mypy>=1.7.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
aicr = "src.cli:main"
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
where = ["."]
|
||||
include = ["src*"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests"]
|
||||
python_files = ["test_*.py"]
|
||||
python_functions = ["test_*"]
|
||||
addopts = "-v --tb=short"
|
||||
|
||||
[tool.coverage.run]
|
||||
source = ["src"]
|
||||
omit = ["tests/*"]
|
||||
|
||||
[tool.coverage.report]
|
||||
exclude_lines = ["pragma: no cover", "def __repr__", "raise NotImplementedError"]
|
||||
|
||||
[tool.black]
|
||||
line-length = 100
|
||||
target-version = ["py310"]
|
||||
include = "\\.pyi?$"
|
||||
|
||||
[tool.ruff]
|
||||
line-length = 100
|
||||
target-version = "py310"
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = ["E", "W", "F", "I", "UP", "B", "C4", "A", "SIM", "ARG", "PL", "RUF"]
|
||||
ignore = ["E501", "B008", "C901"]
|
||||
|
||||
[tool.mypy]
|
||||
python_version = "3.10"
|
||||
warn_return_any = true
|
||||
warn_unused_configs = true
|
||||
ignore_missing_imports = true
|
||||
6
local-ai-commit-reviewer/requirements.txt
Normal file
6
local-ai-commit-reviewer/requirements.txt
Normal file
@@ -0,0 +1,6 @@
|
||||
click>=8.1.7
|
||||
gitpython>=3.1.43
|
||||
ollama>=0.3.3
|
||||
rich>=13.7.1
|
||||
pydantic>=2.6.1
|
||||
pyyaml>=6.0.1
|
||||
47
local-ai-commit-reviewer/setup.cfg
Normal file
47
local-ai-commit-reviewer/setup.cfg
Normal file
@@ -0,0 +1,47 @@
|
||||
[metadata]
|
||||
name = local-ai-commit-reviewer
|
||||
version = 0.1.0
|
||||
author = Local AI Commit Reviewer Contributors
|
||||
description = A CLI tool that reviews Git commits locally using lightweight LLMs
|
||||
long_description = file: README.md
|
||||
long_description_content_type = text/markdown
|
||||
url = https://github.com/yourusername/local-ai-commit-reviewer
|
||||
license = MIT
|
||||
classifiers =
|
||||
Development Status :: 4 - Beta
|
||||
Intended Audience :: Developers
|
||||
License :: OSI Approved :: MIT License
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.10
|
||||
Programming Language :: Python :: 3.11
|
||||
Programming Language :: Python :: 3.12
|
||||
keywords = git, cli, llm, code-review, ollama
|
||||
|
||||
[options]
|
||||
python_requires = >=3.10
|
||||
install_requires =
|
||||
click>=8.1.7
|
||||
gitpython>=3.1.43
|
||||
ollama>=0.3.3
|
||||
rich>=13.7.1
|
||||
pydantic>=2.6.1
|
||||
pyyaml>=6.0.1
|
||||
|
||||
[options.extras_require]
|
||||
dev =
|
||||
pytest>=7.4.0
|
||||
pytest-cov>=4.1.0
|
||||
pytest-mock>=3.12.0
|
||||
black>=23.0.0
|
||||
ruff>=0.1.0
|
||||
mypy>=1.7.0
|
||||
|
||||
[options.entry_points]
|
||||
console_scripts =
|
||||
aicr = src.cli:main
|
||||
|
||||
[tool:pytest]
|
||||
testpaths = tests
|
||||
python_files = test_*.py
|
||||
python_functions = test_*
|
||||
addopts = -v --tb=short
|
||||
14
local-ai-commit-reviewer/src/__init__.py
Normal file
14
local-ai-commit-reviewer/src/__init__.py
Normal file
@@ -0,0 +1,14 @@
|
||||
from . import cli, config, core, formatters, git, hooks, llm, utils
|
||||
|
||||
__version__ = "0.1.0"
|
||||
|
||||
__all__ = [
|
||||
"cli",
|
||||
"config",
|
||||
"core",
|
||||
"formatters",
|
||||
"git",
|
||||
"hooks",
|
||||
"llm",
|
||||
"utils",
|
||||
]
|
||||
3
local-ai-commit-reviewer/src/cli/__init__.py
Normal file
3
local-ai-commit-reviewer/src/cli/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .cli import cli, main
|
||||
|
||||
__all__ = ["cli", "main"]
|
||||
337
local-ai-commit-reviewer/src/cli/cli.py
Normal file
337
local-ai-commit-reviewer/src/cli/cli.py
Normal file
@@ -0,0 +1,337 @@
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Union
|
||||
|
||||
import click
|
||||
from rich import print as rprint
|
||||
|
||||
from ..config import Config, get_config
|
||||
from ..core import ReviewEngine, ReviewResult
|
||||
from ..formatters import get_formatter
|
||||
from ..git import FileChange, GitRepo, get_staged_changes
|
||||
from ..git import install_hook as git_install_hook
|
||||
from ..llm import OllamaProvider
|
||||
|
||||
|
||||
@click.group()
@click.option("--config", "-c", type=click.Path(exists=True), help="Path to config file")
@click.option("--endpoint", help="LLM endpoint URL", default=None)
@click.option("--model", "-m", help="Model name to use", default=None)
@click.pass_context
def cli(ctx: click.Context, config: str | None, endpoint: str | None, model: str | None):
    """Local AI Commit Reviewer — review Git changes with a local LLM."""
    ctx.ensure_object(dict)
    # Config file resolution order: --config flag, then AICR_CONFIG_PATH env var,
    # then whatever default get_config() applies when given None.
    cfg_path = config or os.environ.get("AICR_CONFIG_PATH")
    cfg = get_config(cfg_path)

    # CLI flags override values loaded from the config file for this invocation.
    if endpoint:
        cfg.llm.endpoint = endpoint
    if model:
        cfg.llm.model = model

    # Shared state for subcommands: effective config and the repo to operate on
    # (always the current working directory).
    ctx.obj["config"] = cfg
    ctx.obj["repo_path"] = Path.cwd()
|
||||
|
||||
|
||||
@cli.command()
@click.option("--strictness", "-s", type=click.Choice(["permissive", "balanced", "strict"]), default=None)
@click.option("--output", "-o", type=click.Choice(["terminal", "json", "markdown"]), default="terminal")
@click.option("--commit", "-C", help="Review a specific commit SHA", default=None)
@click.option("--hook", is_flag=True, help="Run in hook mode (exit non-zero on critical)")
@click.option("--file", "-f", multiple=True, help="Files to review (default: all staged)")
@click.pass_context
def review(  # noqa: PLR0913
    ctx: click.Context,
    strictness: str | None,
    output: str,
    commit: str | None,
    hook: bool,
    file: tuple
):
    """Review staged changes (default) or a specific commit with the configured LLM.

    In --hook mode the exit code signals whether the commit should proceed:
    0 = proceed, non-zero = block (see _handle_hook_exit).
    """
    cfg: Config = ctx.obj["config"]

    # Fall back to the strictness level from the config file when the flag is absent.
    if strictness is None:
        strictness = cfg.review.strictness

    try:
        engine = ReviewEngine(config=cfg)
        engine.set_repo(ctx.obj["repo_path"])

        if commit:
            # Review a historical commit by SHA instead of the index.
            result = engine.review_commit(commit, strictness=strictness)
        else:
            files = _get_files_to_review(ctx.obj["repo_path"], file)

            # Nothing staged: not an error. In hook mode exit 0 so the commit proceeds.
            if not files:
                rprint("[yellow]No staged changes found. Stage files with 'git add <files>' first.[/yellow]")
                if hook:
                    sys.exit(0)
                return

            result = engine.review_staged_changes(files, strictness=strictness)

        # Render in the requested format; rprint handles rich markup for terminal output.
        formatter = get_formatter(output)
        output_text = formatter.format(result)
        rprint(output_text)

        # Stash machine-readable renderings on the context for programmatic callers.
        if output == "json":
            ctx.obj["result_json"] = result.to_json()
        elif output == "markdown":
            ctx.obj["result_markdown"] = result.to_markdown()

        # May call sys.exit() when running as a git hook; no-op otherwise.
        _handle_hook_exit(result, hook, cfg)

    except Exception as e:
        # In hook mode any failure blocks the commit (exit 1); otherwise re-raise
        # so click reports the full error to the user.
        rprint(f"[red]Error during review: {e}[/red]")
        if hook:
            sys.exit(1)
        raise
|
||||
|
||||
|
||||
def _get_files_to_review(repo_path: Path, file: tuple) -> list[FileChange]:
    """Collect the staged changes to review.

    When explicit filenames are given, return a FileChange for each one that has
    a staged diff (files with no staged diff are silently skipped). Otherwise
    return every staged change in the repository.

    Args:
        repo_path: Root of the Git repository to inspect.
        file: Filenames from the repeated --file option; empty means "all staged".

    Returns:
        List of FileChange objects (possibly empty).
    """
    if file:
        # Fix: the repo handle was previously re-created inside the loop,
        # once per requested file; construct it a single time.
        repo = GitRepo(repo_path)
        changes = []
        for filename in file:
            diff = repo.get_staged_diff(filename)
            if diff:
                changes.append(FileChange(
                    filename=filename,
                    status="M",
                    diff=diff
                ))
        return changes
    return get_staged_changes(repo_path)
|
||||
|
||||
|
||||
def _handle_hook_exit(result: ReviewResult, hook: bool, cfg: Config) -> None:
    """Translate a review result into a process exit code when running as a hook.

    Outside hook mode this is a no-op. In hook mode: critical issues with
    fail_on_critical enabled exit 1 (block the commit); a clean review exits 0;
    a review with issues while fail_on_critical is disabled also exits 0.
    Non-critical issues with fail_on_critical enabled fall through (commit proceeds).
    """
    if not hook:
        return

    blocking = cfg.hooks.fail_on_critical

    # Block the commit on critical findings when configured to do so.
    if result.has_critical_issues() and blocking:
        rprint("\n[red]Critical issues found. Commit blocked.[/red]")
        sys.exit(1)

    # Clean review: let the commit through.
    if not result.has_issues():
        rprint("[green]No issues found. Proceeding with commit.[/green]")
        sys.exit(0)

    # Issues exist but blocking is disabled: warn and proceed.
    if not blocking:
        rprint("\n[yellow]Issues found but not blocking commit (fail_on_critical=false).[/yellow]")
        sys.exit(0)
|
||||
|
||||
|
||||
@cli.command()
@click.option("--local", is_flag=True, help="Install hook locally (in current repo)")
@click.option("--global", "global_", is_flag=True, help="Install hook globally")
@click.option("--force", is_flag=True, help="Overwrite existing hook")
@click.pass_context
def hook(ctx: click.Context, local: bool, global_: bool, force: bool):
    """Install the aicr pre-commit hook, locally or as a global git template.

    NOTE(review): the README documents this command as `aicr install-hook`,
    but it is registered here as `hook` — confirm which name is intended.
    """
    ctx.ensure_object(dict)

    # Default to a local install when neither scope flag is given.
    # (local is not read below — the else branch of `if global_` is the local path.)
    if not local and not global_:
        local = True

    if global_:
        # Write the hook into ~/.git-template/hooks so that newly-initialized
        # repositories pick it up; existing repos still need a local install.
        # NOTE(review): --force is ignored on this branch — an existing template
        # is never overwritten; confirm that is intended.
        home = Path.home()
        git_template = home / ".git-template" / "hooks"
        if not git_template.exists():
            rprint("[yellow]Git template directory not found. Creating...[/yellow]")
            git_template.mkdir(parents=True, exist_ok=True)
            (git_template / "pre-commit").write_text(_get_hook_script())
            rprint(f"[green]Global hook template created at {git_template}[/green]")
            rprint("[yellow]Note: New repos will use this template. Existing repos need local install.[/yellow]")
        else:
            rprint("[green]Global hook template already exists.[/green]")
    else:
        # Local install into the current repository's .git/hooks directory.
        repo_path = ctx.obj["repo_path"]
        git_hooks = repo_path / ".git" / "hooks"
        hook_path = git_hooks / "pre-commit"

        # Never clobber an existing hook unless --force was given.
        if hook_path.exists() and not force:
            rprint(f"[yellow]Hook already exists at {hook_path}. Use --force to overwrite.[/yellow]")
            return

        # git_install_hook returns a success flag; failure is fatal (exit 1).
        if git_install_hook(repo_path, "pre-commit", _get_hook_script()):
            rprint(f"[green]Pre-commit hook installed at {hook_path}[/green]")
        else:
            rprint("[red]Failed to install hook.[/red]")
            sys.exit(1)
|
||||
|
||||
|
||||
def _get_hook_script() -> str:
|
||||
return """#!/bin/bash
|
||||
# Local AI Commit Reviewer - Pre-commit Hook
|
||||
# Automatically reviews staged changes before committing
|
||||
|
||||
set -e
|
||||
|
||||
# Allow bypass with --no-verify
|
||||
if [ "$1" = "--no-verify" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Get the directory where this script is located
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Run the AI commit reviewer
|
||||
cd "$SCRIPT_DIR/../.."
|
||||
python -m aicr review --hook --strictness balanced || exit 1
|
||||
"""
|
||||
|
||||
|
||||
@cli.command()
@click.option("--set", "set_opt", nargs=2, multiple=True, help="Set config option (key value)")
@click.option("--get", help="Get config option value", default=None)
# Fix: the parameter name click infers from "--list" is `list`, but the callback
# parameter is `list_`, so invoking this command raised an unexpected-keyword
# error. Pass the destination name explicitly.
@click.option("--list", "list_", is_flag=True, help="List all config options")
@click.option("--path", is_flag=True, help="Show config file path")
@click.pass_context
def config(ctx: click.Context, set_opt: tuple, get: str | None, list_: bool, path: bool):
    """Show, query, or modify the aicr configuration.

    Exactly one action is performed, checked in order: --path, --get, --list,
    --set; with no flags a short summary of the key settings is printed.
    """
    cfg: Config = ctx.obj["config"]

    if path:
        # Report the config file location (env override or ./.aicr.yaml).
        config_path = os.environ.get("AICR_CONFIG_PATH") or str(Path.cwd() / ".aicr.yaml")
        rprint(f"Config path: {config_path}")
        return

    if get:
        # Dotted-path lookup, e.g. "llm.model".
        value = _get_nested_attr(cfg, get)
        if value is not None:
            rprint(f"{get}: {value}")
        else:
            rprint(f"[red]Unknown config option: {get}[/red]")
        return

    if list_:
        # Dump every known section via its pydantic model_dump().
        for section in ["llm", "review", "languages", "hooks", "output", "logging"]:
            section_obj = getattr(cfg, section, None)
            if section_obj:
                rprint(f"[bold]{section.upper()}[/bold]")
                for key, value in section_obj.model_dump().items():
                    rprint(f" {key}: {value}")
        return

    if set_opt:
        # NOTE(review): this mutates only the in-memory config object; nothing
        # here writes the change back to the config file — confirm intended.
        for key, value in set_opt:
            _set_nested_attr(cfg, key, value)
        rprint("[green]Configuration updated.[/green]")
        return

    # No flags: print a short summary of the effective configuration.
    rprint("[bold]Local AI Commit Reviewer Configuration[/bold]")
    rprint(f"LLM Endpoint: {cfg.llm.endpoint}")
    rprint(f"Model: {cfg.llm.model}")
    rprint(f"Strictness: {cfg.review.strictness}")
|
||||
|
||||
|
||||
def _get_nested_attr(obj, attr_path: str):
|
||||
parts = attr_path.split(".")
|
||||
current = obj
|
||||
for part in parts:
|
||||
if hasattr(current, part):
|
||||
current = getattr(current, part)
|
||||
else:
|
||||
return None
|
||||
return current
|
||||
|
||||
|
||||
def _set_nested_attr(obj, attr_path: str, value: Any) -> None:
|
||||
parts = attr_path.split(".")
|
||||
current: Any = obj
|
||||
for part in parts[:-1]:
|
||||
if hasattr(current, part):
|
||||
current = getattr(current, part)
|
||||
|
||||
final_attr = parts[-1]
|
||||
if hasattr(current, final_attr):
|
||||
attr = getattr(type(current), final_attr, None)
|
||||
if attr is not None and hasattr(attr, "annotation"):
|
||||
type_hint = attr.annotation # type: ignore[attr-defined]
|
||||
if getattr(type_hint, "__origin__", None) is Union:
|
||||
type_hint = type_hint.__args__[0]
|
||||
if hasattr(type_hint, "__name__"):
|
||||
if type_hint.__name__ == "int" and isinstance(value, str):
|
||||
value = int(value)
|
||||
elif type_hint.__name__ == "float" and isinstance(value, str):
|
||||
value = float(value)
|
||||
elif type_hint.__name__ == "bool" and isinstance(value, str):
|
||||
value = value.lower() in ("true", "1", "yes")
|
||||
setattr(current, final_attr, value)
|
||||
|
||||
|
||||
@cli.command()
@click.pass_context
def models(ctx: click.Context):
    """List the models available from the configured Ollama backend."""
    cfg: Config = ctx.obj["config"]

    try:
        provider = OllamaProvider(
            endpoint=cfg.llm.endpoint,
            model=cfg.llm.model
        )

        # Backend unreachable is fatal for this command (exit 1) with a hint.
        if not provider.is_available():
            rprint("[red]Ollama is not available. Make sure it's running.[/red]")
            rprint("Start Ollama with: ollama serve")
            sys.exit(1)

        # Local name shadows this command function; harmless here since the
        # function object is not referenced again inside its own body.
        models = provider.list_models()

        # A reachable backend with no pulled models is not an error.
        if not models:
            rprint("[yellow]No models found. Pull a model first.[/yellow]")
            rprint("Example: ollama pull codellama")
            return

        rprint("[bold]Available Models[/bold]\n")
        for model in models:
            rprint(f" {model.name}")
            rprint(f" Size: {model.size}")
            rprint(f" Modified: {model.modified}\n")

    except Exception as e:
        # Surface the error, then re-raise so click reports a failure status.
        rprint(f"[red]Error listing models: {e}[/red]")
        raise
|
||||
|
||||
|
||||
@cli.command()
@click.pass_context
def status(ctx: click.Context):
    """Report configuration, LLM backend availability, and Git repo state."""
    cfg: Config = ctx.obj["config"]

    rprint("[bold]Local AI Commit Reviewer Status[/bold]\n")

    # Effective configuration (config file plus any CLI overrides).
    rprint("[bold]Configuration:[/bold]")
    rprint(f" LLM Endpoint: {cfg.llm.endpoint}")
    rprint(f" Model: {cfg.llm.model}")
    rprint(f" Strictness: {cfg.review.strictness}\n")

    # Probe the Ollama backend; failures here are reported, never fatal.
    try:
        provider = OllamaProvider(
            endpoint=cfg.llm.endpoint,
            model=cfg.llm.model
        )

        if provider.is_available():
            rprint("[green]✓ Ollama is running[/green]")
            models = provider.list_models()
            rprint(f" {len(models)} model(s) available")
        else:
            rprint("[red]✗ Ollama is not running[/red]")
            rprint(" Start with: ollama serve")
    except Exception as e:
        rprint(f"[red]✗ Error checking Ollama: {e}[/red]")

    # Report Git repository state for the current working directory.
    repo = GitRepo(ctx.obj["repo_path"])
    if repo.is_valid():
        rprint("\n[green]✓ Valid Git repository[/green]")
        branch = repo.get_current_branch()
        rprint(f" Branch: {branch}")

        staged = repo.get_staged_files()
        rprint(f" Staged files: {len(staged)}")
    else:
        rprint("\n[yellow]⚠ Not a Git repository[/yellow]")
|
||||
|
||||
|
||||
def main():
    """Console-script entry point: run the Click CLI with a fresh context object."""
    cli(obj={})
|
||||
|
||||
|
||||
# Allow direct execution of this module as a script.
if __name__ == "__main__":
    main()
|
||||
3
local-ai-commit-reviewer/src/config/__init__.py
Normal file
3
local-ai-commit-reviewer/src/config/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .config import Config, ConfigLoader, Languages, StrictnessProfile, get_config
|
||||
|
||||
__all__ = ["Config", "ConfigLoader", "Languages", "StrictnessProfile", "get_config"]
|
||||
164
local-ai-commit-reviewer/src/config/config.py
Normal file
164
local-ai-commit-reviewer/src/config/config.py
Normal file
@@ -0,0 +1,164 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml # type: ignore[import-untyped]
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class LLMConfig(BaseModel):
    """Connection and sampling settings for the local LLM backend."""

    endpoint: str = "http://localhost:11434"  # Ollama HTTP endpoint
    model: str = "codellama"  # default model name
    timeout: int = 120  # request timeout, seconds
    max_tokens: int = 2048  # generation cap per request
    temperature: float = 0.3  # low temperature favors repeatable reviews
|
||||
|
||||
|
||||
class ReviewSettings(BaseModel):
    """Top-level review behavior knobs."""

    strictness: str = "balanced"  # name of a StrictnessProfiles field
    max_issues_per_file: int = 20  # cap on reported issues per reviewed file
    syntax_highlighting: bool = True
    show_line_numbers: bool = True
|
||||
|
||||
|
||||
class LanguageConfig(BaseModel):
    """Per-language review options."""

    enabled: bool = True
    review_rules: list[str] = Field(default_factory=list)  # rule-set names fed into the review
    max_line_length: int = 100
|
||||
|
||||
|
||||
class Languages(BaseModel):
    """Per-language configuration, one field per supported language.

    Field names double as lookup keys for :meth:`get_language_config`.
    """

    python: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["pep8", "type-hints", "docstrings"]))
    javascript: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["airbnb"]))
    typescript: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["airbnb"]))
    go: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["golint", "staticcheck"]))
    rust: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["clippy"]))
    java: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["google-java"]))
    c: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["cppcheck"]))
    cpp: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["cppcheck"]))

    def get_language_config(self, language: str) -> LanguageConfig | None:
        """Return the config for ``language`` (case-insensitive) or None if unsupported."""
        return getattr(self, language.lower(), None)
|
||||
|
||||
|
||||
class StrictnessProfile(BaseModel):
    """Which issue categories a review pass checks, plus its severity floor."""

    description: str = ""
    check_security: bool = True
    check_bugs: bool = True
    check_style: bool = True
    check_performance: bool = False
    check_documentation: bool = False
    min_severity: str = "info"  # one of "critical" | "warning" | "info"
|
||||
|
||||
|
||||
class StrictnessProfiles(BaseModel):
    """The three built-in strictness presets: permissive, balanced, strict."""

    permissive: StrictnessProfile = Field(default_factory=lambda: StrictnessProfile(
        description="Focus on critical issues only",
        check_security=True,
        check_bugs=True,
        check_style=False,
        check_performance=False,
        check_documentation=False,
        min_severity="warning"
    ))
    balanced: StrictnessProfile = Field(default_factory=lambda: StrictnessProfile(
        description="Balanced review of common issues",
        check_security=True,
        check_bugs=True,
        check_style=True,
        check_performance=False,
        check_documentation=False,
        min_severity="info"
    ))
    strict: StrictnessProfile = Field(default_factory=lambda: StrictnessProfile(
        description="Comprehensive review of all issues",
        check_security=True,
        check_bugs=True,
        check_style=True,
        check_performance=True,
        check_documentation=True,
        min_severity="info"
    ))

    def get_profile(self, name: str) -> StrictnessProfile:
        """Look up a profile by name (case-insensitive); unknown names fall back to balanced."""
        return getattr(self, name.lower(), self.balanced)
|
||||
|
||||
|
||||
class HooksConfig(BaseModel):
    """Git-hook integration settings."""

    enabled: bool = True
    fail_on_critical: bool = True  # block the commit when critical issues are found
    allow_bypass: bool = True
|
||||
|
||||
|
||||
class OutputConfig(BaseModel):
    """Output rendering settings."""

    format: str = "terminal"  # "terminal" | "json" | "markdown"
    theme: str = "auto"
    show_suggestions: bool = True
|
||||
|
||||
|
||||
class LoggingConfig(BaseModel):
    """Logging settings."""

    level: str = "info"
    log_file: str = ""  # empty means log to console only
    structured: bool = False
|
||||
|
||||
|
||||
class Config(BaseModel):
    """Root configuration aggregating all subsections, each with defaults."""

    llm: LLMConfig = Field(default_factory=LLMConfig)
    review: ReviewSettings = Field(default_factory=ReviewSettings)
    languages: Languages = Field(default_factory=Languages)
    strictness_profiles: StrictnessProfiles = Field(default_factory=StrictnessProfiles)
    hooks: HooksConfig = Field(default_factory=HooksConfig)
    output: OutputConfig = Field(default_factory=OutputConfig)
    logging: LoggingConfig = Field(default_factory=LoggingConfig)
|
||||
|
||||
|
||||
class ConfigLoader:
    """Loads a Config by merging global (~/.aicr.yaml) and project (./.aicr.yaml) files."""

    def __init__(self, config_path: str | None = None):
        """Remember an explicit config path; discovery happens in find_config_files()."""
        # NOTE(review): ``config_path`` is stored but never consulted below;
        # only AICR_CONFIG_PATH and the default file locations are searched —
        # confirm whether that is intentional.
        self.config_path = config_path
        self.global_config: Path | None = None
        self.project_config: Path | None = None

    def find_config_files(self) -> tuple[Path | None, Path | None]:
        """Locate the (primary, fallback) config files.

        Precedence: the AICR_CONFIG_PATH env var (no fallback), then the
        project-local .aicr.yaml (home-directory file as fallback), then the
        home-directory file alone, then (None, None).
        """
        env_config_path = os.environ.get("AICR_CONFIG_PATH")

        if env_config_path:
            env_path = Path(env_config_path)
            if env_path.exists():
                return env_path, None

        self.global_config = Path.home() / ".aicr.yaml"
        self.project_config = Path.cwd() / ".aicr.yaml"

        if self.project_config.exists():
            return self.project_config, self.global_config

        if self.global_config.exists():
            return self.global_config, None

        return None, None

    def load(self) -> Config:
        """Read and merge the discovered YAML files into a validated Config.

        The fallback (global) file is loaded first and the primary file's
        top-level keys overwrite it.  Note this is a shallow merge: a section
        present in both files is replaced wholesale, not deep-merged.
        """
        config_path, global_path = self.find_config_files()

        config_data: dict[str, Any] = {}

        if global_path and global_path.exists():
            with open(global_path) as f:
                global_data = yaml.safe_load(f) or {}
                config_data.update(global_data)

        if config_path and config_path.exists():
            with open(config_path) as f:
                project_data = yaml.safe_load(f) or {}
                config_data.update(project_data)

        return Config(**config_data)

    def save(self, config: Config, path: Path) -> None:
        """Write ``config`` to ``path`` as block-style YAML."""
        with open(path, "w") as f:
            yaml.dump(config.model_dump(), f, default_flow_style=False)
|
||||
|
||||
|
||||
def get_config(config_path: str | None = None) -> Config:
    """Convenience wrapper: build a ConfigLoader and return the merged Config."""
    return ConfigLoader(config_path).load()
|
||||
3
local-ai-commit-reviewer/src/core/__init__.py
Normal file
3
local-ai-commit-reviewer/src/core/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .review_engine import Issue, IssueCategory, IssueSeverity, ReviewEngine, ReviewResult
|
||||
|
||||
__all__ = ["Issue", "IssueCategory", "IssueSeverity", "ReviewEngine", "ReviewResult"]
|
||||
423
local-ai-commit-reviewer/src/core/review_engine.py
Normal file
423
local-ai-commit-reviewer/src/core/review_engine.py
Normal file
@@ -0,0 +1,423 @@
|
||||
import json
|
||||
import re
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from ..config import Config, StrictnessProfile
|
||||
from ..git import FileChange, GitRepo
|
||||
from ..llm import LLMProvider, OllamaProvider
|
||||
from ..llm.templates import ReviewPromptTemplates
|
||||
|
||||
|
||||
class IssueSeverity(str, Enum):
    """Severity level of a review issue, most serious first."""

    CRITICAL = "critical"
    WARNING = "warning"
    INFO = "info"
|
||||
|
||||
|
||||
class IssueCategory(str, Enum):
    """Kind of problem a review issue reports."""

    BUG = "bug"
    SECURITY = "security"
    STYLE = "style"
    PERFORMANCE = "performance"
    DOCUMENTATION = "documentation"
|
||||
|
||||
|
||||
@dataclass
class Issue:
    """A single problem found during review, tied to a file and line."""

    file: str
    line: int
    severity: IssueSeverity
    category: IssueCategory
    message: str
    suggestion: str | None = None
    raw_line: str | None = None

    def to_dict(self) -> dict:
        """Serialize to a plain dict with the enum fields flattened to strings."""
        return dict(
            file=self.file,
            line=self.line,
            severity=self.severity.value,
            category=self.category.value,
            message=self.message,
            suggestion=self.suggestion,
            raw_line=self.raw_line,
        )

    @classmethod
    def from_dict(cls, data: dict) -> "Issue":
        """Inverse of :meth:`to_dict`; the optional keys default to None."""
        severity = IssueSeverity(data["severity"])
        category = IssueCategory(data["category"])
        return cls(
            file=data["file"],
            line=data["line"],
            severity=severity,
            category=category,
            message=data["message"],
            suggestion=data.get("suggestion"),
            raw_line=data.get("raw_line"),
        )
|
||||
|
||||
|
||||
@dataclass
class ReviewSummary:
    """Aggregated counts and metadata for one review run."""

    critical_count: int = 0
    warning_count: int = 0
    info_count: int = 0
    files_reviewed: int = 0
    lines_changed: int = 0
    overall_assessment: str = ""
    issues_by_category: dict = field(default_factory=dict)
    issues_by_file: dict = field(default_factory=dict)

    def to_dict(self) -> dict:
        """Serialize every summary field to a plain (JSON-friendly) dict."""
        keys = (
            "critical_count",
            "warning_count",
            "info_count",
            "files_reviewed",
            "lines_changed",
            "overall_assessment",
            "issues_by_category",
            "issues_by_file",
        )
        return {key: getattr(self, key) for key in keys}
|
||||
|
||||
|
||||
@dataclass
class ReviewResult:
    """Complete outcome of one review run: issues, summary, and LLM metadata."""

    issues: list[Issue] = field(default_factory=list)
    summary: ReviewSummary = field(default_factory=ReviewSummary)
    model_used: str = ""  # model name reported by the LLM provider
    tokens_used: int = 0  # accumulated across all per-file LLM calls
    review_mode: str = ""  # strictness profile name used for the run
    error: str | None = None  # set when the review could not complete

    def has_critical_issues(self) -> bool:
        """True when at least one issue has CRITICAL severity."""
        return any(issue.severity == IssueSeverity.CRITICAL for issue in self.issues)

    def has_issues(self) -> bool:
        """True when the review produced any issues at all."""
        return len(self.issues) > 0

    def get_issues_by_severity(self, severity: IssueSeverity) -> list[Issue]:
        """Return all issues matching ``severity``, preserving order."""
        return [issue for issue in self.issues if issue.severity == severity]

    def get_issues_by_file(self, filename: str) -> list[Issue]:
        """Return all issues reported against ``filename``."""
        return [issue for issue in self.issues if issue.file == filename]

    def get_issues_by_category(self, category: IssueCategory) -> list[Issue]:
        """Return all issues matching ``category``."""
        return [issue for issue in self.issues if issue.category == category]

    def to_json(self) -> str:
        """Serialize the result (minus ``error``) as pretty-printed JSON."""
        return json.dumps({
            "issues": [issue.to_dict() for issue in self.issues],
            "summary": self.summary.to_dict(),
            "model_used": self.model_used,
            "tokens_used": self.tokens_used,
            "review_mode": self.review_mode
        }, indent=2)

    def to_markdown(self) -> str:
        """Render the result as a Markdown report grouped by severity."""
        lines = ["# AI Commit Review Results\n"]

        lines.append("## Summary\n")
        lines.append(f"- **Files Reviewed**: {self.summary.files_reviewed}")
        lines.append(f"- **Lines Changed**: {self.summary.lines_changed}")
        lines.append(f"- **Critical Issues**: {self.summary.critical_count}")
        lines.append(f"- **Warnings**: {self.summary.warning_count}")
        lines.append(f"- **Info**: {self.summary.info_count}")
        lines.append(f"- **Assessment**: {self.summary.overall_assessment}\n")

        if self.issues:
            lines.append("## Issues Found\n")

            # Emit one section per severity, most serious first.
            for severity in [IssueSeverity.CRITICAL, IssueSeverity.WARNING, IssueSeverity.INFO]:
                severity_issues = self.get_issues_by_severity(severity)
                if severity_issues:
                    lines.append(f"### {severity.value.upper()} ({len(severity_issues)})\n")
                    for issue in severity_issues:
                        lines.append(f"#### {issue.file}:{issue.line}")
                        lines.append(f"- **Category**: {issue.category.value}")
                        lines.append(f"- **Message**: {issue.message}")
                        if issue.suggestion:
                            lines.append(f"- **Suggestion**: {issue.suggestion}")
                        lines.append("")

        return "\n".join(lines)
|
||||
|
||||
|
||||
class ReviewEngine:
    """Orchestrates a review: gathers diffs, prompts the LLM, parses/filters issues."""

    def __init__(
        self,
        config: Config | None = None,
        llm_provider: LLMProvider | None = None
    ):
        """Create an engine, defaulting to an Ollama provider built from ``config``."""
        self.config = config or Config()
        self.llm_provider = llm_provider or OllamaProvider(
            endpoint=self.config.llm.endpoint,
            model=self.config.llm.model,
            timeout=self.config.llm.timeout
        )
        self.repo: GitRepo | None = None

    def set_repo(self, path: Path) -> None:
        """Bind the engine to the Git repository at ``path``."""
        self.repo = GitRepo(path)

    def _parse_llm_response(self, response_text: str, files: list[FileChange]) -> ReviewResult:
        """Parse an LLM reply into a ReviewResult, preferring embedded JSON.

        Falls back to line-oriented text parsing when no JSON object is
        present or the candidate JSON fails to decode.
        """
        result = ReviewResult()

        try:
            # Grab the outermost {...} span; models often wrap JSON in prose.
            json_match = re.search(r'\{[\s\S]*\}', response_text)
            if json_match:
                json_str = json_match.group()
                data = json.loads(json_str)

                issues_data = data.get("issues", [])
                for issue_data in issues_data:
                    try:
                        issue = Issue.from_dict(issue_data)
                        result.issues.append(issue)
                    except Exception:
                        # Skip malformed entries rather than failing the run.
                        pass

                summary_data = data.get("summary", {})
                result.summary.critical_count = summary_data.get("critical_count", 0)
                result.summary.warning_count = summary_data.get("warning_count", 0)
                result.summary.info_count = summary_data.get("info_count", 0)
                result.summary.overall_assessment = summary_data.get("overall_assessment", "")
            else:
                text_issues = self._parse_text_response(response_text, files)
                result.issues = text_issues

        except json.JSONDecodeError:
            result.issues = self._parse_text_response(response_text, files)

        return result

    def _parse_text_response(self, response_text: str, files: list[FileChange]) -> list[Issue]:  # noqa: ARG002
        """Best-effort parse of a free-text reply into issues.

        Recognizes lines shaped like ``**file**: <line> ...``; an optional
        ``->`` separates the message from a suggestion.
        """
        issues = []
        lines = response_text.split("\n")

        current_file = ""
        for line in lines:
            file_match = re.match(r'^\*\*(.+?)\*\*:\s*(\d+)', line)
            if file_match:
                current_file = file_match.group(1)
                line_num = int(file_match.group(2))

                # Bug fix: ``category`` was previously only assigned in the
                # elif/else branches, so the "critical" branch raised
                # UnboundLocalError on the first match (or silently reused a
                # stale category from the previous iteration).
                severity = IssueSeverity.WARNING
                category = IssueCategory.BUG
                if "critical" in line.lower():
                    severity = IssueSeverity.CRITICAL
                elif "security" in line.lower():
                    severity = IssueSeverity.CRITICAL
                    category = IssueCategory.SECURITY

                message = line
                suggestion = None
                if "->" in line:
                    parts = line.split("->")
                    message = parts[0].strip()
                    suggestion = "->".join(parts[1:]).strip()

                issues.append(Issue(
                    file=current_file,
                    line=line_num,
                    severity=severity,
                    category=category,
                    message=message,
                    suggestion=suggestion
                ))

        return issues

    def _get_strictness_profile(self) -> StrictnessProfile:
        """Resolve the configured strictness name to its profile."""
        return self.config.strictness_profiles.get_profile(
            self.config.review.strictness
        )

    def _filter_issues_by_strictness(self, issues: list[Issue]) -> list[Issue]:
        """Drop issues below the profile's severity floor or in disabled categories."""
        profile = self._get_strictness_profile()

        severity_order = {
            IssueSeverity.CRITICAL: 0,
            IssueSeverity.WARNING: 1,
            IssueSeverity.INFO: 2
        }

        min_severity = profile.min_severity.lower()
        min_level = 2
        if min_severity == "critical":
            min_level = 0
        elif min_severity == "warning":
            min_level = 1

        filtered = []
        for issue in issues:
            level = severity_order.get(issue.severity, 2)
            if level <= min_level:
                if issue.category == IssueCategory.SECURITY and not profile.check_security:
                    continue
                if issue.category == IssueCategory.BUG and not profile.check_bugs:
                    continue
                if issue.category == IssueCategory.STYLE and not profile.check_style:
                    continue
                if issue.category == IssueCategory.PERFORMANCE and not profile.check_performance:
                    continue
                if issue.category == IssueCategory.DOCUMENTATION and not profile.check_documentation:
                    continue
                filtered.append(issue)

        return filtered

    def _aggregate_summary(self, issues: list[Issue], files: list[FileChange]) -> ReviewSummary:
        """Tally counts, per-category/per-file indexes, and an overall verdict."""
        summary = ReviewSummary()
        summary.files_reviewed = len(files)
        # Count only added lines ("+" but not the "+++" file header).
        summary.lines_changed = sum(
            sum(1 for line in f.diff.split("\n") if line.startswith("+") and not line.startswith("+++"))
            for f in files
        )

        for issue in issues:
            if issue.severity == IssueSeverity.CRITICAL:
                summary.critical_count += 1
            elif issue.severity == IssueSeverity.WARNING:
                summary.warning_count += 1
            else:
                summary.info_count += 1

            if issue.category.value not in summary.issues_by_category:
                summary.issues_by_category[issue.category.value] = []
            summary.issues_by_category[issue.category.value].append(issue.file)

            if issue.file not in summary.issues_by_file:
                summary.issues_by_file[issue.file] = []
            summary.issues_by_file[issue.file].append(issue.line)

        if summary.critical_count > 0:
            summary.overall_assessment = "Critical issues found. Review recommended before committing."
        elif summary.warning_count > 0:
            summary.overall_assessment = "Warnings found. Consider addressing before committing."
        elif summary.info_count > 0:
            summary.overall_assessment = "Minor issues found. Ready for commit with optional fixes."
        else:
            summary.overall_assessment = "No issues found. Code is ready for commit."

        return summary

    def review_staged_changes(
        self,
        files: list[FileChange] | None = None,
        strictness: str | None = None,
        language: str | None = None
    ) -> ReviewResult:
        """Review the staged diff, one LLM call per changed file.

        Args:
            files: Changes to review; when None, read from the bound repo
                (binding to the CWD on demand).
            strictness: Profile name override; defaults to the config value.
            language: Language override; otherwise detected per file.

        Returns:
            ReviewResult with filtered/limited issues, or ``error`` set on failure.
        """
        if files is None:
            if self.repo is None:
                self.repo = GitRepo(Path.cwd())
            files = self.repo.get_all_staged_changes()

        if not files:
            return ReviewResult(error="No staged changes found")

        result = ReviewResult()
        result.review_mode = strictness or self.config.review.strictness

        if strictness is None:
            strictness = self.config.review.strictness

        all_issues = []

        for file_change in files:
            if not file_change.diff.strip():
                continue

            file_language = language
            if not file_language and self.repo is not None:
                file_language = self.repo.get_file_language(file_change.filename)

            prompt = ReviewPromptTemplates.get_prompt(
                diff=file_change.diff,
                strictness=strictness,
                language=file_language or ""
            )

            try:
                if self.llm_provider.is_available():
                    response = self.llm_provider.generate(
                        prompt,
                        max_tokens=self.config.llm.max_tokens,
                        temperature=self.config.llm.temperature
                    )
                    result.model_used = response.model
                    result.tokens_used += response.tokens_used

                    file_result = self._parse_llm_response(response.text, [file_change])
                    all_issues.extend(file_result.issues)
                else:
                    result.error = "LLM provider is not available"
                    return result
            except Exception as e:
                result.error = f"Review failed: {e!s}"
                return result

        filtered_issues = self._filter_issues_by_strictness(all_issues)
        # Cap total issues proportionally to the number of reviewed files.
        max_issues = self.config.review.max_issues_per_file
        limited_issues = filtered_issues[:max_issues * len(files)]
        result.issues = limited_issues
        result.summary = self._aggregate_summary(limited_issues, files)

        return result

    def review_commit(
        self,
        sha: str,
        strictness: str | None = None
    ) -> ReviewResult:
        """Review an existing commit identified by ``sha``.

        Returns a ReviewResult with ``error`` set when the commit cannot be
        found or the LLM call fails.
        """
        if self.repo is None:
            self.repo = GitRepo(Path.cwd())

        commit_info = self.repo.get_commit_info(sha)
        if commit_info is None:
            return ReviewResult(error=f"Commit {sha} not found")

        result = ReviewResult()
        result.review_mode = strictness or self.config.review.strictness

        if strictness is None:
            strictness = self.config.review.strictness

        all_issues = []

        for file_change in commit_info.changes:
            if not file_change.diff.strip():
                continue

            prompt = ReviewPromptTemplates.get_commit_review_prompt(
                diff=file_change.diff,
                commit_message=commit_info.message,
                strictness=strictness
            )

            try:
                if self.llm_provider.is_available():
                    response = self.llm_provider.generate(
                        prompt,
                        max_tokens=self.config.llm.max_tokens,
                        temperature=self.config.llm.temperature
                    )
                    result.model_used = response.model
                    result.tokens_used += response.tokens_used

                    file_result = self._parse_llm_response(response.text, [file_change])
                    all_issues.extend(file_result.issues)
                else:
                    result.error = "LLM provider is not available"
                    return result
            except Exception as e:
                result.error = f"Review failed: {e!s}"
                return result

        filtered_issues = self._filter_issues_by_strictness(all_issues)
        result.issues = filtered_issues
        result.summary = self._aggregate_summary(filtered_issues, commit_info.changes)

        return result
|
||||
3
local-ai-commit-reviewer/src/formatters/__init__.py
Normal file
3
local-ai-commit-reviewer/src/formatters/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .formatters import JSONFormatter, MarkdownFormatter, TerminalFormatter, get_formatter
|
||||
|
||||
__all__ = ["JSONFormatter", "MarkdownFormatter", "TerminalFormatter", "get_formatter"]
|
||||
141
local-ai-commit-reviewer/src/formatters/formatters.py
Normal file
141
local-ai-commit-reviewer/src/formatters/formatters.py
Normal file
@@ -0,0 +1,141 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.style import Style
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
|
||||
from ..core import Issue, IssueCategory, IssueSeverity, ReviewResult
|
||||
|
||||
|
||||
class BaseFormatter(ABC):
    """Abstract interface every output formatter implements."""

    @abstractmethod
    def format(self, result: ReviewResult) -> str:
        """Render ``result`` to a string in this formatter's output format."""
        pass
|
||||
|
||||
|
||||
class TerminalFormatter(BaseFormatter):
    """Render a ReviewResult with Rich panels and tables for terminal output."""

    def __init__(self, theme: str = "auto", show_line_numbers: bool = True):
        """Create a formatter; ``theme`` and ``show_line_numbers`` come from config."""
        self.console = Console()
        self.show_line_numbers = show_line_numbers
        # NOTE(review): this expression is True for "auto" and "dark" but
        # False otherwise, and ``use_colors`` is never read elsewhere in this
        # class — confirm the intended semantics.
        self.use_colors = theme != "dark" if theme == "auto" else theme == "dark"

    def _get_severity_style(self, severity: IssueSeverity) -> Style:
        """Map a severity to a Rich style (red/yellow/blue); empty style otherwise."""
        styles = {
            IssueSeverity.CRITICAL: Style(color="red", bold=True),
            IssueSeverity.WARNING: Style(color="yellow"),
            IssueSeverity.INFO: Style(color="blue"),
        }
        return styles.get(severity, Style())

    def _get_category_icon(self, category: IssueCategory) -> str:
        """Map a category to a short bracketed tag; "" for unknown categories."""
        icons = {
            IssueCategory.BUG: "[BUG]",
            IssueCategory.SECURITY: "[SECURITY]",
            IssueCategory.STYLE: "[STYLE]",
            IssueCategory.PERFORMANCE: "[PERF]",
            IssueCategory.DOCUMENTATION: "[DOC]",
        }
        return icons.get(category, "")

    def _format_issue(self, issue: Issue) -> Text:
        """Build a one-issue Rich Text line: location, severity, tag, message."""
        text = Text()
        text.append(f"{issue.file}:{issue.line} ", style="dim")
        text.append(f"[{issue.severity.value.upper()}] ", self._get_severity_style(issue.severity))
        text.append(f"{self._get_category_icon(issue.category)} ")
        text.append(issue.message)

        if issue.suggestion:
            text.append("\n  Suggestion: ", style="dim")
            text.append(issue.suggestion)

        return text

    def format(self, result: ReviewResult) -> str:
        """Assemble summary, issues table, suggestions, and info panels.

        NOTE(review): the panels/tables are joined via ``str(...)``, which
        yields Rich objects' plain representations rather than rendering
        them through a Console — confirm this is the intended output.
        """
        output: list[Panel | Table | str] = []

        if result.error:
            output.append(Panel(
                f"[red]Error: {result.error}[/red]",
                title="Review Failed",
                expand=False
            ))
            return "\n".join(str(p) for p in output)

        summary = result.summary

        summary_panel = Panel(
            f"[bold]Files Reviewed:[/bold] {summary.files_reviewed}\n"
            f"[bold]Lines Changed:[/bold] {summary.lines_changed}\n\n"
            f"[red]Critical:[/red] {summary.critical_count} "
            f"[yellow]Warnings:[/yellow] {summary.warning_count} "
            f"[blue]Info:[/blue] {summary.info_count}\n\n"
            f"[bold]Assessment:[/bold] {summary.overall_assessment}",
            title="Review Summary",
            expand=False
        )
        output.append(summary_panel)

        if result.issues:
            issues_table = Table(title="Issues Found", show_header=True)
            issues_table.add_column("File", style="dim")
            issues_table.add_column("Line", justify="right", style="dim")
            issues_table.add_column("Severity", width=10)
            issues_table.add_column("Category", width=12)
            issues_table.add_column("Message")

            for issue in result.issues:
                issues_table.add_row(
                    issue.file,
                    str(issue.line),
                    f"[{issue.severity.value.upper()}]",
                    f"[{issue.category.value.upper()}]",
                    issue.message,
                    style=self._get_severity_style(issue.severity)
                )

            output.append(issues_table)

            # Built even when no issue carries a suggestion (empty body panel).
            suggestions_panel = Panel(
                "\n".join(
                    f"[bold]{issue.file}:{issue.line}[/bold]\n"
                    f"  {issue.message}\n"
                    + (f"  [green]→ {issue.suggestion}[/green]\n" if issue.suggestion else "")
                    for issue in result.issues if issue.suggestion
                ),
                title="Suggestions",
                expand=False
            )
            output.append(suggestions_panel)

        model_info = Panel(
            f"[bold]Model:[/bold] {result.model_used}\n"
            f"[bold]Tokens Used:[/bold] {result.tokens_used}\n"
            f"[bold]Mode:[/bold] {result.review_mode}",
            title="Review Info",
            expand=False
        )
        output.append(model_info)

        return "\n".join(str(o) for o in output)
|
||||
|
||||
|
||||
class JSONFormatter(BaseFormatter):
    """Render a ReviewResult as pretty-printed JSON."""

    def format(self, result: ReviewResult) -> str:
        """Delegate to the result's own JSON serialization."""
        return result.to_json()
|
||||
|
||||
|
||||
class MarkdownFormatter(BaseFormatter):
    """Render a ReviewResult as a Markdown report."""

    def format(self, result: ReviewResult) -> str:
        """Delegate to the result's own Markdown serialization."""
        return result.to_markdown()
|
||||
|
||||
|
||||
def get_formatter(format_type: str = "terminal", **kwargs) -> BaseFormatter:
    """Instantiate the formatter registered under ``format_type``.

    Unknown names fall back to the terminal formatter; ``kwargs`` are passed
    through to the chosen formatter's constructor.
    """
    registry: dict[str, type[BaseFormatter]] = {
        "terminal": TerminalFormatter,
        "json": JSONFormatter,
        "markdown": MarkdownFormatter,
    }
    chosen = registry.get(format_type, TerminalFormatter)
    return chosen(**kwargs)  # type: ignore[arg-type]
|
||||
3
local-ai-commit-reviewer/src/git/__init__.py
Normal file
3
local-ai-commit-reviewer/src/git/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .git import FileChange, GitRepo, get_commit_context, get_staged_changes, install_hook
|
||||
|
||||
__all__ = ["FileChange", "GitRepo", "get_commit_context", "get_staged_changes", "install_hook"]
|
||||
278
local-ai-commit-reviewer/src/git/git.py
Normal file
278
local-ai-commit-reviewer/src/git/git.py
Normal file
@@ -0,0 +1,278 @@
|
||||
import subprocess
|
||||
from dataclasses import dataclass
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
@dataclass
class FileChange:
    """One changed file: its git status letter and unified diff text."""

    filename: str
    status: str  # git status letter, e.g. "M", "A", "D"
    diff: str  # unified diff text (may be empty)
    old_content: str | None = None
    new_content: str | None = None
|
||||
|
||||
|
||||
@dataclass
class CommitInfo:
    """Metadata and per-file changes for a single commit."""

    sha: str
    message: str
    author: str
    date: str  # ISO-formatted date string from ``git log --date=iso``
    changes: list[FileChange]
|
||||
|
||||
|
||||
class GitRepo:
|
||||
    def __init__(self, path: Path | None = None):
        """Bind to the repository containing ``path`` (defaults to the CWD)."""
        self.path = path or Path.cwd()
        # Repository top-level directory, or None when ``path`` is not in a repo.
        self.repo = self._get_repo()
|
||||
|
||||
def _get_repo(self) -> Path | None:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "rev-parse", "--show-toplevel"],
|
||||
cwd=self.path,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=5,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return Path(result.stdout.strip())
|
||||
except subprocess.TimeoutExpired:
|
||||
pass
|
||||
return None
|
||||
|
||||
def is_valid(self) -> bool:
|
||||
return self.repo is not None and (self.repo / ".git").exists()
|
||||
|
||||
def get_staged_files(self) -> list[str]:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "diff", "--cached", "--name-only"],
|
||||
cwd=self.repo,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout.strip().split("\n") if result.stdout.strip() else []
|
||||
except subprocess.TimeoutExpired:
|
||||
pass
|
||||
return []
|
||||
|
||||
def get_staged_diff(self, filename: str) -> str:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "diff", "--cached", "--", filename],
|
||||
cwd=self.repo,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10,
|
||||
check=False
|
||||
)
|
||||
return result.stdout if result.returncode == 0 else ""
|
||||
except subprocess.TimeoutExpired:
|
||||
return ""
|
||||
|
||||
def get_all_staged_changes(self) -> list[FileChange]:
|
||||
files = self.get_staged_files()
|
||||
changes = []
|
||||
for filename in files:
|
||||
diff = self.get_staged_diff(filename)
|
||||
status = self._get_file_status(filename)
|
||||
changes.append(FileChange(
|
||||
filename=filename,
|
||||
status=status,
|
||||
diff=diff
|
||||
))
|
||||
return changes
|
||||
|
||||
def _get_file_status(self, filename: str) -> str:
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "diff", "--cached", "--name-status", "--", filename],
|
||||
cwd=self.repo,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=10,
|
||||
check=False
|
||||
)
|
||||
if result.returncode == 0 and result.stdout.strip():
|
||||
return result.stdout.strip().split()[0]
|
||||
except subprocess.TimeoutExpired:
|
||||
pass
|
||||
return "M"
|
||||
|
||||
    def get_commit_info(self, sha: str) -> CommitInfo | None:
        """Fetch message, author, date, and per-file changes for commit ``sha``.

        Returns None when either git query fails or times out.
        """
        try:
            # Full commit message body.
            message_result = subprocess.run(
                ["git", "log", "-1", "--format=%B", sha],
                cwd=self.repo,
                capture_output=True,
                text=True,
                timeout=10,
                check=False
            )

            # "author name|author email|iso date" in one line.
            author_result = subprocess.run(
                ["git", "log", "-1", "--format=%an|%ae|%ad", "--date=iso", sha],
                cwd=self.repo,
                capture_output=True,
                text=True,
                timeout=10,
                check=False
            )

            if message_result.returncode == 0 and author_result.returncode == 0:
                message = message_result.stdout.strip()
                author_parts = author_result.stdout.strip().split("|")
                author = author_parts[0] if author_parts else "Unknown"
                # Index 2 is the date field of the name|email|date triple.
                date = author_parts[2] if len(author_parts) > 2 else ""  # noqa: PLR2004

                changes = self._get_commit_changes(sha)

                return CommitInfo(
                    sha=sha,
                    message=message,
                    author=author,
                    date=date,
                    changes=changes
                )
        except subprocess.TimeoutExpired:
            pass
        return None
|
||||
|
||||
def _get_commit_changes(self, sha: str) -> list[FileChange]:
    """Return a FileChange per file touched by commit *sha* ([] on failure).

    Parses `git show --stat` output: per-file stat lines are indented and
    contain "|" (e.g. " path/to/file | 3 ++-"), which is how filenames are
    recovered here.  NOTE(review): this is fragile for filenames containing
    "|" or unusual stat formatting — confirm against `--numstat` if issues
    arise.  The status is hard-coded to "M" because --stat does not report
    add/delete status.
    """
    try:
        result = subprocess.run(
            ["git", "show", "--stat", sha],
            cwd=self.repo,
            capture_output=True,
            text=True,
            timeout=10,
            check=False
        )
        files = []
        if result.returncode == 0:
            for line in result.stdout.split("\n"):
                # Stat lines are indented and contain the "name | count" bar.
                if line.startswith(" ") and "|" in line:
                    filename = line.split("|")[0].strip()
                    diff = self._get_commit_file_diff(sha, filename)
                    files.append(FileChange(
                        filename=filename,
                        status="M",
                        diff=diff
                    ))
        return files
    except subprocess.TimeoutExpired:
        return []
    return []
|
||||
|
||||
def _get_commit_file_diff(self, sha: str, filename: str) -> str:
    """Return the diff commit *sha* applied to *filename* ("" on any failure).

    Bug fix: the previous revision passed the sha, the "--" separator, and
    a leaked placeholder fused into ONE argv entry, so git could never
    resolve the revision and *filename* was ignored entirely.  They are now
    separate arguments, restricting `git show` to the requested file.
    """
    try:
        result = subprocess.run(
            ["git", "show", sha, "--", filename],
            cwd=self.repo,
            capture_output=True,
            text=True,
            timeout=10,
            check=False,
        )
        return result.stdout if result.returncode == 0 else ""
    except subprocess.TimeoutExpired:
        return ""
|
||||
|
||||
def get_current_branch(self) -> str:
    """Name of the checked-out branch, or "unknown" on any git failure."""
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--abbrev-ref", "HEAD"],
            cwd=self.repo,
            capture_output=True,
            text=True,
            timeout=5,
            check=False,
        )
    except subprocess.TimeoutExpired:
        return "unknown"
    if proc.returncode != 0:
        return "unknown"
    return proc.stdout.strip()
|
||||
|
||||
def get_file_language(self, filename: str) -> str:
    """Map *filename*'s extension (case-insensitive) to a language name.

    Returns "unknown" for unmapped extensions.
    """
    languages = {
        ".py": "python", ".js": "javascript", ".ts": "typescript",
        ".go": "go", ".rs": "rust", ".java": "java",
        ".c": "c", ".cpp": "cpp", ".h": "c", ".hpp": "cpp",
        ".jsx": "javascript", ".tsx": "typescript",
    }
    return languages.get(Path(filename).suffix.lower(), "unknown")
|
||||
|
||||
def get_diff_stats(self, diff: str) -> tuple[int, int]:
    """Count (additions, deletions) in a unified diff, skipping +++/--- headers."""
    lines = diff.split("\n")
    additions = sum(
        1 for ln in lines if ln.startswith("+") and not ln.startswith("+++")
    )
    deletions = sum(
        1 for ln in lines if ln.startswith("-") and not ln.startswith("---")
    )
    return additions, deletions
|
||||
|
||||
|
||||
def get_staged_changes(path: Path | None = None) -> list[FileChange]:
    """Convenience wrapper: staged FileChange list for the repo at *path*."""
    return GitRepo(path).get_all_staged_changes()
|
||||
|
||||
|
||||
def get_commit_context(sha: str, path: Path | None = None) -> CommitInfo | None:
    """Convenience wrapper: CommitInfo for *sha* in the repo at *path*."""
    return GitRepo(path).get_commit_info(sha)
|
||||
|
||||
|
||||
def install_hook(repo_path: Path, hook_type: str = "pre-commit", content: str | None = None) -> bool:
|
||||
hooks_dir = repo_path / ".git" / "hooks"
|
||||
hooks_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
hook_path = hooks_dir / hook_type
|
||||
|
||||
if hook_path.exists():
|
||||
backup_path = hooks_dir / f"{hook_type}.backup"
|
||||
hook_path.rename(backup_path)
|
||||
|
||||
if content is None:
|
||||
content = _get_default_hook_script()
|
||||
|
||||
try:
|
||||
hook_path.write_text(content)
|
||||
hook_path.chmod(0o755)
|
||||
return True
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
|
||||
def _get_default_hook_script() -> str:
|
||||
return """#!/bin/bash
|
||||
# Local AI Commit Reviewer - Pre-commit Hook
|
||||
# This hook runs code review on staged changes before committing
|
||||
|
||||
set -e
|
||||
|
||||
# Check if running with --no-verify
|
||||
if [ "$1" = "--no-verify" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Get the directory where this script is located
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Run the AI commit reviewer
|
||||
cd "$SCRIPT_DIR/../.."
|
||||
python -m aicr review --hook || exit 1
|
||||
"""
|
||||
3
local-ai-commit-reviewer/src/hooks/__init__.py
Normal file
3
local-ai-commit-reviewer/src/hooks/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .hooks import check_hook_installed, install_pre_commit_hook
|
||||
|
||||
__all__ = ["check_hook_installed", "install_pre_commit_hook"]
|
||||
69
local-ai-commit-reviewer/src/hooks/hooks.py
Normal file
69
local-ai-commit-reviewer/src/hooks/hooks.py
Normal file
@@ -0,0 +1,69 @@
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def get_hook_script() -> str:
    """Return the bash pre-commit hook used by install_pre_commit_hook."""
    script = """#!/bin/bash
# Local AI Commit Reviewer - Pre-commit Hook
# Automatically reviews staged changes before committing

set -e

# Allow bypass with --no-verify
if [ "$1" = "--no-verify" ]; then
    exit 0
fi

# Get the directory where this script is located
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# Change to repository root
cd "$SCRIPT_DIR/../.."

# Run the AI commit reviewer
python -m aicr review --hook --strictness balanced || exit 1
"""
    return script
|
||||
|
||||
|
||||
def install_pre_commit_hook(
|
||||
repo_path: Path,
|
||||
content: str | None = None,
|
||||
force: bool = False
|
||||
) -> bool:
|
||||
hooks_dir = repo_path / ".git" / "hooks"
|
||||
hooks_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
hook_path = hooks_dir / "pre-commit"
|
||||
|
||||
if hook_path.exists() and not force:
|
||||
return False
|
||||
|
||||
if content is None:
|
||||
content = get_hook_script()
|
||||
|
||||
try:
|
||||
hook_path.write_text(content)
|
||||
hook_path.chmod(0o755)
|
||||
return True
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
|
||||
def check_hook_installed(repo_path: Path) -> bool:
    """Return True iff a pre-commit hook exists and looks like ours."""
    hook_path = repo_path / ".git" / "hooks" / "pre-commit"
    if not hook_path.exists():
        return False
    text = hook_path.read_text()
    # Heuristic: our hook mentions the tool by one of its names.
    return "aicr" in text or "local-ai-commit-reviewer" in text
|
||||
|
||||
|
||||
def uninstall_hook(repo_path: Path) -> bool:
    """Remove the pre-commit hook; a missing hook counts as success."""
    hook_path = repo_path / ".git" / "hooks" / "pre-commit"
    if not hook_path.exists():
        return True
    try:
        hook_path.unlink()
    except OSError:
        return False
    return True
|
||||
4
local-ai-commit-reviewer/src/llm/__init__.py
Normal file
4
local-ai-commit-reviewer/src/llm/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from .ollama import OllamaProvider
|
||||
from .provider import LLMProvider
|
||||
|
||||
__all__ = ["LLMProvider", "OllamaProvider"]
|
||||
143
local-ai-commit-reviewer/src/llm/ollama.py
Normal file
143
local-ai-commit-reviewer/src/llm/ollama.py
Normal file
@@ -0,0 +1,143 @@
|
||||
import asyncio
|
||||
from collections.abc import AsyncIterator
|
||||
from datetime import datetime
|
||||
|
||||
import ollama
|
||||
|
||||
from .provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
class OllamaProvider(LLMProvider):
    """LLMProvider backed by a local Ollama server.

    Improvement over the previous revision: the chat payload (system/user
    messages plus generation options) was hand-built three times in
    generate/agenerate/stream_generate.  It is now assembled once by
    _chat_kwargs and converted by _to_response, so the prompts and
    defaults cannot drift apart between entry points.
    """

    # System prompt shared by all generation entry points.
    _SYSTEM_PROMPT = "You are a helpful code review assistant. Provide concise, constructive feedback on code changes."

    def __init__(
        self,
        endpoint: str = "http://localhost:11434",
        model: str = "codellama",
        timeout: int = 120
    ):
        self.endpoint = endpoint
        self.model = model
        self.timeout = timeout
        self._client: ollama.Client | None = None  # created lazily by `client`

    @property
    def client(self) -> ollama.Client:
        """Lazily-constructed Ollama client bound to *endpoint*."""
        if self._client is None:
            self._client = ollama.Client(host=self.endpoint)
        return self._client

    def _chat_kwargs(self, prompt: str, stream: bool, **kwargs) -> dict:
        """Build the keyword arguments for ollama.Client.chat.

        max_tokens defaults to 2048 and temperature to 0.3, matching the
        previous per-method defaults.
        """
        return {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self._SYSTEM_PROMPT},
                {"role": "user", "content": prompt},
            ],
            "options": {
                "num_predict": kwargs.get("max_tokens", 2048),
                "temperature": kwargs.get("temperature", 0.3),
            },
            "stream": stream,
        }

    def _to_response(self, response) -> LLMResponse:
        """Convert a raw ollama chat response dict into an LLMResponse."""
        return LLMResponse(
            text=response["message"]["content"],
            model=self.model,
            tokens_used=response.get("eval_count", 0),
            finish_reason=response.get("done_reason", "stop")
        )

    def is_available(self) -> bool:
        """True when the Ollama server answers a health check; never raises."""
        try:
            self.health_check()
            return True
        except Exception:
            return False

    def health_check(self) -> bool:
        """Ping the server via ps(); raises ConnectionError when unreachable."""
        try:
            response = self.client.ps()
            return response is not None
        except Exception as e:
            raise ConnectionError(f"Ollama health check failed: {e}") from None

    def generate(self, prompt: str, **kwargs) -> LLMResponse:
        """Synchronous single-shot generation; raises RuntimeError on failure."""
        try:
            response = self.client.chat(**self._chat_kwargs(prompt, stream=False, **kwargs))
            return self._to_response(response)
        except Exception as e:
            raise RuntimeError(f"Ollama generation failed: {e}") from None

    async def agenerate(self, prompt: str, **kwargs) -> LLMResponse:
        """Async generation; runs the blocking client call in a worker thread."""
        try:
            response = await asyncio.to_thread(
                self.client.chat, **self._chat_kwargs(prompt, stream=False, **kwargs)
            )
            return self._to_response(response)
        except Exception as e:
            raise RuntimeError(f"Ollama async generation failed: {e}") from None

    async def stream_generate(self, prompt: str, **kwargs) -> AsyncIterator[str]:  # type: ignore[misc]
        """Yield response chunks as they arrive.

        NOTE(review): the underlying client call is blocking, so chunks are
        produced synchronously inside this async generator.
        """
        try:
            response = self.client.chat(**self._chat_kwargs(prompt, stream=True, **kwargs))
            for chunk in response:
                if "message" in chunk and "content" in chunk["message"]:
                    yield chunk["message"]["content"]
        except Exception as e:
            raise RuntimeError(f"Ollama streaming failed: {e}") from None

    def list_models(self) -> list[ModelInfo]:
        """Models the server currently reports via ps(); [] on any error."""
        try:
            response = self.client.ps()
            models = []
            if response and "models" in response:
                for model in response["models"]:
                    models.append(ModelInfo(
                        name=model.get("name", "unknown"),
                        size=model.get("size", "unknown"),
                        modified=model.get("modified", datetime.now().isoformat()),
                        digest=model.get("digest", "")
                    ))
            return models
        except Exception:
            return []

    def pull_model(self, model_name: str) -> bool:
        """Download *model_name*, draining the progress stream; True on success."""
        try:
            for _ in self.client.pull(model_name, stream=True):
                pass
            return True
        except Exception:
            return False
|
||||
45
local-ai-commit-reviewer/src/llm/provider.py
Normal file
45
local-ai-commit-reviewer/src/llm/provider.py
Normal file
@@ -0,0 +1,45 @@
|
||||
from abc import ABC, abstractmethod
|
||||
from collections.abc import AsyncIterator
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class LLMResponse:
    """A single completed generation returned by an LLM provider."""
    text: str  # the generated completion text
    model: str  # name of the model that produced it
    tokens_used: int  # provider-reported token count (0 if unknown)
    finish_reason: str  # why generation stopped, e.g. "stop"
|
||||
|
||||
|
||||
@dataclass
class ModelInfo:
    """Metadata describing one model available from a provider."""
    name: str  # model identifier, e.g. "codellama"
    size: str  # human-readable size string as reported by the provider
    modified: str  # last-modified timestamp (ISO string)
    digest: str  # provider content digest ("" if unknown)
|
||||
|
||||
|
||||
class LLMProvider(ABC):
    """Abstract interface every LLM backend must implement."""

    @abstractmethod
    def is_available(self) -> bool:
        """Return True when the backend is reachable and usable."""
        pass

    @abstractmethod
    def generate(self, prompt: str, **kwargs) -> LLMResponse:
        """Synchronously generate a completion for *prompt*."""
        pass

    @abstractmethod
    async def agenerate(self, prompt: str, **kwargs) -> LLMResponse:
        """Asynchronously generate a completion for *prompt*."""
        pass

    @abstractmethod
    def stream_generate(self, prompt: str, **kwargs) -> AsyncIterator[str]:
        """Yield the completion for *prompt* incrementally as chunks."""
        pass

    @abstractmethod
    def list_models(self) -> list[ModelInfo]:
        """Return the models the backend currently offers."""
        pass

    @abstractmethod
    def health_check(self) -> bool:
        """Return True when healthy; may raise on connection failure."""
        pass
|
||||
133
local-ai-commit-reviewer/src/llm/templates.py
Normal file
133
local-ai-commit-reviewer/src/llm/templates.py
Normal file
@@ -0,0 +1,133 @@
|
||||
class ReviewPromptTemplates:
    """Prompt builders for the review LLM.

    Class attributes hold the raw templates; the classmethods fill them in
    for a given diff and strictness level.  Doubled braces ({{ }}) in the
    templates survive str.format() as literal JSON braces.
    """

    # Main review template; placeholders: {strictness}, {strictness_settings}, {diff}.
    base_prompt: str = """You are an expert code reviewer analyzing staged changes in a Git repository.

Review the following code changes and provide detailed feedback on:
1. Potential bugs and security vulnerabilities
2. Code style and best practices violations
3. Performance concerns
4. Documentation issues
5. Suggestions for improvement

Respond in the following JSON format:
{{
"issues": [
{{
"file": "filename",
"line": line_number,
"severity": "critical|warning|info",
"category": "bug|security|style|performance|documentation",
"message": "description of the issue",
"suggestion": "suggested fix (if applicable)"
}}
],
"summary": {{
"critical_count": number,
"warning_count": number,
"info_count": number,
"overall_assessment": "brief summary"
}}
}}

Only include issues that match the strictness level: {strictness}

{strictness_settings}

Review the following diff:
```
{diff}
```
"""

    # Per-strictness instruction blocks substituted into base_prompt.
    permissive_settings: str = """Strictness: PERMISSIVE
- Only report critical security issues
- Only report definite bugs (not potential issues)
- Ignore style and formatting issues
- Ignore performance concerns
- Ignore documentation issues
"""

    balanced_settings: str = """Strictness: BALANCED
- Report all security issues
- Report all definite bugs and potential bugs
- Report major style violations
- Ignore minor performance concerns
- Ignore documentation issues unless critical
"""

    strict_settings: str = """Strictness: STRICT
- Report all security issues (even minor)
- Report all bugs (definite and potential)
- Report all style violations
- Report performance concerns
- Report documentation issues
- Suggest specific improvements
"""

    @classmethod
    def get_prompt(cls, diff: str, strictness: str = "balanced", language: str = "unknown") -> str:
        """Build the main review prompt for *diff*.

        Unknown strictness names fall back to the balanced settings; a
        known *language* appends a language-specific instruction line.
        """
        settings_map = {
            "permissive": cls.permissive_settings,
            "balanced": cls.balanced_settings,
            "strict": cls.strict_settings
        }

        settings = settings_map.get(strictness.lower(), cls.balanced_settings)

        base = cls.base_prompt.format(
            strictness=strictness.upper(),
            strictness_settings=settings,
            diff=diff
        )

        if language != "unknown":
            base += f"\n\nNote: This code is in {language}. Apply {language}-specific best practices."

        return base

    @classmethod
    def get_commit_review_prompt(cls, diff: str, commit_message: str, strictness: str = "balanced") -> str:
        """Prefix the standard review prompt with the commit-message context."""
        prompt = f"""Review the following commit with message: "{commit_message}"

Analyze whether the changes align with the commit message and provide feedback.

"""
        prompt += cls.get_prompt(diff, strictness)
        return prompt

    @classmethod
    def get_security_review_prompt(cls, diff: str) -> str:
        """Build a security-focused review prompt for *diff*."""
        template = """You are a security expert reviewing code changes for vulnerabilities.

Focus specifically on:
1. Injection vulnerabilities (SQL, command, code injection)
2. Authentication and authorization issues
3. Sensitive data exposure
4. Cryptographic weaknesses
5. Path traversal and file inclusion
6. Dependency security issues

Provide findings in JSON format:
```
{{
"vulnerabilities": [
{{
"file": "filename",
"line": line_number,
"severity": "critical|high|medium|low",
"type": "vulnerability type",
"description": "detailed description",
"exploit_scenario": "how it could be exploited",
"fix": "recommended fix"
}}
],
"secure_patterns": ["list of good security practices observed"],
"concerns": ["list of potential security concerns"]
}}
```

Review the following diff:
```
{diff}
```
"""
        return template.format(diff=diff)
|
||||
3
local-ai-commit-reviewer/src/utils/__init__.py
Normal file
3
local-ai-commit-reviewer/src/utils/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .utils import get_file_language, sanitize_output, setup_logging
|
||||
|
||||
__all__ = ["get_file_language", "sanitize_output", "setup_logging"]
|
||||
66
local-ai-commit-reviewer/src/utils/utils.py
Normal file
66
local-ai-commit-reviewer/src/utils/utils.py
Normal file
@@ -0,0 +1,66 @@
|
||||
import logging
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def setup_logging(level: str = "info", log_file: str | None = None) -> logging.Logger:
|
||||
logger = logging.getLogger("aicr")
|
||||
logger.setLevel(getattr(logging, level.upper(), logging.INFO))
|
||||
|
||||
formatter = logging.Formatter(
|
||||
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
)
|
||||
|
||||
stream_handler = logging.StreamHandler(sys.stdout)
|
||||
stream_handler.setFormatter(formatter)
|
||||
logger.addHandler(stream_handler)
|
||||
|
||||
if log_file:
|
||||
file_handler = logging.FileHandler(log_file)
|
||||
file_handler.setFormatter(formatter)
|
||||
logger.addHandler(file_handler)
|
||||
|
||||
return logger
|
||||
|
||||
|
||||
def get_file_language(filename: str) -> str:
    """Map *filename*'s extension (case-insensitive) to a language name.

    Returns "unknown" for unmapped extensions.
    """
    languages = {
        ".py": "python", ".js": "javascript", ".ts": "typescript",
        ".go": "go", ".rs": "rust", ".java": "java",
        ".c": "c", ".cpp": "cpp", ".h": "c", ".hpp": "cpp",
        ".jsx": "javascript", ".tsx": "typescript",
        ".rb": "ruby", ".php": "php", ".swift": "swift",
        ".kt": "kotlin", ".scala": "scala",
    }
    return languages.get(Path(filename).suffix.lower(), "unknown")
|
||||
|
||||
|
||||
def sanitize_output(text: str) -> str:
    """Normalize LLM output by trimming surrounding whitespace."""
    cleaned = text.strip()
    return cleaned
|
||||
|
||||
|
||||
def truncate_text(text: str, max_length: int = 2000, suffix: str = "...") -> str:
    """Clip *text* to *max_length* characters, ending with *suffix* if clipped."""
    if len(text) > max_length:
        # Reserve room for the suffix so the result is exactly max_length.
        return text[: max_length - len(suffix)] + suffix
    return text
|
||||
|
||||
|
||||
def format_file_size(size: float) -> str:
    """Render a byte count as a human-readable string (B..TB, one decimal)."""
    step = 1024
    for unit in ("B", "KB", "MB", "GB"):
        if size < step:
            return f"{size:.1f}{unit}"
        size /= step
    # Anything that survived four divisions is in the terabyte range.
    return f"{size:.1f}TB"
|
||||
126
local-ai-commit-reviewer/tests/conftest.py
Normal file
126
local-ai-commit-reviewer/tests/conftest.py
Normal file
@@ -0,0 +1,126 @@
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from src.config import Config
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
@pytest.fixture
def temp_git_repo():
    """Yield a Path to a freshly initialised, empty Git repository.

    user.email/user.name are configured so commits made by tests succeed;
    the temp directory (and repo) is removed at fixture teardown.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        repo_path = Path(tmpdir)
        subprocess.run(["git", "init"], cwd=repo_path, capture_output=True, check=False)
        subprocess.run(["git", "config", "user.email", "test@test.com"], cwd=repo_path, capture_output=True, check=False)
        subprocess.run(["git", "config", "user.name", "Test"], cwd=repo_path, capture_output=True, check=False)
        yield repo_path
|
||||
|
||||
|
||||
@pytest.fixture
def sample_python_file(temp_git_repo):
    """Create a sample Python file in the temp repo, stage it, return its Path."""
    test_file = temp_git_repo / "test.py"
    test_file.write_text('def hello():\n    print("Hello, World!")\n    return True\n')
    subprocess.run(["git", "add", "test.py"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_js_file(temp_git_repo):
    """Create a sample JavaScript file in the temp repo, stage it, return its Path."""
    test_file = temp_git_repo / "test.js"
    test_file.write_text('function hello() {\n    console.log("Hello, World!");\n}\n')
    subprocess.run(["git", "add", "test.js"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_diff():
    """Return a small unified diff (1 addition, 1 deletion) for testing."""
    return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
 def hello():
+    print("hello")
     return True
-    return False
"""
|
||||
|
||||
|
||||
@pytest.fixture
def mock_config():
    """Return a Config instance with all default settings."""
    return Config()
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """Mock LLM provider for testing.

    Always returns a canned response; availability is set at construction.
    """

    def __init__(self, available: bool = True, response_text: str | None = None):
        # available: what is_available()/health_check() report.
        # response_text: canned reply; defaults to an empty-issues review JSON.
        self._available = available
        self._response_text = response_text or '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, "info_count": 0, "overall_assessment": "No issues"}}'

    def is_available(self) -> bool:
        """Report the availability configured at construction time."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Return the canned response; the prompt is ignored."""
        return LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop"
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant; delegates to the synchronous generate()."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single fixed chunk (plain generator, not async)."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return one fixed mock model entry."""
        return [
            ModelInfo(name="mock-model", size="4GB", modified="2024-01-01", digest="abc123")
        ]

    def health_check(self) -> bool:
        """Mirror is_available(); never raises."""
        return self._available
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_provider():
    """Return a mock LLM provider that reports itself as available."""
    return MockLLMProvider(available=True)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_unavailable():
    """Return a mock LLM provider that reports itself as unavailable."""
    return MockLLMProvider(available=False)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_with_issues():
    """Return a mock LLM provider whose canned reply reports one style warning."""
    # Hand-written review JSON matching the shape the review engine parses.
    response = '''{
    "issues": [
        {
            "file": "test.py",
            "line": 2,
            "severity": "warning",
            "category": "style",
            "message": "Missing docstring for function",
            "suggestion": "Add a docstring above the function definition"
        }
    ],
    "summary": {
        "critical_count": 0,
        "warning_count": 1,
        "info_count": 0,
        "overall_assessment": "Minor style issues found"
    }
}'''
    return MockLLMProvider(available=True, response_text=response)
|
||||
126
local-ai-commit-reviewer/tests/fixtures/sample_repo.py
vendored
Normal file
126
local-ai-commit-reviewer/tests/fixtures/sample_repo.py
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from src.config import Config
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
@pytest.fixture
def temp_git_repo():
    """Yield a Path to a freshly initialised, empty Git repository.

    NOTE(review): this file duplicates tests/conftest.py verbatim —
    consider importing from one place instead of maintaining two copies.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        repo_path = Path(tmpdir)
        subprocess.run(["git", "init"], cwd=repo_path, capture_output=True, check=False)
        subprocess.run(["git", "config", "user.email", "test@test.com"], cwd=repo_path, capture_output=True, check=False)
        subprocess.run(["git", "config", "user.name", "Test"], cwd=repo_path, capture_output=True, check=False)
        yield repo_path
|
||||
|
||||
|
||||
@pytest.fixture
def sample_python_file(temp_git_repo):
    """Create a sample Python file in the temp repo, stage it, return its Path."""
    test_file = temp_git_repo / "test.py"
    test_file.write_text('def hello():\n    print("Hello, World!")\n    return True\n')
    subprocess.run(["git", "add", "test.py"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_js_file(temp_git_repo):
    """Create a sample JavaScript file in the temp repo, stage it, return its Path."""
    test_file = temp_git_repo / "test.js"
    test_file.write_text('function hello() {\n    console.log("Hello, World!");\n}\n')
    subprocess.run(["git", "add", "test.js"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_diff():
    """Return a small unified diff (1 addition, 1 deletion) for testing."""
    return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
 def hello():
+    print("hello")
     return True
-    return False
"""
|
||||
|
||||
|
||||
@pytest.fixture
def mock_config():
    """Return a Config instance with all default settings."""
    return Config()
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """Mock LLM provider for testing.

    NOTE(review): duplicate of the class in tests/conftest.py — keep in
    sync or deduplicate.
    """

    def __init__(self, available: bool = True, response_text: str | None = None):
        # available: what is_available()/health_check() report.
        # response_text: canned reply; defaults to an empty-issues review JSON.
        self._available = available
        self._response_text = response_text or '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, "info_count": 0, "overall_assessment": "No issues"}}'

    def is_available(self) -> bool:
        """Report the availability configured at construction time."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Return the canned response; the prompt is ignored."""
        return LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop"
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant; delegates to the synchronous generate()."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single fixed chunk (plain generator, not async)."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return one fixed mock model entry."""
        return [
            ModelInfo(name="mock-model", size="4GB", modified="2024-01-01", digest="abc123")
        ]

    def health_check(self) -> bool:
        """Mirror is_available(); never raises."""
        return self._available
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_provider():
    """Return a mock LLM provider that reports itself as available."""
    return MockLLMProvider(available=True)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_unavailable():
    """Return a mock LLM provider that reports itself as unavailable."""
    return MockLLMProvider(available=False)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_with_issues():
    """Return a mock LLM provider whose canned reply reports one style warning."""
    # Hand-written review JSON matching the shape the review engine parses.
    response = '''{
    "issues": [
        {
            "file": "test.py",
            "line": 2,
            "severity": "warning",
            "category": "style",
            "message": "Missing docstring for function",
            "suggestion": "Add a docstring above the function definition"
        }
    ],
    "summary": {
        "critical_count": 0,
        "warning_count": 1,
        "info_count": 0,
        "overall_assessment": "Minor style issues found"
    }
}'''
    return MockLLMProvider(available=True, response_text=response)
|
||||
@@ -0,0 +1,46 @@
|
||||
from fixtures.sample_repo import MockLLMProvider
|
||||
|
||||
|
||||
class TestReviewWorkflow:
    """End-to-end review-engine tests using the mock LLM provider."""

    def test_review_with_no_staged_changes(self, temp_git_repo, mock_config):
        """Reviewing an empty change list yields the dedicated error message."""
        from src.core.review_engine import ReviewEngine  # noqa: PLC0415

        engine = ReviewEngine(config=mock_config, llm_provider=MockLLMProvider())
        engine.set_repo(temp_git_repo)
        result = engine.review_staged_changes([])
        assert result.error == "No staged changes found"

    def test_review_with_staged_file(self, temp_git_repo, mock_config, request):
        """A staged file reviews cleanly under the default (balanced) mode."""
        from src.core.review_engine import ReviewEngine  # noqa: PLC0415
        from src.git import get_staged_changes  # noqa: PLC0415

        # Materialise the fixture for its side effect (file created + staged).
        request.getfixturevalue("sample_python_file")
        changes = get_staged_changes(temp_git_repo)

        engine = ReviewEngine(config=mock_config, llm_provider=MockLLMProvider())
        engine.set_repo(temp_git_repo)
        result = engine.review_staged_changes(changes)

        assert result.review_mode == "balanced"
        # NOTE(review): `len(...) >= 0` is always true, so this assertion only
        # really checks `result.error is None` when issues exist — tighten it.
        assert result.error is None or len(result.issues) >= 0
|
||||
|
||||
|
||||
class TestHookInstallation:
    """Tests for installing and detecting the pre-commit hook."""

    def test_install_hook(self, temp_git_repo):
        """Installing writes an executable hook script that mentions the tool."""
        from src.hooks import install_pre_commit_hook  # noqa: PLC0415

        result = install_pre_commit_hook(temp_git_repo)
        assert result is True

        hook_path = temp_git_repo / ".git" / "hooks" / "pre-commit"
        assert hook_path.exists()

        content = hook_path.read_text()
        assert "aicr" in content or "review" in content

    def test_check_hook_installed(self, temp_git_repo):
        """check_hook_installed flips from False to True after installation."""
        from src.hooks import check_hook_installed, install_pre_commit_hook  # noqa: PLC0415

        assert check_hook_installed(temp_git_repo) is False
        install_pre_commit_hook(temp_git_repo)
        assert check_hook_installed(temp_git_repo) is True
|
||||
51
local-ai-commit-reviewer/tests/unit/test_config.py
Normal file
51
local-ai-commit-reviewer/tests/unit/test_config.py
Normal file
@@ -0,0 +1,51 @@
|
||||
|
||||
from src.config import Config, ConfigLoader
|
||||
|
||||
|
||||
class TestConfig:
    """Unit tests for the Config model and its nested sections."""

    def test_default_config(self):
        """Defaults: local Ollama endpoint, codellama, balanced, hooks on."""
        config = Config()
        assert config.llm.endpoint == "http://localhost:11434"
        assert config.llm.model == "codellama"
        assert config.review.strictness == "balanced"
        assert config.hooks.enabled is True

    def test_config_from_dict(self):
        """Keyword construction overrides nested llm/review sections."""
        data = {
            "llm": {
                "endpoint": "http://custom:9000",
                "model": "custom-model"
            },
            "review": {
                "strictness": "strict"
            }
        }
        config = Config(**data)
        assert config.llm.endpoint == "http://custom:9000"
        assert config.llm.model == "custom-model"
        assert config.review.strictness == "strict"

    def test_language_config(self):
        """Python has an enabled per-language configuration by default."""
        config = Config()
        py_config = config.languages.get_language_config("python")
        assert py_config is not None
        assert py_config.enabled is True

    def test_strictness_profiles(self):
        """Permissive skips style checks; strict enables performance checks."""
        config = Config()
        permissive = config.strictness_profiles.get_profile("permissive")
        assert permissive.check_style is False
        strict = config.strictness_profiles.get_profile("strict")
        assert strict.check_performance is True
|
||||
|
||||
|
||||
class TestConfigLoader:
    """Unit tests for ConfigLoader discovery and loading."""

    def test_load_default_config(self):
        """Loading with no explicit path yields a usable Config."""
        loader = ConfigLoader()
        config = loader.load()
        assert isinstance(config, Config)

    def test_find_config_files_nonexistent(self):
        """A missing explicit config path resolves to None."""
        loader = ConfigLoader("/nonexistent/path.yaml")
        path, _global_path = loader.find_config_files()
        assert path is None
|
||||
40
local-ai-commit-reviewer/tests/unit/test_git.py
Normal file
40
local-ai-commit-reviewer/tests/unit/test_git.py
Normal file
@@ -0,0 +1,40 @@
|
||||
from pathlib import Path
|
||||
|
||||
from src.git.git import FileChange, GitRepo
|
||||
|
||||
|
||||
class TestGitRepo:
    """Unit tests for GitRepo's pure helpers (no git subprocess required)."""

    def test_get_file_language(self):
        """Known extensions map to their language; others to "unknown"."""
        repo = GitRepo(Path.cwd())
        assert repo.get_file_language("test.py") == "python"
        assert repo.get_file_language("test.js") == "javascript"
        assert repo.get_file_language("test.go") == "go"
        assert repo.get_file_language("test.rs") == "rust"
        assert repo.get_file_language("test.unknown") == "unknown"

    def test_get_diff_stats(self):
        """Additions/deletions counted; +++/--- file headers excluded."""
        repo = GitRepo(Path.cwd())
        diff = """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
 def hello():
+    print("hello")
     return True
-    return False
"""
        additions, deletions = repo.get_diff_stats(diff)
        assert additions == 1
        assert deletions == 1
|
||||
|
||||
|
||||
class TestFileChange:
    """Unit tests for the FileChange value object."""

    def test_file_change_creation(self):
        """Constructor keyword arguments land on the matching attributes."""
        change = FileChange(filename="test.py", status="M", diff="diff content")
        assert change.filename == "test.py"
        assert change.status == "M"
        assert change.diff == "diff content"
52
local-ai-commit-reviewer/tests/unit/test_llm.py
Normal file
52
local-ai-commit-reviewer/tests/unit/test_llm.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """In-memory LLMProvider stub with configurable availability.

    Never touches the network; every generate call returns the same
    canned response so tests can assert on fixed values.
    """

    def __init__(self, available: bool = True):
        # Availability is frozen at construction time.
        self._available = available
        self._models: list[ModelInfo] = []

    def is_available(self) -> bool:
        """Report the availability flag set at construction."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Return a canned response regardless of the prompt."""
        return LLMResponse(
            text="Mock review response",
            model="mock-model",
            tokens_used=100,
            finish_reason="stop",
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant; simply delegates to generate()."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single canned chunk."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return the (empty) model list held by the stub."""
        return self._models

    def health_check(self) -> bool:
        """Mirror is_available()."""
        return self._available
class TestLLMProvider:
    """Behavioral tests driven through the MockLLMProvider stub."""

    def test_mock_provider_is_available(self):
        """An available stub reports True."""
        assert MockLLMProvider(available=True).is_available() is True

    def test_mock_provider_not_available(self):
        """An unavailable stub reports False."""
        assert MockLLMProvider(available=False).is_available() is False

    def test_mock_generate(self):
        """generate() returns the canned LLMResponse."""
        reply = MockLLMProvider().generate("test prompt")
        assert isinstance(reply, LLMResponse)
        assert reply.text == "Mock review response"
        assert reply.model == "mock-model"

    def test_mock_list_models(self):
        """list_models() always returns a list."""
        assert isinstance(MockLLMProvider().list_models(), list)
76
local-ai-commit-reviewer/tests/unit/test_review.py
Normal file
76
local-ai-commit-reviewer/tests/unit/test_review.py
Normal file
@@ -0,0 +1,76 @@
|
||||
from src.core.review_engine import Issue, IssueCategory, IssueSeverity, ReviewResult, ReviewSummary
|
||||
|
||||
|
||||
class TestIssue:
    """Unit tests for the Issue model and its dict serialization."""

    def test_issue_creation(self):
        """Constructor arguments land on the matching attributes."""
        issue = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.WARNING,
            category=IssueCategory.STYLE,
            message="Missing docstring",
            suggestion="Add a docstring",
        )
        assert issue.file == "test.py"
        assert issue.line == 10  # noqa: PLR2004
        assert issue.severity == IssueSeverity.WARNING

    def test_issue_to_dict(self):
        """to_dict() lowers enum members to their string values."""
        payload = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.CRITICAL,
            category=IssueCategory.BUG,
            message="Potential bug",
        ).to_dict()
        assert payload["file"] == "test.py"
        assert payload["severity"] == "critical"
        assert payload["category"] == "bug"
class TestReviewResult:
    """Unit tests for ReviewResult issue aggregation and filtering."""

    def test_review_result_no_issues(self):
        """A fresh result reports no issues of any severity."""
        empty = ReviewResult()
        assert empty.has_issues() is False
        assert empty.has_critical_issues() is False

    def test_review_result_with_issues(self):
        """A single critical issue flips both has_* predicates."""
        result = ReviewResult()
        result.issues = [
            Issue(
                file="test.py",
                line=1,
                severity=IssueSeverity.CRITICAL,
                category=IssueCategory.SECURITY,
                message="SQL injection",
            ),
        ]
        assert result.has_issues() is True
        assert result.has_critical_issues() is True

    def test_get_issues_by_severity(self):
        """Filtering by severity returns only the matching issues."""
        result = ReviewResult()
        result.issues = [
            Issue(file="a.py", line=1, severity=IssueSeverity.CRITICAL, category=IssueCategory.BUG, message="Bug1"),
            Issue(file="b.py", line=2, severity=IssueSeverity.WARNING, category=IssueCategory.STYLE, message="Style1"),
            Issue(file="c.py", line=3, severity=IssueSeverity.INFO, category=IssueCategory.DOCUMENTATION, message="Doc1"),
        ]
        only_critical = result.get_issues_by_severity(IssueSeverity.CRITICAL)
        assert len(only_critical) == 1
        assert only_critical[0].file == "a.py"
class TestReviewSummary:
    """Unit tests for ReviewSummary counters and serialization."""

    def test_review_summary_aggregation(self):
        """Counters assigned on the summary round-trip through to_dict()."""
        summary = ReviewSummary()
        summary.files_reviewed = 5
        summary.lines_changed = 100
        summary.critical_count = 2
        summary.warning_count = 5
        summary.info_count = 10
        summary.overall_assessment = "Good"

        payload = summary.to_dict()
        assert payload["files_reviewed"] == 5  # noqa: PLR2004
        assert payload["critical_count"] == 2  # noqa: PLR2004
Reference in New Issue
Block a user