Compare commits
56 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
| cb2f8e7afd | |||
| fbdd44f79d | |||
| 6295a12675 | |||
| 4835e112e2 | |||
| dd9fc7b6e4 | |||
| 97aec492c4 | |||
| e3d2b5a463 | |||
| 32fcdc3566 | |||
| 85f2f93dd5 | |||
| fd222ea2e6 | |||
| 39b33a6b5e | |||
| ce659e4525 | |||
| 4d55364455 | |||
| 5a9da2d197 | |||
| da674d9aef | |||
| 65f5dc6200 | |||
| 2d29b09a4c | |||
| 48d52b5f7b | |||
| 87e9478eb2 | |||
| 805473e7b4 | |||
| 2ea0a2c5b9 | |||
| db65e4b5fa | |||
| 4aa97162b6 | |||
| 2ac98ae685 | |||
| 188bd8a9cf | |||
| a490bc51d1 | |||
| 5dea7dca14 | |||
| f9cacd1b76 | |||
| ffe1b2fd80 | |||
| 23f6326d6e | |||
| e0476f7449 | |||
| fb76194c2a | |||
| 0ca76fed49 | |||
| d345f3af86 | |||
| 58eb45532a | |||
| 2476a121b9 | |||
| 0d143c5923 | |||
| 3d7107e79d | |||
| 59d1782c17 | |||
| da1a064831 | |||
| 07f2a768f8 | |||
| 6fee113a88 | |||
| 7df0b1432f | |||
| b9c0930c38 | |||
| 7d38451b36 | |||
| 89b7f0ba8e | |||
| 88d894cf75 | |||
| 9abbcdc72b | |||
| 3ef5fb4864 | |||
| 9d96659a9d | |||
| dffe9b3bc6 | |||
| ce63ec78ea | |||
| 58a45aa7bf | |||
| 60e6ba857c | |||
| 82ed484841 | |||
| a20a3c2b27 |
@@ -1,51 +1,34 @@
|
||||
name: CI
|
||||
name: CI/CD
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, master]
|
||||
branches: [ main, master ]
|
||||
pull_request:
|
||||
branches: [main, master]
|
||||
branches: [ main, master ]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
lint-and-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
python-version: '3.10'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -e ".[dev]"
|
||||
|
||||
- name: Run tests
|
||||
- name: Run Ruff linting
|
||||
run: ruff check src tests
|
||||
|
||||
- name: Run MyPy type checking
|
||||
run: mypy src
|
||||
|
||||
- name: Run pytest
|
||||
run: pytest tests/ -v --tb=short
|
||||
|
||||
- name: Run linting
|
||||
run: ruff check src/ tests/
|
||||
|
||||
- name: Run type checking
|
||||
run: mypy src/
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
needs: test
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Build package
|
||||
run: |
|
||||
pip install build
|
||||
python -m build
|
||||
|
||||
- name: Verify build
|
||||
run: |
|
||||
pip install dist/*.whl
|
||||
aicr --help
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
# Local AI Commit Reviewer CLI
|
||||
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
[](https://www.python.org/downloads/)
|
||||
[](https://7000pct.gitea.bloupla.net/7000pctAUTO/local-ai-commit-reviewer/actions)
|
||||
|
||||
A CLI tool that reviews Git commits locally using lightweight LLMs (Ollama/MLX) before pushing. It analyzes staged changes, provides inline suggestions, and integrates with Git workflows while preserving code privacy through local processing.
|
||||
|
||||
## Quick Start
|
||||
@@ -33,7 +29,7 @@ pip install local-ai-commit-reviewer
|
||||
### From Source
|
||||
|
||||
```bash
|
||||
git clone https://7000pct.gitea.bloupla.net/7000pctAUTO/local-ai-commit-reviewer.git
|
||||
git clone https://github.com/yourusername/local-ai-commit-reviewer.git
|
||||
cd local-ai-commit-reviewer
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
81
app/local-ai-commit-reviewer/pyproject.toml
Normal file
81
app/local-ai-commit-reviewer/pyproject.toml
Normal file
@@ -0,0 +1,81 @@
|
||||
[build-system]
|
||||
requires = ["setuptools>=61.0", "wheel"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "local-ai-commit-reviewer"
|
||||
version = "0.1.0"
|
||||
description = "A CLI tool that reviews Git commits locally using lightweight LLMs"
|
||||
readme = "README.md"
|
||||
license = {text = "MIT"}
|
||||
requires-python = ">=3.10"
|
||||
authors = [
|
||||
{name = "Local AI Commit Reviewer Contributors"}
|
||||
]
|
||||
keywords = ["git", "cli", "llm", "code-review", "ollama"]
|
||||
classifiers = [
|
||||
"Development Status :: 4 - Beta",
|
||||
"Intended Audience :: Developers",
|
||||
"License :: OSI Approved :: MIT License",
|
||||
"Programming Language :: Python :: 3",
|
||||
"Programming Language :: Python :: 3.10",
|
||||
"Programming Language :: Python :: 3.11",
|
||||
"Programming Language :: Python :: 3.12",
|
||||
]
|
||||
dependencies = [
|
||||
"click>=8.1.7",
|
||||
"gitpython>=3.1.43",
|
||||
"ollama>=0.3.3",
|
||||
"rich>=13.7.1",
|
||||
"pydantic>=2.6.1",
|
||||
"pyyaml>=6.0.1",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
dev = [
|
||||
"pytest>=7.4.0",
|
||||
"pytest-cov>=4.1.0",
|
||||
"pytest-mock>=3.12.0",
|
||||
"black>=23.0.0",
|
||||
"ruff>=0.1.0",
|
||||
"mypy>=1.7.0",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
aicr = "src.cli:main"
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
where = ["."]
|
||||
include = ["src*"]
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests"]
|
||||
python_files = ["test_*.py"]
|
||||
python_functions = ["test_*"]
|
||||
addopts = "-v --tb=short"
|
||||
|
||||
[tool.coverage.run]
|
||||
source = ["src"]
|
||||
omit = ["tests/*"]
|
||||
|
||||
[tool.coverage.report]
|
||||
exclude_lines = ["pragma: no cover", "def __repr__", "raise NotImplementedError"]
|
||||
|
||||
[tool.black]
|
||||
line-length = 100
|
||||
target-version = ["py310"]
|
||||
include = "\\.pyi?$"
|
||||
|
||||
[tool.ruff]
|
||||
line-length = 100
|
||||
target-version = "py310"
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = ["E", "W", "F", "I", "UP", "B", "C4", "A", "SIM", "ARG", "PL", "RUF"]
|
||||
ignore = ["E501", "B008", "C901"]
|
||||
|
||||
[tool.mypy]
|
||||
python_version = "3.10"
|
||||
warn_return_any = true
|
||||
warn_unused_configs = true
|
||||
ignore_missing_imports = true
|
||||
337
app/local-ai-commit-reviewer/src/cli/cli.py
Normal file
337
app/local-ai-commit-reviewer/src/cli/cli.py
Normal file
@@ -0,0 +1,337 @@
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, Union
|
||||
|
||||
import click
|
||||
from rich import print as rprint
|
||||
|
||||
from ..config import Config, get_config
|
||||
from ..core import ReviewEngine, ReviewResult
|
||||
from ..formatters import get_formatter
|
||||
from ..git import FileChange, GitRepo, get_staged_changes
|
||||
from ..git import install_hook as git_install_hook
|
||||
from ..llm import OllamaProvider
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.option("--config", "-c", type=click.Path(exists=True), help="Path to config file")
|
||||
@click.option("--endpoint", help="LLM endpoint URL", default=None)
|
||||
@click.option("--model", "-m", help="Model name to use", default=None)
|
||||
@click.pass_context
|
||||
def cli(ctx: click.Context, config: str | None, endpoint: str | None, model: str | None):
|
||||
ctx.ensure_object(dict)
|
||||
cfg_path = config or os.environ.get("AICR_CONFIG_PATH")
|
||||
cfg = get_config(cfg_path)
|
||||
|
||||
if endpoint:
|
||||
cfg.llm.endpoint = endpoint
|
||||
if model:
|
||||
cfg.llm.model = model
|
||||
|
||||
ctx.obj["config"] = cfg
|
||||
ctx.obj["repo_path"] = Path.cwd()
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--strictness", "-s", type=click.Choice(["permissive", "balanced", "strict"]), default=None)
|
||||
@click.option("--output", "-o", type=click.Choice(["terminal", "json", "markdown"]), default="terminal")
|
||||
@click.option("--commit", "-C", help="Review a specific commit SHA", default=None)
|
||||
@click.option("--hook", is_flag=True, help="Run in hook mode (exit non-zero on critical)")
|
||||
@click.option("--file", "-f", multiple=True, help="Files to review (default: all staged)")
|
||||
@click.pass_context
|
||||
def review( # noqa: PLR0913
|
||||
ctx: click.Context,
|
||||
strictness: str | None,
|
||||
output: str,
|
||||
commit: str | None,
|
||||
hook: bool,
|
||||
file: tuple
|
||||
):
|
||||
cfg: Config = ctx.obj["config"]
|
||||
|
||||
if strictness is None:
|
||||
strictness = cfg.review.strictness
|
||||
|
||||
try:
|
||||
engine = ReviewEngine(config=cfg)
|
||||
engine.set_repo(ctx.obj["repo_path"])
|
||||
|
||||
if commit:
|
||||
result = engine.review_commit(commit, strictness=strictness)
|
||||
else:
|
||||
files = _get_files_to_review(ctx.obj["repo_path"], file)
|
||||
|
||||
if not files:
|
||||
rprint("[yellow]No staged changes found. Stage files with 'git add <files>' first.[/yellow]")
|
||||
if hook:
|
||||
sys.exit(0)
|
||||
return
|
||||
|
||||
result = engine.review_staged_changes(files, strictness=strictness)
|
||||
|
||||
formatter = get_formatter(output)
|
||||
output_text = formatter.format(result)
|
||||
rprint(output_text)
|
||||
|
||||
if output == "json":
|
||||
ctx.obj["result_json"] = result.to_json()
|
||||
elif output == "markdown":
|
||||
ctx.obj["result_markdown"] = result.to_markdown()
|
||||
|
||||
_handle_hook_exit(result, hook, cfg)
|
||||
|
||||
except Exception as e:
|
||||
rprint(f"[red]Error during review: {e}[/red]")
|
||||
if hook:
|
||||
sys.exit(1)
|
||||
raise
|
||||
|
||||
|
||||
def _get_files_to_review(repo_path: Path, file: tuple) -> list[FileChange]:
|
||||
if file:
|
||||
changes = []
|
||||
for filename in file:
|
||||
repo = GitRepo(repo_path)
|
||||
diff = repo.get_staged_diff(filename)
|
||||
if diff:
|
||||
changes.append(FileChange(
|
||||
filename=filename,
|
||||
status="M",
|
||||
diff=diff
|
||||
))
|
||||
return changes
|
||||
return get_staged_changes(repo_path)
|
||||
|
||||
|
||||
def _handle_hook_exit(result: ReviewResult, hook: bool, cfg: Config) -> None:
|
||||
if not hook:
|
||||
return
|
||||
if result.has_critical_issues() and cfg.hooks.fail_on_critical:
|
||||
rprint("\n[red]Critical issues found. Commit blocked.[/red]")
|
||||
sys.exit(1)
|
||||
if not result.has_issues():
|
||||
rprint("[green]No issues found. Proceeding with commit.[/green]")
|
||||
sys.exit(0)
|
||||
if not cfg.hooks.fail_on_critical:
|
||||
rprint("\n[yellow]Issues found but not blocking commit (fail_on_critical=false).[/yellow]")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--local", is_flag=True, help="Install hook locally (in current repo)")
|
||||
@click.option("--global", "global_", is_flag=True, help="Install hook globally")
|
||||
@click.option("--force", is_flag=True, help="Overwrite existing hook")
|
||||
@click.pass_context
|
||||
def hook(ctx: click.Context, local: bool, global_: bool, force: bool):
|
||||
ctx.ensure_object(dict)
|
||||
|
||||
if not local and not global_:
|
||||
local = True
|
||||
|
||||
if global_:
|
||||
home = Path.home()
|
||||
git_template = home / ".git-template" / "hooks"
|
||||
if not git_template.exists():
|
||||
rprint("[yellow]Git template directory not found. Creating...[/yellow]")
|
||||
git_template.mkdir(parents=True, exist_ok=True)
|
||||
(git_template / "pre-commit").write_text(_get_hook_script())
|
||||
rprint(f"[green]Global hook template created at {git_template}[/green]")
|
||||
rprint("[yellow]Note: New repos will use this template. Existing repos need local install.[/yellow]")
|
||||
else:
|
||||
rprint("[green]Global hook template already exists.[/green]")
|
||||
else:
|
||||
repo_path = ctx.obj["repo_path"]
|
||||
git_hooks = repo_path / ".git" / "hooks"
|
||||
hook_path = git_hooks / "pre-commit"
|
||||
|
||||
if hook_path.exists() and not force:
|
||||
rprint(f"[yellow]Hook already exists at {hook_path}. Use --force to overwrite.[/yellow]")
|
||||
return
|
||||
|
||||
if git_install_hook(repo_path, "pre-commit", _get_hook_script()):
|
||||
rprint(f"[green]Pre-commit hook installed at {hook_path}[/green]")
|
||||
else:
|
||||
rprint("[red]Failed to install hook.[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def _get_hook_script() -> str:
|
||||
return """#!/bin/bash
|
||||
# Local AI Commit Reviewer - Pre-commit Hook
|
||||
# Automatically reviews staged changes before committing
|
||||
|
||||
set -e
|
||||
|
||||
# Allow bypass with --no-verify
|
||||
if [ "$1" = "--no-verify" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Get the directory where this script is located
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
|
||||
# Run the AI commit reviewer
|
||||
cd "$SCRIPT_DIR/../.."
|
||||
python -m aicr review --hook --strictness balanced || exit 1
|
||||
"""
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.option("--set", "set_opt", nargs=2, multiple=True, help="Set config option (key value)")
|
||||
@click.option("--get", help="Get config option value", default=None)
|
||||
@click.option("--list", is_flag=True, help="List all config options")
|
||||
@click.option("--path", is_flag=True, help="Show config file path")
|
||||
@click.pass_context
|
||||
def config(ctx: click.Context, set_opt: tuple, get: str | None, list_: bool, path: bool):
|
||||
cfg: Config = ctx.obj["config"]
|
||||
|
||||
if path:
|
||||
config_path = os.environ.get("AICR_CONFIG_PATH") or str(Path.cwd() / ".aicr.yaml")
|
||||
rprint(f"Config path: {config_path}")
|
||||
return
|
||||
|
||||
if get:
|
||||
value = _get_nested_attr(cfg, get)
|
||||
if value is not None:
|
||||
rprint(f"{get}: {value}")
|
||||
else:
|
||||
rprint(f"[red]Unknown config option: {get}[/red]")
|
||||
return
|
||||
|
||||
if list_:
|
||||
for section in ["llm", "review", "languages", "hooks", "output", "logging"]:
|
||||
section_obj = getattr(cfg, section, None)
|
||||
if section_obj:
|
||||
rprint(f"[bold]{section.upper()}[/bold]")
|
||||
for key, value in section_obj.model_dump().items():
|
||||
rprint(f" {key}: {value}")
|
||||
return
|
||||
|
||||
if set_opt:
|
||||
for key, value in set_opt:
|
||||
_set_nested_attr(cfg, key, value)
|
||||
rprint("[green]Configuration updated.[/green]")
|
||||
return
|
||||
|
||||
rprint("[bold]Local AI Commit Reviewer Configuration[/bold]")
|
||||
rprint(f"LLM Endpoint: {cfg.llm.endpoint}")
|
||||
rprint(f"Model: {cfg.llm.model}")
|
||||
rprint(f"Strictness: {cfg.review.strictness}")
|
||||
|
||||
|
||||
def _get_nested_attr(obj, attr_path: str):
|
||||
parts = attr_path.split(".")
|
||||
current = obj
|
||||
for part in parts:
|
||||
if hasattr(current, part):
|
||||
current = getattr(current, part)
|
||||
else:
|
||||
return None
|
||||
return current
|
||||
|
||||
|
||||
def _set_nested_attr(obj, attr_path: str, value: Any) -> None:
|
||||
parts = attr_path.split(".")
|
||||
current: Any = obj
|
||||
for part in parts[:-1]:
|
||||
if hasattr(current, part):
|
||||
current = getattr(current, part)
|
||||
|
||||
final_attr = parts[-1]
|
||||
if hasattr(current, final_attr):
|
||||
attr = getattr(type(current), final_attr, None)
|
||||
if attr is not None and hasattr(attr, "annotation"):
|
||||
type_hint = attr.annotation # type: ignore[attr-defined]
|
||||
if getattr(type_hint, "__origin__", None) is Union:
|
||||
type_hint = type_hint.__args__[0]
|
||||
if hasattr(type_hint, "__name__"):
|
||||
if type_hint.__name__ == "int" and isinstance(value, str):
|
||||
value = int(value)
|
||||
elif type_hint.__name__ == "float" and isinstance(value, str):
|
||||
value = float(value)
|
||||
elif type_hint.__name__ == "bool" and isinstance(value, str):
|
||||
value = value.lower() in ("true", "1", "yes")
|
||||
setattr(current, final_attr, value)
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.pass_context
|
||||
def models(ctx: click.Context):
|
||||
cfg: Config = ctx.obj["config"]
|
||||
|
||||
try:
|
||||
provider = OllamaProvider(
|
||||
endpoint=cfg.llm.endpoint,
|
||||
model=cfg.llm.model
|
||||
)
|
||||
|
||||
if not provider.is_available():
|
||||
rprint("[red]Ollama is not available. Make sure it's running.[/red]")
|
||||
rprint("Start Ollama with: ollama serve")
|
||||
sys.exit(1)
|
||||
|
||||
models = provider.list_models()
|
||||
|
||||
if not models:
|
||||
rprint("[yellow]No models found. Pull a model first.[/yellow]")
|
||||
rprint("Example: ollama pull codellama")
|
||||
return
|
||||
|
||||
rprint("[bold]Available Models[/bold]\n")
|
||||
for model in models:
|
||||
rprint(f" {model.name}")
|
||||
rprint(f" Size: {model.size}")
|
||||
rprint(f" Modified: {model.modified}\n")
|
||||
|
||||
except Exception as e:
|
||||
rprint(f"[red]Error listing models: {e}[/red]")
|
||||
raise
|
||||
|
||||
|
||||
@cli.command()
|
||||
@click.pass_context
|
||||
def status(ctx: click.Context):
|
||||
cfg: Config = ctx.obj["config"]
|
||||
|
||||
rprint("[bold]Local AI Commit Reviewer Status[/bold]\n")
|
||||
|
||||
rprint("[bold]Configuration:[/bold]")
|
||||
rprint(f" LLM Endpoint: {cfg.llm.endpoint}")
|
||||
rprint(f" Model: {cfg.llm.model}")
|
||||
rprint(f" Strictness: {cfg.review.strictness}\n")
|
||||
|
||||
try:
|
||||
provider = OllamaProvider(
|
||||
endpoint=cfg.llm.endpoint,
|
||||
model=cfg.llm.model
|
||||
)
|
||||
|
||||
if provider.is_available():
|
||||
rprint("[green]✓ Ollama is running[/green]")
|
||||
models = provider.list_models()
|
||||
rprint(f" {len(models)} model(s) available")
|
||||
else:
|
||||
rprint("[red]✗ Ollama is not running[/red]")
|
||||
rprint(" Start with: ollama serve")
|
||||
except Exception as e:
|
||||
rprint(f"[red]✗ Error checking Ollama: {e}[/red]")
|
||||
|
||||
repo = GitRepo(ctx.obj["repo_path"])
|
||||
if repo.is_valid():
|
||||
rprint("\n[green]✓ Valid Git repository[/green]")
|
||||
branch = repo.get_current_branch()
|
||||
rprint(f" Branch: {branch}")
|
||||
|
||||
staged = repo.get_staged_files()
|
||||
rprint(f" Staged files: {len(staged)}")
|
||||
else:
|
||||
rprint("\n[yellow]⚠ Not a Git repository[/yellow]")
|
||||
|
||||
|
||||
def main():
|
||||
cli(obj={})
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
164
app/local-ai-commit-reviewer/src/config/config.py
Normal file
164
app/local-ai-commit-reviewer/src/config/config.py
Normal file
@@ -0,0 +1,164 @@
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml # type: ignore[import-untyped]
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class LLMConfig(BaseModel):
|
||||
endpoint: str = "http://localhost:11434"
|
||||
model: str = "codellama"
|
||||
timeout: int = 120
|
||||
max_tokens: int = 2048
|
||||
temperature: float = 0.3
|
||||
|
||||
|
||||
class ReviewSettings(BaseModel):
|
||||
strictness: str = "balanced"
|
||||
max_issues_per_file: int = 20
|
||||
syntax_highlighting: bool = True
|
||||
show_line_numbers: bool = True
|
||||
|
||||
|
||||
class LanguageConfig(BaseModel):
|
||||
enabled: bool = True
|
||||
review_rules: list[str] = Field(default_factory=list)
|
||||
max_line_length: int = 100
|
||||
|
||||
|
||||
class Languages(BaseModel):
|
||||
python: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["pep8", "type-hints", "docstrings"]))
|
||||
javascript: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["airbnb"]))
|
||||
typescript: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["airbnb"]))
|
||||
go: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["golint", "staticcheck"]))
|
||||
rust: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["clippy"]))
|
||||
java: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["google-java"]))
|
||||
c: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["cppcheck"]))
|
||||
cpp: LanguageConfig = Field(default_factory=lambda: LanguageConfig(review_rules=["cppcheck"]))
|
||||
|
||||
def get_language_config(self, language: str) -> LanguageConfig | None:
|
||||
return getattr(self, language.lower(), None)
|
||||
|
||||
|
||||
class StrictnessProfile(BaseModel):
|
||||
description: str = ""
|
||||
check_security: bool = True
|
||||
check_bugs: bool = True
|
||||
check_style: bool = True
|
||||
check_performance: bool = False
|
||||
check_documentation: bool = False
|
||||
min_severity: str = "info"
|
||||
|
||||
|
||||
class StrictnessProfiles(BaseModel):
|
||||
permissive: StrictnessProfile = Field(default_factory=lambda: StrictnessProfile(
|
||||
description="Focus on critical issues only",
|
||||
check_security=True,
|
||||
check_bugs=True,
|
||||
check_style=False,
|
||||
check_performance=False,
|
||||
check_documentation=False,
|
||||
min_severity="warning"
|
||||
))
|
||||
balanced: StrictnessProfile = Field(default_factory=lambda: StrictnessProfile(
|
||||
description="Balanced review of common issues",
|
||||
check_security=True,
|
||||
check_bugs=True,
|
||||
check_style=True,
|
||||
check_performance=False,
|
||||
check_documentation=False,
|
||||
min_severity="info"
|
||||
))
|
||||
strict: StrictnessProfile = Field(default_factory=lambda: StrictnessProfile(
|
||||
description="Comprehensive review of all issues",
|
||||
check_security=True,
|
||||
check_bugs=True,
|
||||
check_style=True,
|
||||
check_performance=True,
|
||||
check_documentation=True,
|
||||
min_severity="info"
|
||||
))
|
||||
|
||||
def get_profile(self, name: str) -> StrictnessProfile:
|
||||
return getattr(self, name.lower(), self.balanced)
|
||||
|
||||
|
||||
class HooksConfig(BaseModel):
|
||||
enabled: bool = True
|
||||
fail_on_critical: bool = True
|
||||
allow_bypass: bool = True
|
||||
|
||||
|
||||
class OutputConfig(BaseModel):
|
||||
format: str = "terminal"
|
||||
theme: str = "auto"
|
||||
show_suggestions: bool = True
|
||||
|
||||
|
||||
class LoggingConfig(BaseModel):
|
||||
level: str = "info"
|
||||
log_file: str = ""
|
||||
structured: bool = False
|
||||
|
||||
|
||||
class Config(BaseModel):
|
||||
llm: LLMConfig = Field(default_factory=LLMConfig)
|
||||
review: ReviewSettings = Field(default_factory=ReviewSettings)
|
||||
languages: Languages = Field(default_factory=Languages)
|
||||
strictness_profiles: StrictnessProfiles = Field(default_factory=StrictnessProfiles)
|
||||
hooks: HooksConfig = Field(default_factory=HooksConfig)
|
||||
output: OutputConfig = Field(default_factory=OutputConfig)
|
||||
logging: LoggingConfig = Field(default_factory=LoggingConfig)
|
||||
|
||||
|
||||
class ConfigLoader:
|
||||
def __init__(self, config_path: str | None = None):
|
||||
self.config_path = config_path
|
||||
self.global_config: Path | None = None
|
||||
self.project_config: Path | None = None
|
||||
|
||||
def find_config_files(self) -> tuple[Path | None, Path | None]:
|
||||
env_config_path = os.environ.get("AICR_CONFIG_PATH")
|
||||
|
||||
if env_config_path:
|
||||
env_path = Path(env_config_path)
|
||||
if env_path.exists():
|
||||
return env_path, None
|
||||
|
||||
self.global_config = Path.home() / ".aicr.yaml"
|
||||
self.project_config = Path.cwd() / ".aicr.yaml"
|
||||
|
||||
if self.project_config.exists():
|
||||
return self.project_config, self.global_config
|
||||
|
||||
if self.global_config.exists():
|
||||
return self.global_config, None
|
||||
|
||||
return None, None
|
||||
|
||||
def load(self) -> Config:
|
||||
config_path, global_path = self.find_config_files()
|
||||
|
||||
config_data: dict[str, Any] = {}
|
||||
|
||||
if global_path and global_path.exists():
|
||||
with open(global_path) as f:
|
||||
global_data = yaml.safe_load(f) or {}
|
||||
config_data.update(global_data)
|
||||
|
||||
if config_path and config_path.exists():
|
||||
with open(config_path) as f:
|
||||
project_data = yaml.safe_load(f) or {}
|
||||
config_data.update(project_data)
|
||||
|
||||
return Config(**config_data)
|
||||
|
||||
def save(self, config: Config, path: Path) -> None:
|
||||
with open(path, "w") as f:
|
||||
yaml.dump(config.model_dump(), f, default_flow_style=False)
|
||||
|
||||
|
||||
def get_config(config_path: str | None = None) -> Config:
|
||||
loader = ConfigLoader(config_path)
|
||||
return loader.load()
|
||||
423
app/local-ai-commit-reviewer/src/core/review_engine.py
Normal file
423
app/local-ai-commit-reviewer/src/core/review_engine.py
Normal file
@@ -0,0 +1,423 @@
|
||||
import json
|
||||
import re
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from ..config import Config, StrictnessProfile
|
||||
from ..git import FileChange, GitRepo
|
||||
from ..llm import LLMProvider, OllamaProvider
|
||||
from ..llm.templates import ReviewPromptTemplates
|
||||
|
||||
|
||||
class IssueSeverity(str, Enum):
|
||||
CRITICAL = "critical"
|
||||
WARNING = "warning"
|
||||
INFO = "info"
|
||||
|
||||
|
||||
class IssueCategory(str, Enum):
|
||||
BUG = "bug"
|
||||
SECURITY = "security"
|
||||
STYLE = "style"
|
||||
PERFORMANCE = "performance"
|
||||
DOCUMENTATION = "documentation"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Issue:
|
||||
file: str
|
||||
line: int
|
||||
severity: IssueSeverity
|
||||
category: IssueCategory
|
||||
message: str
|
||||
suggestion: str | None = None
|
||||
raw_line: str | None = None
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"file": self.file,
|
||||
"line": self.line,
|
||||
"severity": self.severity.value,
|
||||
"category": self.category.value,
|
||||
"message": self.message,
|
||||
"suggestion": self.suggestion,
|
||||
"raw_line": self.raw_line
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "Issue":
|
||||
return cls(
|
||||
file=data["file"],
|
||||
line=data["line"],
|
||||
severity=IssueSeverity(data["severity"]),
|
||||
category=IssueCategory(data["category"]),
|
||||
message=data["message"],
|
||||
suggestion=data.get("suggestion"),
|
||||
raw_line=data.get("raw_line")
|
||||
)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ReviewSummary:
|
||||
critical_count: int = 0
|
||||
warning_count: int = 0
|
||||
info_count: int = 0
|
||||
files_reviewed: int = 0
|
||||
lines_changed: int = 0
|
||||
overall_assessment: str = ""
|
||||
issues_by_category: dict = field(default_factory=dict)
|
||||
issues_by_file: dict = field(default_factory=dict)
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
return {
|
||||
"critical_count": self.critical_count,
|
||||
"warning_count": self.warning_count,
|
||||
"info_count": self.info_count,
|
||||
"files_reviewed": self.files_reviewed,
|
||||
"lines_changed": self.lines_changed,
|
||||
"overall_assessment": self.overall_assessment,
|
||||
"issues_by_category": self.issues_by_category,
|
||||
"issues_by_file": self.issues_by_file
|
||||
}
|
||||
|
||||
|
||||
@dataclass
|
||||
class ReviewResult:
|
||||
issues: list[Issue] = field(default_factory=list)
|
||||
summary: ReviewSummary = field(default_factory=ReviewSummary)
|
||||
model_used: str = ""
|
||||
tokens_used: int = 0
|
||||
review_mode: str = ""
|
||||
error: str | None = None
|
||||
|
||||
def has_critical_issues(self) -> bool:
|
||||
return any(issue.severity == IssueSeverity.CRITICAL for issue in self.issues)
|
||||
|
||||
def has_issues(self) -> bool:
|
||||
return len(self.issues) > 0
|
||||
|
||||
def get_issues_by_severity(self, severity: IssueSeverity) -> list[Issue]:
|
||||
return [issue for issue in self.issues if issue.severity == severity]
|
||||
|
||||
def get_issues_by_file(self, filename: str) -> list[Issue]:
|
||||
return [issue for issue in self.issues if issue.file == filename]
|
||||
|
||||
def get_issues_by_category(self, category: IssueCategory) -> list[Issue]:
|
||||
return [issue for issue in self.issues if issue.category == category]
|
||||
|
||||
def to_json(self) -> str:
|
||||
return json.dumps({
|
||||
"issues": [issue.to_dict() for issue in self.issues],
|
||||
"summary": self.summary.to_dict(),
|
||||
"model_used": self.model_used,
|
||||
"tokens_used": self.tokens_used,
|
||||
"review_mode": self.review_mode
|
||||
}, indent=2)
|
||||
|
||||
def to_markdown(self) -> str:
|
||||
lines = ["# AI Commit Review Results\n"]
|
||||
|
||||
lines.append("## Summary\n")
|
||||
lines.append(f"- **Files Reviewed**: {self.summary.files_reviewed}")
|
||||
lines.append(f"- **Lines Changed**: {self.summary.lines_changed}")
|
||||
lines.append(f"- **Critical Issues**: {self.summary.critical_count}")
|
||||
lines.append(f"- **Warnings**: {self.summary.warning_count}")
|
||||
lines.append(f"- **Info**: {self.summary.info_count}")
|
||||
lines.append(f"- **Assessment**: {self.summary.overall_assessment}\n")
|
||||
|
||||
if self.issues:
|
||||
lines.append("## Issues Found\n")
|
||||
|
||||
for severity in [IssueSeverity.CRITICAL, IssueSeverity.WARNING, IssueSeverity.INFO]:
|
||||
severity_issues = self.get_issues_by_severity(severity)
|
||||
if severity_issues:
|
||||
lines.append(f"### {severity.value.upper()} ({len(severity_issues)})\n")
|
||||
for issue in severity_issues:
|
||||
lines.append(f"#### {issue.file}:{issue.line}")
|
||||
lines.append(f"- **Category**: {issue.category.value}")
|
||||
lines.append(f"- **Message**: {issue.message}")
|
||||
if issue.suggestion:
|
||||
lines.append(f"- **Suggestion**: {issue.suggestion}")
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
class ReviewEngine:
|
||||
def __init__(
|
||||
self,
|
||||
config: Config | None = None,
|
||||
llm_provider: LLMProvider | None = None
|
||||
):
|
||||
self.config = config or Config()
|
||||
self.llm_provider = llm_provider or OllamaProvider(
|
||||
endpoint=self.config.llm.endpoint,
|
||||
model=self.config.llm.model,
|
||||
timeout=self.config.llm.timeout
|
||||
)
|
||||
self.repo: GitRepo | None = None
|
||||
|
||||
def set_repo(self, path: Path) -> None:
|
||||
self.repo = GitRepo(path)
|
||||
|
||||
def _parse_llm_response(self, response_text: str, files: list[FileChange]) -> ReviewResult:
|
||||
result = ReviewResult()
|
||||
|
||||
try:
|
||||
json_match = re.search(r'\{[\s\S]*\}', response_text)
|
||||
if json_match:
|
||||
json_str = json_match.group()
|
||||
data = json.loads(json_str)
|
||||
|
||||
issues_data = data.get("issues", [])
|
||||
for issue_data in issues_data:
|
||||
try:
|
||||
issue = Issue.from_dict(issue_data)
|
||||
result.issues.append(issue)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
summary_data = data.get("summary", {})
|
||||
result.summary.critical_count = summary_data.get("critical_count", 0)
|
||||
result.summary.warning_count = summary_data.get("warning_count", 0)
|
||||
result.summary.info_count = summary_data.get("info_count", 0)
|
||||
result.summary.overall_assessment = summary_data.get("overall_assessment", "")
|
||||
else:
|
||||
text_issues = self._parse_text_response(response_text, files)
|
||||
result.issues = text_issues
|
||||
|
||||
except json.JSONDecodeError:
|
||||
result.issues = self._parse_text_response(response_text, files)
|
||||
|
||||
return result
|
||||
|
||||
def _parse_text_response(self, response_text: str, files: list[FileChange]) -> list[Issue]: # noqa: ARG002
|
||||
issues = []
|
||||
lines = response_text.split("\n")
|
||||
|
||||
current_file = ""
|
||||
for line in lines:
|
||||
file_match = re.match(r'^\*\*(.+?)\*\*:\s*(\d+)', line)
|
||||
if file_match:
|
||||
current_file = file_match.group(1)
|
||||
line_num = int(file_match.group(2))
|
||||
|
||||
severity = IssueSeverity.WARNING
|
||||
if "critical" in line.lower():
|
||||
severity = IssueSeverity.CRITICAL
|
||||
elif "security" in line.lower():
|
||||
severity = IssueSeverity.CRITICAL
|
||||
category = IssueCategory.SECURITY
|
||||
else:
|
||||
category = IssueCategory.BUG
|
||||
|
||||
message = line
|
||||
suggestion = None
|
||||
if "->" in line:
|
||||
parts = line.split("->")
|
||||
message = parts[0].strip()
|
||||
suggestion = "->".join(parts[1:]).strip()
|
||||
|
||||
issues.append(Issue(
|
||||
file=current_file,
|
||||
line=line_num,
|
||||
severity=severity,
|
||||
category=category,
|
||||
message=message,
|
||||
suggestion=suggestion
|
||||
))
|
||||
|
||||
return issues
|
||||
|
||||
def _get_strictness_profile(self) -> StrictnessProfile:
    """Return the StrictnessProfile matching the configured review strictness."""
    strictness_name = self.config.review.strictness
    return self.config.strictness_profiles.get_profile(strictness_name)
|
||||
|
||||
def _filter_issues_by_strictness(self, issues: list[Issue]) -> list[Issue]:
    """Drop issues below the profile's minimum severity or in disabled categories.

    Severity ranks are critical=0, warning=1, info=2; an issue survives when
    its rank is at most the profile's minimum rank AND its category's check
    flag is enabled.  Unknown severities rank as info; unknown categories are
    kept.
    """
    profile = self._get_strictness_profile()

    rank = {
        IssueSeverity.CRITICAL: 0,
        IssueSeverity.WARNING: 1,
        IssueSeverity.INFO: 2,
    }
    # Anything other than "critical"/"warning" (e.g. "info") keeps everything.
    threshold = {"critical": 0, "warning": 1}.get(profile.min_severity.lower(), 2)

    # Map each category to the profile flag that enables it.
    category_enabled = {
        IssueCategory.SECURITY: profile.check_security,
        IssueCategory.BUG: profile.check_bugs,
        IssueCategory.STYLE: profile.check_style,
        IssueCategory.PERFORMANCE: profile.check_performance,
        IssueCategory.DOCUMENTATION: profile.check_documentation,
    }

    return [
        issue
        for issue in issues
        if rank.get(issue.severity, 2) <= threshold
        and category_enabled.get(issue.category, True)
    ]
|
||||
|
||||
def _aggregate_summary(self, issues: list[Issue], files: list[FileChange]) -> ReviewSummary:
    """Build a ReviewSummary from the final issue list.

    Fills in: file and added-line counts, per-severity counters, the
    category->files and file->lines indexes, and a one-line overall
    assessment keyed off the worst severity present.
    """
    summary = ReviewSummary()
    summary.files_reviewed = len(files)

    # Count only added lines: "+"-prefixed diff lines, excluding the
    # "+++ b/..." file header.
    added_lines = 0
    for change in files:
        for diff_line in change.diff.split("\n"):
            if diff_line.startswith("+") and not diff_line.startswith("+++"):
                added_lines += 1
    summary.lines_changed = added_lines

    for issue in issues:
        if issue.severity == IssueSeverity.CRITICAL:
            summary.critical_count += 1
        elif issue.severity == IssueSeverity.WARNING:
            summary.warning_count += 1
        else:
            # Anything that is neither critical nor warning counts as info.
            summary.info_count += 1

        summary.issues_by_category.setdefault(issue.category.value, []).append(issue.file)
        summary.issues_by_file.setdefault(issue.file, []).append(issue.line)

    # Assessment strings preserved verbatim from the original.
    if summary.critical_count > 0:
        summary.overall_assessment = "Critical issues found. Review recommended before committing."
    elif summary.warning_count > 0:
        summary.overall_assessment = "Warnings found. Consider addressing before committing."
    elif summary.info_count > 0:
        summary.overall_assessment = "Minor issues found. Ready for commit with optional fixes."
    else:
        summary.overall_assessment = "No issues found. Code is ready for commit."

    return summary
|
||||
|
||||
def review_staged_changes(
    self,
    files: list[FileChange] | None = None,
    strictness: str | None = None,
    language: str | None = None
) -> ReviewResult:
    """Review staged Git changes, one LLM call per changed file.

    Args:
        files: Changes to review; when None, the staged changes of the
            current working directory's repo are used.
        strictness: Override for the configured review strictness.
        language: Override for per-file language detection.

    Returns:
        A ReviewResult; on failure `result.error` is set and any issues
        collected before the failure are discarded from the summary.
    """
    # Default to whatever is currently staged in the working directory.
    if files is None:
        if self.repo is None:
            self.repo = GitRepo(Path.cwd())
        files = self.repo.get_all_staged_changes()

    if not files:
        return ReviewResult(error="No staged changes found")

    result = ReviewResult()
    result.review_mode = strictness or self.config.review.strictness

    if strictness is None:
        strictness = self.config.review.strictness

    all_issues = []

    for file_change in files:
        # Skip entries with an empty diff (nothing to review).
        if not file_change.diff.strip():
            continue

        # Explicit language override wins; otherwise detect from filename.
        file_language = language
        if not file_language and self.repo is not None:
            file_language = self.repo.get_file_language(file_change.filename)

        prompt = ReviewPromptTemplates.get_prompt(
            diff=file_change.diff,
            strictness=strictness,
            language=file_language or ""
        )

        try:
            # Availability is re-checked per file; the first failure aborts
            # the whole review with an error result.
            if self.llm_provider.is_available():
                response = self.llm_provider.generate(
                    prompt,
                    max_tokens=self.config.llm.max_tokens,
                    temperature=self.config.llm.temperature
                )
                result.model_used = response.model
                result.tokens_used += response.tokens_used

                file_result = self._parse_llm_response(response.text, [file_change])
                all_issues.extend(file_result.issues)
            else:
                result.error = "LLM provider is not available"
                return result
        except Exception as e:
            result.error = f"Review failed: {e!s}"
            return result

    filtered_issues = self._filter_issues_by_strictness(all_issues)
    max_issues = self.config.review.max_issues_per_file
    # NOTE(review): the cap is applied globally (per-file max * file count),
    # not per file — a noisy early file can consume later files' budget.
    limited_issues = filtered_issues[:max_issues * len(files)]
    result.issues = limited_issues
    result.summary = self._aggregate_summary(limited_issues, files)

    return result
|
||||
|
||||
def review_commit(
    self,
    sha: str,
    strictness: str | None = None
) -> ReviewResult:
    """Review an existing commit, one LLM call per changed file.

    Args:
        sha: Commit hash (or any git revision the repo layer resolves).
        strictness: Override for the configured review strictness.

    Returns:
        A ReviewResult; `result.error` is set when the commit is missing,
        the provider is unavailable, or generation fails.
    """
    if self.repo is None:
        self.repo = GitRepo(Path.cwd())

    commit_info = self.repo.get_commit_info(sha)
    if commit_info is None:
        return ReviewResult(error=f"Commit {sha} not found")

    result = ReviewResult()
    result.review_mode = strictness or self.config.review.strictness

    if strictness is None:
        strictness = self.config.review.strictness

    all_issues = []

    for file_change in commit_info.changes:
        # Skip entries with an empty diff (nothing to review).
        if not file_change.diff.strip():
            continue

        # Commit review prompts also carry the commit message for context.
        prompt = ReviewPromptTemplates.get_commit_review_prompt(
            diff=file_change.diff,
            commit_message=commit_info.message,
            strictness=strictness
        )

        try:
            # Availability is re-checked per file; first failure aborts.
            if self.llm_provider.is_available():
                response = self.llm_provider.generate(
                    prompt,
                    max_tokens=self.config.llm.max_tokens,
                    temperature=self.config.llm.temperature
                )
                result.model_used = response.model
                result.tokens_used += response.tokens_used

                file_result = self._parse_llm_response(response.text, [file_change])
                all_issues.extend(file_result.issues)
            else:
                result.error = "LLM provider is not available"
                return result
        except Exception as e:
            result.error = f"Review failed: {e!s}"
            return result

    # Unlike review_staged_changes, no per-file issue cap is applied here.
    filtered_issues = self._filter_issues_by_strictness(all_issues)
    result.issues = filtered_issues
    result.summary = self._aggregate_summary(filtered_issues, commit_info.changes)

    return result
|
||||
141
app/local-ai-commit-reviewer/src/formatters/formatters.py
Normal file
141
app/local-ai-commit-reviewer/src/formatters/formatters.py
Normal file
@@ -0,0 +1,141 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.style import Style
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
|
||||
from ..core import Issue, IssueCategory, IssueSeverity, ReviewResult
|
||||
|
||||
|
||||
class BaseFormatter(ABC):
    """Abstract base for review-result formatters.

    Subclasses render a ReviewResult into a string for a specific output
    target (terminal, JSON, Markdown).
    """

    @abstractmethod
    def format(self, result: ReviewResult) -> str:
        """Render *result* as a string; implemented by subclasses."""
        pass
|
||||
|
||||
|
||||
class TerminalFormatter(BaseFormatter):
    """Render a ReviewResult for terminal display using Rich components."""

    def __init__(self, theme: str = "auto", show_line_numbers: bool = True):
        # show_line_numbers is stored but never read in this class.
        self.console = Console()
        self.show_line_numbers = show_line_numbers
        # NOTE(review): this evaluates to True for theme=="auto" (first arm,
        # "auto" != "dark") and to (theme == "dark") otherwise — i.e. True
        # for "auto"/"dark", False for anything else.  The first arm compares
        # against "dark" when theme is already known to be "auto", which
        # looks like a bug; also `use_colors` is never read.  Confirm intent.
        self.use_colors = theme != "dark" if theme == "auto" else theme == "dark"

    def _get_severity_style(self, severity: IssueSeverity) -> Style:
        """Map a severity to its Rich text style (default: plain)."""
        styles = {
            IssueSeverity.CRITICAL: Style(color="red", bold=True),
            IssueSeverity.WARNING: Style(color="yellow"),
            IssueSeverity.INFO: Style(color="blue"),
        }
        return styles.get(severity, Style())

    def _get_category_icon(self, category: IssueCategory) -> str:
        """Map a category to its bracketed text tag (default: empty string)."""
        icons = {
            IssueCategory.BUG: "[BUG]",
            IssueCategory.SECURITY: "[SECURITY]",
            IssueCategory.STYLE: "[STYLE]",
            IssueCategory.PERFORMANCE: "[PERF]",
            IssueCategory.DOCUMENTATION: "[DOC]",
        }
        return icons.get(category, "")

    def _format_issue(self, issue: Issue) -> Text:
        """Build a one-issue Rich Text line: location, severity, category, message.

        A suggestion, when present, is appended on an indented second line.
        """
        text = Text()
        text.append(f"{issue.file}:{issue.line} ", style="dim")
        text.append(f"[{issue.severity.value.upper()}] ", self._get_severity_style(issue.severity))
        text.append(f"{self._get_category_icon(issue.category)} ")
        text.append(issue.message)

        if issue.suggestion:
            text.append("\n  Suggestion: ", style="dim")
            text.append(issue.suggestion)

        return text

    def format(self, result: ReviewResult) -> str:
        """Render the full review as a string of joined Rich renderables.

        NOTE(review): the output is produced via str() on Panel/Table
        objects rather than Console rendering — confirm that this yields
        the intended text rather than object reprs.
        """
        output: list[Panel | Table | str] = []

        # Error short-circuit: a single "Review Failed" panel.
        if result.error:
            output.append(Panel(
                f"[red]Error: {result.error}[/red]",
                title="Review Failed",
                expand=False
            ))
            return "\n".join(str(p) for p in output)

        summary = result.summary

        # Top summary: counts per severity plus the overall assessment.
        summary_panel = Panel(
            f"[bold]Files Reviewed:[/bold] {summary.files_reviewed}\n"
            f"[bold]Lines Changed:[/bold] {summary.lines_changed}\n\n"
            f"[red]Critical:[/red] {summary.critical_count}  "
            f"[yellow]Warnings:[/yellow] {summary.warning_count}  "
            f"[blue]Info:[/blue] {summary.info_count}\n\n"
            f"[bold]Assessment:[/bold] {summary.overall_assessment}",
            title="Review Summary",
            expand=False
        )
        output.append(summary_panel)

        if result.issues:
            # One table row per issue, tinted by severity.
            issues_table = Table(title="Issues Found", show_header=True)
            issues_table.add_column("File", style="dim")
            issues_table.add_column("Line", justify="right", style="dim")
            issues_table.add_column("Severity", width=10)
            issues_table.add_column("Category", width=12)
            issues_table.add_column("Message")

            for issue in result.issues:
                issues_table.add_row(
                    issue.file,
                    str(issue.line),
                    f"[{issue.severity.value.upper()}]",
                    f"[{issue.category.value.upper()}]",
                    issue.message,
                    style=self._get_severity_style(issue.severity)
                )

            output.append(issues_table)

            # NOTE(review): this panel is emitted even when no issue carries
            # a suggestion, producing an empty "Suggestions" panel.
            suggestions_panel = Panel(
                "\n".join(
                    f"[bold]{issue.file}:{issue.line}[/bold]\n"
                    f"  {issue.message}\n"
                    + (f"  [green]→ {issue.suggestion}[/green]\n" if issue.suggestion else "")
                    for issue in result.issues if issue.suggestion
                ),
                title="Suggestions",
                expand=False
            )
            output.append(suggestions_panel)

        # Footer: provider/model metadata for the run.
        model_info = Panel(
            f"[bold]Model:[/bold] {result.model_used}\n"
            f"[bold]Tokens Used:[/bold] {result.tokens_used}\n"
            f"[bold]Mode:[/bold] {result.review_mode}",
            title="Review Info",
            expand=False
        )
        output.append(model_info)

        return "\n".join(str(o) for o in output)
|
||||
|
||||
|
||||
class JSONFormatter(BaseFormatter):
    """Render a ReviewResult as its JSON serialization."""

    def format(self, result: ReviewResult) -> str:
        """Delegate entirely to the result's own JSON encoder."""
        json_text = result.to_json()
        return json_text
|
||||
|
||||
|
||||
class MarkdownFormatter(BaseFormatter):
    """Render a ReviewResult as Markdown text."""

    def format(self, result: ReviewResult) -> str:
        """Delegate entirely to the result's own Markdown encoder."""
        markdown_text = result.to_markdown()
        return markdown_text
|
||||
|
||||
|
||||
def get_formatter(format_type: str = "terminal", **kwargs) -> BaseFormatter:
    """Instantiate the formatter registered for *format_type*.

    Known types are "terminal", "json", and "markdown"; any unknown type
    silently falls back to TerminalFormatter.  Extra keyword arguments are
    forwarded to the chosen formatter's constructor.
    """
    registry: dict[str, type[BaseFormatter]] = {
        "terminal": TerminalFormatter,
        "json": JSONFormatter,
        "markdown": MarkdownFormatter,
    }
    chosen_class = registry.get(format_type, TerminalFormatter)
    return chosen_class(**kwargs)  # type: ignore[arg-type]
|
||||
143
app/local-ai-commit-reviewer/src/llm/ollama.py
Normal file
143
app/local-ai-commit-reviewer/src/llm/ollama.py
Normal file
@@ -0,0 +1,143 @@
|
||||
import asyncio
|
||||
from collections.abc import AsyncIterator
|
||||
from datetime import datetime
|
||||
|
||||
import ollama
|
||||
|
||||
from .provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
class OllamaProvider(LLMProvider):
    """LLM provider backed by a local Ollama server."""

    # Shared system prompt for every chat call (was duplicated three times).
    _SYSTEM_PROMPT = "You are a helpful code review assistant. Provide concise, constructive feedback on code changes."

    def __init__(
        self,
        endpoint: str = "http://localhost:11434",
        model: str = "codellama",
        timeout: int = 120
    ):
        """Store connection settings; the client itself is created lazily.

        Args:
            endpoint: Base URL of the Ollama HTTP API.
            model: Model name used for all generation calls.
            timeout: Request timeout in seconds.
                NOTE(review): stored but never passed to the client — confirm.
        """
        self.endpoint = endpoint
        self.model = model
        self.timeout = timeout
        self._client: ollama.Client | None = None

    @property
    def client(self) -> ollama.Client:
        """Lazily construct and cache the Ollama client."""
        if self._client is None:
            self._client = ollama.Client(host=self.endpoint)
        return self._client

    def _chat_kwargs(self, prompt: str, **kwargs) -> dict:
        """Build the keyword arguments shared by all client.chat() calls."""
        return {
            "model": self.model,
            "messages": [
                {"role": "system", "content": self._SYSTEM_PROMPT},
                {"role": "user", "content": prompt}
            ],
            "options": {
                "num_predict": kwargs.get("max_tokens", 2048),
                "temperature": kwargs.get("temperature", 0.3),
            },
        }

    def _to_response(self, response) -> LLMResponse:
        """Convert a raw Ollama chat response dict into an LLMResponse."""
        return LLMResponse(
            text=response["message"]["content"],
            model=self.model,
            tokens_used=response.get("eval_count", 0),
            finish_reason=response.get("done_reason", "stop")
        )

    def is_available(self) -> bool:
        """Return True only when the server answers the health check.

        Bug fix: the original returned True whenever health_check() did not
        raise, even when it returned False (ps() yielding None); the boolean
        is now propagated.
        """
        try:
            return self.health_check()
        except Exception:
            return False

    def health_check(self) -> bool:
        """Ping the server via ps().

        Returns:
            True when ps() returned a (non-None) response.

        Raises:
            ConnectionError: when the server cannot be reached.
        """
        try:
            response = self.client.ps()
            return response is not None
        except Exception as e:
            raise ConnectionError(f"Ollama health check failed: {e}") from None

    def generate(self, prompt: str, **kwargs) -> LLMResponse:
        """Synchronously generate a completion for *prompt*.

        Recognized kwargs: max_tokens (default 2048), temperature (default 0.3).

        Raises:
            RuntimeError: wrapping any underlying client failure.
        """
        try:
            response = self.client.chat(stream=False, **self._chat_kwargs(prompt, **kwargs))
            return self._to_response(response)
        except Exception as e:
            raise RuntimeError(f"Ollama generation failed: {e}") from None

    async def agenerate(self, prompt: str, **kwargs) -> LLMResponse:
        """Async variant of generate(): runs the blocking call in a thread.

        Raises:
            RuntimeError: wrapping any underlying client failure.
        """
        try:
            response = await asyncio.to_thread(
                self.client.chat, stream=False, **self._chat_kwargs(prompt, **kwargs)
            )
            return self._to_response(response)
        except Exception as e:
            raise RuntimeError(f"Ollama async generation failed: {e}") from None

    async def stream_generate(self, prompt: str, **kwargs) -> AsyncIterator[str]:  # type: ignore[misc]
        """Yield completion chunks as they arrive from the server.

        Raises:
            RuntimeError: wrapping any underlying client failure.
        """
        try:
            response = self.client.chat(stream=True, **self._chat_kwargs(prompt, **kwargs))
            for chunk in response:
                if "message" in chunk and "content" in chunk["message"]:
                    yield chunk["message"]["content"]
        except Exception as e:
            raise RuntimeError(f"Ollama streaming failed: {e}") from None

    def list_models(self) -> list[ModelInfo]:
        """List models currently loaded by the server; [] on any failure."""
        try:
            response = self.client.ps()
            models = []
            if response and "models" in response:
                for model in response["models"]:
                    models.append(ModelInfo(
                        name=model.get("name", "unknown"),
                        size=model.get("size", "unknown"),
                        modified=model.get("modified", datetime.now().isoformat()),
                        digest=model.get("digest", "")
                    ))
            return models
        except Exception:
            return []

    def pull_model(self, model_name: str) -> bool:
        """Download *model_name* from the registry; True on success."""
        try:
            # Drain the streaming progress events; we only care about completion.
            for _ in self.client.pull(model_name, stream=True):
                pass
            return True
        except Exception:
            return False
|
||||
126
app/local-ai-commit-reviewer/tests/conftest.py
Normal file
126
app/local-ai-commit-reviewer/tests/conftest.py
Normal file
@@ -0,0 +1,126 @@
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from src.config import Config
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
@pytest.fixture
def temp_git_repo():
    """Yield the path of a freshly initialised, throwaway Git repository."""
    with tempfile.TemporaryDirectory() as tmpdir:
        repo_path = Path(tmpdir)
        setup_commands = [
            ["git", "init"],
            ["git", "config", "user.email", "test@test.com"],
            ["git", "config", "user.name", "Test"],
        ]
        for command in setup_commands:
            subprocess.run(command, cwd=repo_path, capture_output=True, check=False)
        yield repo_path
|
||||
|
||||
|
||||
@pytest.fixture
def sample_python_file(temp_git_repo):
    """Create a sample Python file in the temp repo.

    Writes a tiny `hello()` function and stages it with `git add` so the
    review engine sees it as a staged change.  Returns the file path.
    """
    test_file = temp_git_repo / "test.py"
    test_file.write_text('def hello():\n print("Hello, World!")\n return True\n')
    subprocess.run(["git", "add", "test.py"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_js_file(temp_git_repo):
    """Create a sample JavaScript file.

    Writes a tiny `hello()` function and stages it with `git add`.
    Returns the file path.
    """
    test_file = temp_git_repo / "test.js"
    test_file.write_text('function hello() {\n console.log("Hello, World!");\n}\n')
    subprocess.run(["git", "add", "test.js"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_diff():
    """Return a sample diff for testing.

    A minimal unified diff against test.py with one added and one removed
    line, suitable for exercising diff-parsing code paths.
    """
    return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
 def hello():
+    print("hello")
     return True
-    return False
"""
|
||||
|
||||
|
||||
@pytest.fixture
def mock_config():
    """Return a default Config instance (no files read, all defaults)."""
    return Config()
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """LLM provider stand-in for tests: canned response, no network."""

    # Default JSON payload representing a clean review.
    _DEFAULT_RESPONSE = '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, "info_count": 0, "overall_assessment": "No issues"}}'

    def __init__(self, available: bool = True, response_text: str | None = None):
        """Configure availability and the canned response text."""
        self._available = available
        # Falsy response_text (None or "") falls back to the clean default.
        self._response_text = response_text or self._DEFAULT_RESPONSE

    def is_available(self) -> bool:
        """Report the availability flag set at construction."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Return the canned response regardless of prompt or kwargs."""
        canned = LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop"
        )
        return canned

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant: same canned response as generate()."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single fixed chunk."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return one fixed fake model entry."""
        fake_model = ModelInfo(name="mock-model", size="4GB", modified="2024-01-01", digest="abc123")
        return [fake_model]

    def health_check(self) -> bool:
        """Mirror the availability flag."""
        return self._available
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_provider():
    """Return a mock LLM provider that reports itself available."""
    return MockLLMProvider(available=True)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_unavailable():
    """Return a mock LLM provider that's not available (is_available() False)."""
    return MockLLMProvider(available=False)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_with_issues():
    """Return a mock LLM provider whose canned JSON reports one style warning."""
    response = '''{
  "issues": [
    {
      "file": "test.py",
      "line": 2,
      "severity": "warning",
      "category": "style",
      "message": "Missing docstring for function",
      "suggestion": "Add a docstring above the function definition"
    }
  ],
  "summary": {
    "critical_count": 0,
    "warning_count": 1,
    "info_count": 0,
    "overall_assessment": "Minor style issues found"
  }
}'''
    return MockLLMProvider(available=True, response_text=response)
|
||||
126
app/local-ai-commit-reviewer/tests/fixtures/sample_repo.py
vendored
Normal file
126
app/local-ai-commit-reviewer/tests/fixtures/sample_repo.py
vendored
Normal file
@@ -0,0 +1,126 @@
|
||||
import subprocess
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from src.config import Config
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
# NOTE(review): this file duplicates tests/conftest.py verbatim — consider
# importing the fixtures from one place instead of maintaining two copies.
@pytest.fixture
def temp_git_repo():
    """Create a temporary Git repository for testing.

    Initialises git with a dummy user identity and yields the repo path;
    the directory is removed when the fixture is torn down.
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        repo_path = Path(tmpdir)
        subprocess.run(["git", "init"], cwd=repo_path, capture_output=True, check=False)
        subprocess.run(["git", "config", "user.email", "test@test.com"], cwd=repo_path, capture_output=True, check=False)
        subprocess.run(["git", "config", "user.name", "Test"], cwd=repo_path, capture_output=True, check=False)
        yield repo_path
|
||||
|
||||
|
||||
@pytest.fixture
def sample_python_file(temp_git_repo):
    """Create a sample Python file in the temp repo.

    Writes a tiny `hello()` function, stages it, and returns the file path.
    (Duplicate of the conftest.py fixture of the same name.)
    """
    test_file = temp_git_repo / "test.py"
    test_file.write_text('def hello():\n print("Hello, World!")\n return True\n')
    subprocess.run(["git", "add", "test.py"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_js_file(temp_git_repo):
    """Create a sample JavaScript file.

    Writes a tiny `hello()` function, stages it, and returns the file path.
    (Duplicate of the conftest.py fixture of the same name.)
    """
    test_file = temp_git_repo / "test.js"
    test_file.write_text('function hello() {\n console.log("Hello, World!");\n}\n')
    subprocess.run(["git", "add", "test.js"], cwd=temp_git_repo, capture_output=True, check=False)
    return test_file
|
||||
|
||||
|
||||
@pytest.fixture
def sample_diff():
    """Return a sample diff for testing.

    Minimal unified diff with one added and one removed line.
    (Duplicate of the conftest.py fixture of the same name.)
    """
    return """diff --git a/test.py b/test.py
--- a/test.py
+++ b/test.py
@@ -1,3 +1,4 @@
 def hello():
+    print("hello")
     return True
-    return False
"""
|
||||
|
||||
|
||||
@pytest.fixture
def mock_config():
    """Return a default Config instance (duplicate of the conftest fixture)."""
    return Config()
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """Mock LLM provider for testing.

    Returns a canned response string (default: a clean-review JSON payload)
    and never touches the network.  Duplicate of the class in conftest.py.
    """

    def __init__(self, available: bool = True, response_text: str | None = None):
        # Falsy response_text (None or "") falls back to the clean default.
        self._available = available
        self._response_text = response_text or '{"issues": [], "summary": {"critical_count": 0, "warning_count": 0, "info_count": 0, "overall_assessment": "No issues"}}'

    def is_available(self) -> bool:
        """Report the availability flag set at construction."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Return the canned response regardless of prompt or kwargs."""
        return LLMResponse(
            text=self._response_text,
            model="mock-model",
            tokens_used=50,
            finish_reason="stop"
        )

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant: same canned response as generate()."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single fixed chunk."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return one fixed fake model entry."""
        return [
            ModelInfo(name="mock-model", size="4GB", modified="2024-01-01", digest="abc123")
        ]

    def health_check(self) -> bool:
        """Mirror the availability flag."""
        return self._available
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_provider():
    """Return a mock LLM provider that reports itself available."""
    return MockLLMProvider(available=True)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_unavailable():
    """Return a mock LLM provider that's not available (is_available() False)."""
    return MockLLMProvider(available=False)
|
||||
|
||||
|
||||
@pytest.fixture
def mock_llm_with_issues():
    """Return a mock LLM provider whose canned JSON reports one style warning."""
    response = '''{
  "issues": [
    {
      "file": "test.py",
      "line": 2,
      "severity": "warning",
      "category": "style",
      "message": "Missing docstring for function",
      "suggestion": "Add a docstring above the function definition"
    }
  ],
  "summary": {
    "critical_count": 0,
    "warning_count": 1,
    "info_count": 0,
    "overall_assessment": "Minor style issues found"
  }
}'''
    return MockLLMProvider(available=True, response_text=response)
|
||||
@@ -0,0 +1,46 @@
|
||||
from fixtures.sample_repo import MockLLMProvider
|
||||
|
||||
|
||||
class TestReviewWorkflow:
    """End-to-end review flow against a real temp repo and a mock LLM."""

    def test_review_with_no_staged_changes(self, temp_git_repo, mock_config):
        """An empty change list yields the dedicated error result."""
        from src.core.review_engine import ReviewEngine  # noqa: PLC0415

        engine = ReviewEngine(config=mock_config, llm_provider=MockLLMProvider())
        engine.set_repo(temp_git_repo)
        result = engine.review_staged_changes([])
        assert result.error == "No staged changes found"

    def test_review_with_staged_file(self, temp_git_repo, mock_config, request):
        """A staged file is reviewed with the default "balanced" strictness."""
        from src.core.review_engine import ReviewEngine  # noqa: PLC0415
        from src.git import get_staged_changes  # noqa: PLC0415

        # Materialize the fixture for its git-add side effect only.
        request.getfixturevalue("sample_python_file")
        changes = get_staged_changes(temp_git_repo)

        engine = ReviewEngine(config=mock_config, llm_provider=MockLLMProvider())
        engine.set_repo(temp_git_repo)
        result = engine.review_staged_changes(changes)

        assert result.review_mode == "balanced"
        # NOTE(review): `len(result.issues) >= 0` is always true, so this
        # effectively only checks that result.error is None — tighten?
        assert result.error is None or len(result.issues) >= 0
|
||||
|
||||
|
||||
class TestHookInstallation:
    """Git pre-commit hook installation and detection."""

    def test_install_hook(self, temp_git_repo):
        """Installing the hook creates .git/hooks/pre-commit with our command."""
        from src.hooks import install_pre_commit_hook  # noqa: PLC0415

        result = install_pre_commit_hook(temp_git_repo)
        assert result is True

        hook_path = temp_git_repo / ".git" / "hooks" / "pre-commit"
        assert hook_path.exists()

        # The script should invoke the reviewer CLI ("aicr") or mention review.
        content = hook_path.read_text()
        assert "aicr" in content or "review" in content

    def test_check_hook_installed(self, temp_git_repo):
        """check_hook_installed flips from False to True after installation."""
        from src.hooks import check_hook_installed, install_pre_commit_hook  # noqa: PLC0415

        assert check_hook_installed(temp_git_repo) is False
        install_pre_commit_hook(temp_git_repo)
        assert check_hook_installed(temp_git_repo) is True
|
||||
50
app/local-ai-commit-reviewer/tests/unit/test_config.py
Normal file
50
app/local-ai-commit-reviewer/tests/unit/test_config.py
Normal file
@@ -0,0 +1,50 @@
|
||||
from src.config import Config, ConfigLoader
|
||||
|
||||
|
||||
class TestConfig:
    """Behaviour of the Config model: defaults, overrides, sub-configs."""

    def test_default_config(self):
        """A bare Config() carries the documented defaults."""
        config = Config()
        assert config.llm.endpoint == "http://localhost:11434"
        assert config.llm.model == "codellama"
        assert config.review.strictness == "balanced"
        assert config.hooks.enabled is True

    def test_config_from_dict(self):
        """Keyword construction overrides nested fields selectively."""
        data = {
            "llm": {
                "endpoint": "http://custom:9000",
                "model": "custom-model"
            },
            "review": {
                "strictness": "strict"
            }
        }
        config = Config(**data)
        assert config.llm.endpoint == "http://custom:9000"
        assert config.llm.model == "custom-model"
        assert config.review.strictness == "strict"

    def test_language_config(self):
        """Python has a language config and is enabled by default."""
        config = Config()
        py_config = config.languages.get_language_config("python")
        assert py_config is not None
        assert py_config.enabled is True

    def test_strictness_profiles(self):
        """Built-in profiles differ: permissive skips style, strict adds perf."""
        config = Config()
        permissive = config.strictness_profiles.get_profile("permissive")
        assert permissive.check_style is False
        strict = config.strictness_profiles.get_profile("strict")
        assert strict.check_performance is True
|
||||
|
||||
|
||||
class TestConfigLoader:
    """ConfigLoader behaviour with and without config files present."""

    def test_load_default_config(self):
        """Loading with no explicit path still yields a Config instance."""
        loader = ConfigLoader()
        config = loader.load()
        assert isinstance(config, Config)

    def test_find_config_files_nonexistent(self):
        """A nonexistent explicit path resolves to no local config file."""
        loader = ConfigLoader("/nonexistent/path.yaml")
        path, _global_path = loader.find_config_files()
        assert path is None
|
||||
52
app/local-ai-commit-reviewer/tests/unit/test_llm.py
Normal file
52
app/local-ai-commit-reviewer/tests/unit/test_llm.py
Normal file
@@ -0,0 +1,52 @@
|
||||
from src.llm.provider import LLMProvider, LLMResponse, ModelInfo
|
||||
|
||||
|
||||
class MockLLMProvider(LLMProvider):
    """Minimal in-memory LLMProvider implementation for unit tests."""

    def __init__(self, available: bool = True):
        """Record the availability flag and start with no models."""
        self._available = available
        self._models = []

    def is_available(self) -> bool:
        """Report the availability flag set at construction."""
        return self._available

    def generate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Return a fixed canned response regardless of the prompt."""
        canned = LLMResponse(
            text="Mock review response",
            model="mock-model",
            tokens_used=100,
            finish_reason="stop"
        )
        return canned

    async def agenerate(self, _prompt: str, **_kwargs) -> LLMResponse:
        """Async variant: same canned response as generate()."""
        return self.generate(_prompt, **_kwargs)

    def stream_generate(self, _prompt: str, **_kwargs):
        """Yield a single fixed chunk."""
        yield "Mock"

    def list_models(self) -> list[ModelInfo]:
        """Return the (initially empty) stored model list."""
        return self._models

    def health_check(self) -> bool:
        """Mirror the availability flag."""
        return self._available
|
||||
|
||||
|
||||
class TestLLMProvider:
    """Sanity checks of the LLMProvider interface via the mock implementation."""

    def test_mock_provider_is_available(self):
        """available=True surfaces through is_available()."""
        provider = MockLLMProvider(available=True)
        assert provider.is_available() is True

    def test_mock_provider_not_available(self):
        """available=False surfaces through is_available()."""
        provider = MockLLMProvider(available=False)
        assert provider.is_available() is False

    def test_mock_generate(self):
        """generate() returns the canned LLMResponse fields."""
        provider = MockLLMProvider()
        response = provider.generate("test prompt")
        assert isinstance(response, LLMResponse)
        assert response.text == "Mock review response"
        assert response.model == "mock-model"

    def test_mock_list_models(self):
        """list_models() always returns a list (empty for the mock)."""
        provider = MockLLMProvider()
        models = provider.list_models()
        assert isinstance(models, list)
|
||||
76
app/local-ai-commit-reviewer/tests/unit/test_review.py
Normal file
76
app/local-ai-commit-reviewer/tests/unit/test_review.py
Normal file
@@ -0,0 +1,76 @@
|
||||
from src.core.review_engine import Issue, IssueCategory, IssueSeverity, ReviewResult, ReviewSummary
|
||||
|
||||
|
||||
class TestIssue:
    """Construction and serialization of the Issue model."""

    def test_issue_creation(self):
        """All constructor fields round-trip to attributes."""
        issue = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.WARNING,
            category=IssueCategory.STYLE,
            message="Missing docstring",
            suggestion="Add a docstring"
        )
        assert issue.file == "test.py"
        assert issue.line == 10  # noqa: PLR2004
        assert issue.severity == IssueSeverity.WARNING

    def test_issue_to_dict(self):
        """to_dict() serializes enums to their lowercase string values."""
        issue = Issue(
            file="test.py",
            line=10,
            severity=IssueSeverity.CRITICAL,
            category=IssueCategory.BUG,
            message="Potential bug"
        )
        data = issue.to_dict()
        assert data["file"] == "test.py"
        assert data["severity"] == "critical"
        assert data["category"] == "bug"
|
||||
|
||||
|
||||
class TestReviewResult:
    """Issue queries on ReviewResult: presence, criticality, filtering."""

    def test_review_result_no_issues(self):
        """A fresh result reports no issues and no critical issues."""
        result = ReviewResult()
        assert result.has_issues() is False
        assert result.has_critical_issues() is False

    def test_review_result_with_issues(self):
        """A single critical issue flips both predicates to True."""
        result = ReviewResult()
        result.issues = [
            Issue(
                file="test.py",
                line=1,
                severity=IssueSeverity.CRITICAL,
                category=IssueCategory.SECURITY,
                message="SQL injection"
            )
        ]
        assert result.has_issues() is True
        assert result.has_critical_issues() is True

    def test_get_issues_by_severity(self):
        """Filtering by severity returns only the matching issues."""
        result = ReviewResult()
        result.issues = [
            Issue(file="a.py", line=1, severity=IssueSeverity.CRITICAL, category=IssueCategory.BUG, message="Bug1"),
            Issue(file="b.py", line=2, severity=IssueSeverity.WARNING, category=IssueCategory.STYLE, message="Style1"),
            Issue(file="c.py", line=3, severity=IssueSeverity.INFO, category=IssueCategory.DOCUMENTATION, message="Doc1"),
        ]
        critical = result.get_issues_by_severity(IssueSeverity.CRITICAL)
        assert len(critical) == 1
        assert critical[0].file == "a.py"
|
||||
|
||||
|
||||
class TestReviewSummary:
|
||||
def test_review_summary_aggregation(self):
|
||||
summary = ReviewSummary()
|
||||
summary.files_reviewed = 5
|
||||
summary.lines_changed = 100
|
||||
summary.critical_count = 2
|
||||
summary.warning_count = 5
|
||||
summary.info_count = 10
|
||||
summary.overall_assessment = "Good"
|
||||
|
||||
data = summary.to_dict()
|
||||
assert data["files_reviewed"] == 5 # noqa: PLR2004
|
||||
assert data["critical_count"] == 2 # noqa: PLR2004
|
||||
@@ -69,6 +69,8 @@ include = "\\.pyi?$"
|
||||
[tool.ruff]
|
||||
line-length = 100
|
||||
target-version = "py310"
|
||||
|
||||
[tool.ruff.lint]
|
||||
select = ["E", "W", "F", "I", "UP", "B", "C4", "A", "SIM", "ARG", "PL", "RUF"]
|
||||
ignore = ["E501", "B008", "C901"]
|
||||
|
||||
|
||||
47
setup.cfg
Normal file
47
setup.cfg
Normal file
@@ -0,0 +1,47 @@
|
||||
[metadata]
|
||||
name = local-ai-commit-reviewer
|
||||
version = 0.1.0
|
||||
author = Local AI Commit Reviewer Contributors
|
||||
description = A CLI tool that reviews Git commits locally using lightweight LLMs
|
||||
long_description = file: README.md
|
||||
long_description_content_type = text/markdown
|
||||
url = https://github.com/yourusername/local-ai-commit-reviewer
|
||||
license = MIT
|
||||
classifiers =
|
||||
Development Status :: 4 - Beta
|
||||
Intended Audience :: Developers
|
||||
License :: OSI Approved :: MIT License
|
||||
Programming Language :: Python :: 3
|
||||
Programming Language :: Python :: 3.10
|
||||
Programming Language :: Python :: 3.11
|
||||
Programming Language :: Python :: 3.12
|
||||
keywords = git, cli, llm, code-review, ollama
|
||||
|
||||
[options]
|
||||
python_requires = >=3.10
|
||||
install_requires =
|
||||
click>=8.1.7
|
||||
gitpython>=3.1.43
|
||||
ollama>=0.3.3
|
||||
rich>=13.7.1
|
||||
pydantic>=2.6.1
|
||||
pyyaml>=6.0.1
|
||||
|
||||
[options.extras_require]
|
||||
dev =
|
||||
pytest>=7.4.0
|
||||
pytest-cov>=4.1.0
|
||||
pytest-mock>=3.12.0
|
||||
black>=23.0.0
|
||||
ruff>=0.1.0
|
||||
mypy>=1.7.0
|
||||
|
||||
[options.entry_points]
|
||||
console_scripts =
|
||||
aicr = src.cli:main
|
||||
|
||||
[tool:pytest]
|
||||
testpaths = tests
|
||||
python_files = test_*.py
|
||||
python_functions = test_*
|
||||
addopts = -v --tb=short
|
||||
@@ -1,7 +1,7 @@
|
||||
import os
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
from typing import Any, Union
|
||||
|
||||
import click
|
||||
from rich import print as rprint
|
||||
@@ -14,25 +14,6 @@ from ..git import install_hook as git_install_hook
|
||||
from ..llm import OllamaProvider
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.option("--config", "-c", type=click.Path(exists=True), help="Path to config file")
|
||||
@click.option("--endpoint", help="LLM endpoint URL", default=None)
|
||||
@click.option("--model", "-m", help="Model name to use", default=None)
|
||||
@click.pass_context
|
||||
def cli(ctx: click.Context, config: str | None, endpoint: str | None, model: str | None):
|
||||
ctx.ensure_object(dict)
|
||||
cfg_path = config or os.environ.get("AICR_CONFIG_PATH")
|
||||
cfg = get_config(cfg_path)
|
||||
|
||||
if endpoint:
|
||||
cfg.llm.endpoint = endpoint
|
||||
if model:
|
||||
cfg.llm.model = model
|
||||
|
||||
ctx.obj["config"] = cfg
|
||||
ctx.obj["repo_path"] = Path.cwd()
|
||||
|
||||
|
||||
@click.group()
|
||||
@click.option("--config", "-c", type=click.Path(exists=True), help="Path to config file")
|
||||
@click.option("--endpoint", help="LLM endpoint URL", default=None)
|
||||
@@ -59,7 +40,7 @@ def cli(ctx: click.Context, config: str | None, endpoint: str | None, model: str
|
||||
@click.option("--hook", is_flag=True, help="Run in hook mode (exit non-zero on critical)")
|
||||
@click.option("--file", "-f", multiple=True, help="Files to review (default: all staged)")
|
||||
@click.pass_context
|
||||
def review(
|
||||
def review( # noqa: PLR0913
|
||||
ctx: click.Context,
|
||||
strictness: str | None,
|
||||
output: str,
|
||||
@@ -250,26 +231,26 @@ def _get_nested_attr(obj, attr_path: str):
|
||||
return current
|
||||
|
||||
|
||||
def _set_nested_attr(obj, attr_path: str, value: str):
|
||||
def _set_nested_attr(obj, attr_path: str, value: Any) -> None:
|
||||
parts = attr_path.split(".")
|
||||
current = obj
|
||||
current: Any = obj
|
||||
for part in parts[:-1]:
|
||||
if hasattr(current, part):
|
||||
current = getattr(current, part)
|
||||
|
||||
final_attr = parts[-1]
|
||||
if hasattr(current, final_attr):
|
||||
attr = getattr(type(current), final_attr)
|
||||
if hasattr(attr, "annotation"):
|
||||
type_hint = attr.annotation
|
||||
attr = getattr(type(current), final_attr, None)
|
||||
if attr is not None and hasattr(attr, "annotation"):
|
||||
type_hint = attr.annotation # type: ignore[attr-defined]
|
||||
if getattr(type_hint, "__origin__", None) is Union:
|
||||
type_hint = type_hint.__args__[0]
|
||||
if hasattr(type_hint, "__name__"):
|
||||
if type_hint.__name__ == "int":
|
||||
if type_hint.__name__ == "int" and isinstance(value, str):
|
||||
value = int(value)
|
||||
elif type_hint.__name__ == "float":
|
||||
elif type_hint.__name__ == "float" and isinstance(value, str):
|
||||
value = float(value)
|
||||
elif type_hint.__name__ == "bool":
|
||||
elif type_hint.__name__ == "bool" and isinstance(value, str):
|
||||
value = value.lower() in ("true", "1", "yes")
|
||||
setattr(current, final_attr, value)
|
||||
|
||||
|
||||
@@ -2,7 +2,7 @@ import os
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import yaml
|
||||
import yaml # type: ignore[import-untyped]
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
|
||||
@@ -192,7 +192,7 @@ class ReviewEngine:
|
||||
|
||||
return result
|
||||
|
||||
def _parse_text_response(self, response_text: str, files: list[FileChange]) -> list[Issue]:
|
||||
def _parse_text_response(self, response_text: str, files: list[FileChange]) -> list[Issue]: # noqa: ARG002
|
||||
issues = []
|
||||
lines = response_text.split("\n")
|
||||
|
||||
@@ -330,12 +330,14 @@ class ReviewEngine:
|
||||
if not file_change.diff.strip():
|
||||
continue
|
||||
|
||||
file_language = language or self.repo.get_file_language(file_change.filename)
|
||||
file_language = language
|
||||
if not file_language and self.repo is not None:
|
||||
file_language = self.repo.get_file_language(file_change.filename)
|
||||
|
||||
prompt = ReviewPromptTemplates.get_prompt(
|
||||
diff=file_change.diff,
|
||||
strictness=strictness,
|
||||
language=file_language
|
||||
language=file_language or ""
|
||||
)
|
||||
|
||||
try:
|
||||
|
||||
@@ -53,7 +53,7 @@ class TerminalFormatter(BaseFormatter):
|
||||
return text
|
||||
|
||||
def format(self, result: ReviewResult) -> str:
|
||||
output = []
|
||||
output: list[Panel | Table | str] = []
|
||||
|
||||
if result.error:
|
||||
output.append(Panel(
|
||||
@@ -132,10 +132,10 @@ class MarkdownFormatter(BaseFormatter):
|
||||
|
||||
|
||||
def get_formatter(format_type: str = "terminal", **kwargs) -> BaseFormatter:
|
||||
formatters = {
|
||||
formatters: dict[str, type[BaseFormatter]] = {
|
||||
"terminal": TerminalFormatter,
|
||||
"json": JSONFormatter,
|
||||
"markdown": MarkdownFormatter,
|
||||
}
|
||||
formatter_class = formatters.get(format_type, TerminalFormatter)
|
||||
return formatter_class(**kwargs)
|
||||
return formatter_class(**kwargs) # type: ignore[arg-type]
|
||||
|
||||
@@ -128,7 +128,7 @@ class GitRepo:
|
||||
message = message_result.stdout.strip()
|
||||
author_parts = author_result.stdout.strip().split("|")
|
||||
author = author_parts[0] if author_parts else "Unknown"
|
||||
date = author_parts[2] if len(author_parts) > 2 else ""
|
||||
date = author_parts[2] if len(author_parts) > 2 else "" # noqa: PLR2004
|
||||
|
||||
changes = self._get_commit_changes(sha)
|
||||
|
||||
|
||||
@@ -94,7 +94,7 @@ class OllamaProvider(LLMProvider):
|
||||
except Exception as e:
|
||||
raise RuntimeError(f"Ollama async generation failed: {e}") from None
|
||||
|
||||
def stream_generate(self, prompt: str, **kwargs) -> AsyncIterator[str]:
|
||||
async def stream_generate(self, prompt: str, **kwargs) -> AsyncIterator[str]: # type: ignore[misc]
|
||||
try:
|
||||
max_tokens = kwargs.get("max_tokens", 2048)
|
||||
temperature = kwargs.get("temperature", 0.3)
|
||||
|
||||
@@ -86,7 +86,7 @@ Review the following diff:
|
||||
|
||||
@classmethod
|
||||
def get_commit_review_prompt(cls, diff: str, commit_message: str, strictness: str = "balanced") -> str:
|
||||
prompt = f"""Review the following commit with message: "{commit_message}"
|
||||
prompt = f"""Review the following commit with message: \"{commit_message}\"
|
||||
|
||||
Analyze whether the changes align with the commit message and provide feedback.
|
||||
|
||||
|
||||
Reference in New Issue
Block a user