Initial upload of auto-changelog-generator
src/changeloggen/main.py (new file, 404 lines added)
@@ -0,0 +1,404 @@
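"""Command-line interface for changeloggen.

Defines the Typer commands below: changelog generation, release notes,
git hook management, configuration, and an environment check.
"""
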
from pathlib import Path
from typing import Optional

import typer
from rich.console import Console
from rich.panel import Panel
from rich.text import Text

from .git_client import ChangeSet, FileChange
from .llm_client import OllamaAPIClient, Config as LLMConfig
from .changelog_generator import (
    categorize_changes,
    format_conventional_changelog,
    format_json_output,
    format_release_notes,
)
from .config import load_config
from .git_hooks import (
    install_prepare_hook,
    install_commit_msg_hook,
    remove_hook,
    list_installed_hooks,
)

app = typer.Typer(
    name="changeloggen",
    help="Auto Changelog Generator - Generate changelogs from git diffs using local AI",
    add_completion=False
)

console = Console()


@app.command()
def generate(
    output: str = typer.Option(
        "markdown",
        "--output", "-o",
        help="Output format: markdown, json, release, commit-message"
    ),
    model: str = typer.Option(
        "llama3.2",
        "--model", "-m",
        help="LLM model to use"
    ),
    all_changes: bool = typer.Option(
        False,
        "--all", "-a",
        help="Include unstaged changes"
    ),
    version: str = typer.Option(
        "1.0.0",
        "--version", "-v",
        help="Version string for changelog"
    ),
    output_file: Optional[Path] = typer.Option(
        None,
        "--output-file", "-f",
        help="Write output to file"
    ),
):
    """Generate a changelog from git diffs."""
    try:
        from git import Repo
        repo = Repo(Path.cwd())
    except Exception as e:
        console.print(Panel(
            Text(f"Error: Not a git repository or git not available.\n{str(e)}", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    llm_config = LLMConfig(model=model)
    llm_client = OllamaAPIClient(llm_config)

    if not llm_client.is_available():
        console.print(Panel(
            Text("Error: LLM API not available. Please ensure Ollama or LM Studio is running.", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    change_set = ChangeSet()

    try:
        # create_patch=True is needed for GitPython to populate diff.diff with patch text.
        staged = repo.index.diff("HEAD", create_patch=True)
        for diff in staged:
            file_change = FileChange(
                file_path=diff.a_path or diff.b_path,
                change_type="modified",
                diff_content=diff.diff.decode() if isinstance(diff.diff, bytes) else str(diff.diff),
                staged=True
            )
            change_set.staged_changes.append(file_change)

        if all_changes:
            unstaged = repo.index.diff(None, create_patch=True)
            for diff in unstaged:
                file_change = FileChange(
                    file_path=diff.a_path or diff.b_path,
                    change_type="modified",
                    diff_content=diff.diff.decode() if isinstance(diff.diff, bytes) else str(diff.diff),
                    staged=False
                )
                change_set.unstaged_changes.append(file_change)

    except Exception as e:
        console.print(Panel(
            Text(f"Error reading git diff: {str(e)}", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    if not change_set.all_changes:
        console.print(Panel(
            Text("No changes detected. Stage some files first.", style="yellow"),
            title="Info"
        ))
        return

    console.print(f"Found {change_set.total_files_changed} changed files")

    categorized = categorize_changes(change_set, llm_client, model)

    if output == "markdown":
        result = format_conventional_changelog(categorized, version)
    elif output == "json":
        result = format_json_output(categorized, version)
    elif output == "release":
        result = format_release_notes(categorized, version)
    elif output == "commit-message":
        # Turn the first categorized change into a conventional-commit style line.
        if categorized.changes:
            change = categorized.changes[0]
            scope_part = f"({change.scope})" if change.scope else ""
            result = f"{change.type}{scope_part}: {change.description}"
        else:
            result = categorized.summary
    else:
        result = format_conventional_changelog(categorized, version)

    if output_file:
        output_file.write_text(result)
        console.print(f"Changelog written to: {output_file}")
    else:
        console.print(result)


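# Illustrative invocations (assumes the packaged `changeloggen` entry point calls
# main() below; flags are the ones defined on `generate` above):
#   changeloggen generate                        # staged changes -> markdown changelog
#   changeloggen generate -o json -a -v 1.2.0    # include unstaged changes, emit JSON
#   changeloggen generate -o commit-message      # single conventional-commit style line

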
@app.command()
def release(
    version: str = typer.Option(
        "1.0.0",
        "--version", "-v",
        help="Version for release notes"
    ),
    output_file: Optional[Path] = typer.Option(
        None,
        "--output-file", "-f",
        help="Write output to file"
    ),
    model: str = typer.Option(
        "llama3.2",
        "--model", "-m",
        help="LLM model to use"
    ),
):
    """Generate release notes for GitHub/GitLab."""
    try:
        from git import Repo
        repo = Repo(Path.cwd())
    except Exception as e:
        console.print(Panel(
            Text(f"Error: Not a git repository.\n{str(e)}", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    llm_config = LLMConfig(model=model)
    llm_client = OllamaAPIClient(llm_config)

    if not llm_client.is_available():
        console.print(Panel(
            Text("Error: LLM API not available.", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    try:
        # Summarize the last 10 commits; each commit message is wrapped in a
        # FileChange so the same categorization pipeline can be reused.
        commits = list(repo.iter_commits("HEAD~10..HEAD"))

        change_set = ChangeSet()
        for commit in commits:
            file_change = FileChange(
                file_path=str(commit.hexsha),
                change_type="modified",
                diff_content=f"{commit.message}\nAuthor: {commit.author.name}"
            )
            change_set.staged_changes.append(file_change)

    except Exception as e:
        console.print(Panel(
            Text(f"Error reading commit history: {str(e)}", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    categorized = categorize_changes(change_set, llm_client, model)
    result = format_release_notes(categorized, version)

    if output_file:
        output_file.write_text(result)
        console.print(f"Release notes written to: {output_file}")
    else:
        console.print(result)


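# Illustrative invocation (version string is a placeholder; the command summarizes
# the last 10 commits on HEAD):
#   changeloggen release -v 1.2.0 -f RELEASE_NOTES.md

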
@app.command()
def install_hook(
    hook_type: str = typer.Argument(
        ..., help="Hook type: prepare-commit-msg or commit-msg"
    ),
    model: str = typer.Option(
        "llama3.2",
        "--model", "-m",
        help="LLM model to use"
    ),
    branches: Optional[str] = typer.Option(
        None,
        "--branches", "-b",
        help="Comma-separated list of branches to run on (default: all)"
    ),
):
    """Install a git hook for automatic changelog generation."""
    if hook_type not in ["prepare-commit-msg", "commit-msg"]:
        console.print(Panel(
            Text("Invalid hook type. Use 'prepare-commit-msg' or 'commit-msg'.", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    try:
        from git import Repo
        Repo(Path.cwd())  # only verifies that we are inside a git repository
    except Exception:
        console.print(Panel(
            Text("Error: Not a git repository.", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    branch_list = None
    if branches:
        branch_list = [b.strip() for b in branches.split(",")]

    if hook_type == "prepare-commit-msg":
        hook_path = install_prepare_hook(Path.cwd(), model, branch_list)
    else:
        hook_path = install_commit_msg_hook(Path.cwd(), model, branch_list)

    console.print(Panel(
        Text(f"Hook installed at: {hook_path}", style="green"),
        title="Success"
    ))


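# Illustrative invocations (branch names are placeholders):
#   changeloggen install-hook prepare-commit-msg
#   changeloggen install-hook commit-msg -m llama3.2 -b main,develop

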
@app.command()
def remove_hook_cmd(
    hook_type: str = typer.Argument(
        ..., help="Hook type to remove"
    ),
):
    """Remove an installed git hook."""
    # Note: Typer derives the CLI name from the function name, so this command is
    # invoked as `remove-hook-cmd`; the `_cmd` suffix avoids shadowing the imported
    # remove_hook helper.
    if hook_type not in ["prepare-commit-msg", "commit-msg"]:
        console.print(Panel(
            Text("Invalid hook type.", style="red"),
            title="Error"
        ))
        raise typer.Exit(1)

    removed = remove_hook(hook_type, Path.cwd())

    if removed:
        console.print(Panel(
            Text(f"Hook '{hook_type}' removed.", style="green"),
            title="Success"
        ))
    else:
        console.print(Panel(
            Text(f"Hook '{hook_type}' not found.", style="yellow"),
            title="Info"
        ))


@app.command()
def list_hooks():
    """List all installed changeloggen hooks."""
    hooks = list_installed_hooks(Path.cwd())

    if hooks:
        console.print("Installed hooks:")
        for hook in hooks:
            console.print(f"  - {hook}")
    else:
        console.print("No changeloggen hooks installed.")


@app.command()
def config_show():
    """Show current configuration."""
    config = load_config()

    console.print(Panel(
        f"Ollama URL: {config.ollama_url}\n"
        f"Model: {config.model}\n"
        f"Temperature: {config.temperature}\n"
        f"Output Format: {config.output_format}\n"
        f"Include Unstaged: {config.include_unstaged}",
        title="Configuration"
    ))


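# Illustrative `.changeloggen.yaml` shape (field names mirror the Config attributes
# printed above; values are examples only, and the exact schema is defined by
# load_config/save_config in .config):
#   ollama_url: http://localhost:11434
#   model: llama3.2
#   temperature: 0.3
#   output_format: markdown
#   include_unstaged: false

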
@app.command()
def config_set(
    ollama_url: Optional[str] = typer.Option(
        None,
        "--ollama-url",
        help="Set Ollama/LM Studio URL"
    ),
    model: Optional[str] = typer.Option(
        None,
        "--model",
        help="Set LLM model"
    ),
    temperature: Optional[float] = typer.Option(
        None,
        "--temperature",
        help="Set temperature (0.0-1.0)"
    ),
    output_format: Optional[str] = typer.Option(
        None,
        "--output-format",
        help="Set default output format"
    ),
):
    """Update configuration."""
    config = load_config()

    if ollama_url:
        config.ollama_url = ollama_url
    if model:
        config.model = model
    if temperature is not None:
        config.temperature = temperature
    if output_format:
        config.output_format = output_format

    config_path = Path(".changeloggen.yaml")
    from .config import save_config
    save_config(config, config_path)

    console.print(Panel(
        Text(f"Configuration saved to {config_path}", style="green"),
        title="Success"
    ))


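# Illustrative invocation (values are examples only):
#   changeloggen config-set --model llama3.2 --temperature 0.2 --output-format json

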
@app.command()
def check():
    """Check system requirements and connectivity."""
    console.print("Checking system requirements...")

    checks = []

    try:
        from git import Repo
        Repo(Path.cwd())  # raises if the current directory is not a git repository
        checks.append(("Git Repository", True, ""))
    except Exception as e:
        checks.append(("Git Repository", False, str(e)))

    llm_config = LLMConfig()
    llm_client = OllamaAPIClient(llm_config)
    available = llm_client.is_available()
    checks.append(("LLM API", available, ""))

    for name, passed, error in checks:
        # Plain status text: Text() does not parse console markup, so embedded
        # "[green]...[/green]" tags would print literally. Color comes from style.
        status = "OK" if passed else "FAIL"
        style = "green" if passed else "red"
        msg = f"{status} {name}"
        if error:
            msg += f": {error}"
        console.print(Text(msg, style=style))

    if not all(c[1] for c in checks):
        raise typer.Exit(1)


@app.command()
def version():
    """Show version information."""
    from . import __version__
    console.print(f"changeloggen v{__version__}")


def main():
    app()
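
# Optional direct execution (e.g. `python -m changeloggen.main`); the installed
# console-script entry point is expected to call main() as well.
if __name__ == "__main__":
    main()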