"""CLI interface for git-commit-message-generator."""

from pathlib import Path
from typing import Optional

import click
from rich import print as rprint

from git_commit_generator.changelog_generator import get_changelog_generator
from git_commit_generator.config import Config, get_config
from git_commit_generator.git_utils import get_git_utils
from git_commit_generator.interactive import Action, InteractiveMode, get_interactive_mode
from git_commit_generator.message_generator import MessageGenerator, get_message_generator
from git_commit_generator.ollama_client import get_ollama_client
@click.group()
@click.option("--host", default=None, help="Ollama server URL")
@click.option("--model", default=None, help="Model to use for generation")
@click.pass_context
def main(ctx: click.Context, host: Optional[str], model: Optional[str]) -> None:
    """Git Commit Message Generator CLI.

    A CLI tool that generates git commit messages using local LLMs.
    """
    ctx.ensure_object(dict)
    config = get_config()
    # Command-line flags take precedence over values from configuration.
    ctx.obj.update(
        {
            "config": config,
            "host": host or config.ollama_host,
            "model": model or config.ollama_model,
        }
    )
@main.command("generate")
@click.option(
    "--staged/--unstaged",
    default=True,
    help="Include staged (default) or unstaged changes",
)
@click.option(
    "--interactive",
    "-i",
    is_flag=True,
    default=False,
    help="Use interactive mode for message refinement",
)
@click.option(
    "--output",
    "-o",
    type=click.Path(),
    help="Write commit message to file",
)
@click.pass_context
def generate(
    ctx: click.Context,
    staged: bool,
    interactive: bool,
    output: Optional[str],
) -> None:
    """Generate a commit message from staged/unstaged changes.

    Connects to the configured Ollama server, generates a message from the
    selected diff (staged by default), and either prints it directly or
    hands control to the interactive refinement loop. The accepted message
    is optionally written to ``output``.
    """
    config: Config = ctx.obj["config"]
    host: str = ctx.obj["host"]
    model: str = ctx.obj["model"]

    interactive_mode = get_interactive_mode()

    try:
        ollama_client = get_ollama_client(host=host, model=model)

        if not ollama_client.check_connection():
            interactive_mode.show_error(
                f"Could not connect to Ollama at {host}. "
                "Make sure Ollama is running."
            )
            interactive_mode.show_info(
                "Tip: Run 'ollama serve' to start Ollama server."
            )
            return

        interactive_mode.show_connection_status(True, model)
        interactive_mode.show_info("Generating commit message...")

        message_generator = get_message_generator(
            config=config,
            ollama_client=ollama_client,
        )

        if not interactive:
            message = message_generator.generate(
                staged=staged,
                unstaged=not staged,
                model=model,
            )
            rprint("\n[bold green]Generated commit message:[/bold green]")
            rprint(f"[cyan]{message}[/cyan]")

            if output:
                # Explicit encoding so the written message is stable across
                # platforms (write_text defaults to the locale encoding).
                Path(output).write_text(message, encoding="utf-8")
                interactive_mode.show_info(f"Message written to {output}")

        else:
            _run_interactive_mode(
                message_generator=message_generator,
                interactive_mode=interactive_mode,
                staged=staged,
                unstaged=not staged,
                output=output,
            )

    except ValueError as e:
        # Raised by the generator when there are no changes to describe.
        interactive_mode.show_error(str(e))
        interactive_mode.show_no_changes_warning()
    except Exception as e:  # top-level CLI boundary: report, don't crash
        interactive_mode.show_error(f"An error occurred: {str(e)}")
def _run_interactive_mode(
    message_generator: MessageGenerator,
    interactive_mode: InteractiveMode,
    staged: bool,
    unstaged: bool,
    output: Optional[str],
) -> None:
    """Run interactive message generation loop.

    Repeatedly generates a candidate commit message and prompts the user to
    accept, regenerate, or quit. On accept (and confirmation) the final
    message is printed and optionally written to ``output``.

    Args:
        message_generator: Generator used to produce candidate messages.
        interactive_mode: UI helper for prompts and status output.
        staged: Include staged changes in the diff.
        unstaged: Include unstaged changes in the diff.
        output: Optional path to write the accepted message to.
    """
    while True:
        try:
            message = message_generator.generate(
                staged=staged,
                unstaged=unstaged,
            )

            action, edited_message = interactive_mode.prompt_for_action(message)

            if action == Action.QUIT:
                interactive_mode.show_info("Cancelled.")
                return
            elif action == Action.REGENERATE:
                interactive_mode.show_info("Regenerating...")
                continue
            elif action == Action.ACCEPT:
                # The user may have edited the message during the prompt.
                final_message = edited_message or message
                if interactive_mode.confirm_commit(final_message):
                    rprint("\n[bold]Commit message:[/bold]")
                    rprint(f"[green]{final_message}[/green]")

                    if output:
                        # Explicit encoding so the written message is stable
                        # across platforms (write_text defaults to the locale
                        # encoding).
                        Path(output).write_text(final_message, encoding="utf-8")
                        interactive_mode.show_info(f"Message written to {output}")

                    return
                # Commit not confirmed: fall through and regenerate.

        except ValueError as e:
            # Raised by the generator when there are no changes to describe.
            interactive_mode.show_error(str(e))
            interactive_mode.show_no_changes_warning()
            return
        except Exception as e:
            interactive_mode.show_error(f"An error occurred: {str(e)}")
            return
@main.command("changelog")
@click.option(
    "--since",
    default=None,
    help="Only include commits since this date/tag",
)
@click.option(
    "--limit",
    default=50,
    type=int,
    help="Maximum number of commits to include",
)
@click.option(
    "--output",
    "-o",
    type=click.Path(),
    help="Write changelog to file",
)
@click.option(
    "--simple",
    is_flag=True,
    default=False,
    help="Generate simple changelog without LLM",
)
@click.pass_context
def changelog(
    ctx: click.Context,
    since: Optional[str],
    limit: int,
    output: Optional[str],
    simple: bool,
) -> None:
    """Generate CHANGELOG.md from git history.

    By default the changelog is summarized with the configured LLM; with
    ``--simple`` it is built directly from commit history without Ollama.
    """
    config: Config = ctx.obj["config"]
    host: str = ctx.obj["host"]
    model: str = ctx.obj["model"]

    interactive_mode = get_interactive_mode()

    try:
        if not simple:
            # Only create/contact the Ollama client when the LLM path is
            # actually used; --simple needs no client at all.
            ollama_client = get_ollama_client(host=host, model=model)

            if not ollama_client.check_connection():
                interactive_mode.show_error(
                    f"Could not connect to Ollama at {host}. "
                    "Use --simple flag for LLM-free generation."
                )
                return

            interactive_mode.show_connection_status(True, model)
            interactive_mode.show_info("Generating changelog with LLM...")

            changelog_generator = get_changelog_generator(
                config=config,
                ollama_client=ollama_client,
            )

            # Named changelog_text to avoid shadowing this command function.
            changelog_text = changelog_generator.generate(
                since=since,
                limit=limit,
                output_path=output,
            )
        else:
            interactive_mode.show_info("Generating simple changelog...")
            changelog_generator = get_changelog_generator(config=config)
            changelog_text = changelog_generator.generate_simple(
                since=since,
                limit=limit,
                output_path=output,
            )

        rprint("\n[bold green]Generated Changelog:[/bold green]")
        rprint(changelog_text)

        if output:
            interactive_mode.show_info(f"Changelog written to {output}")

    except ValueError as e:
        interactive_mode.show_error(str(e))
    except Exception as e:  # top-level CLI boundary: report, don't crash
        interactive_mode.show_error(f"An error occurred: {str(e)}")
@main.command("config")
@click.option(
    "--show",
    is_flag=True,
    default=False,
    help="Show current configuration",
)
@click.option(
    "--host",
    default=None,
    help="Set Ollama host URL",
)
@click.option(
    "--model",
    default=None,
    help="Set default model",
)
@click.pass_context
def config_cmd(
    ctx: click.Context,
    show: bool,
    host: Optional[str],
    model: Optional[str],
) -> None:
    """Configure git-commit-message-generator settings.

    With ``--show``, prints the effective configuration. With ``--host`` or
    ``--model``, persists those values to the config file. With no options,
    prints the top-level help text.
    """
    config: Config = ctx.obj["config"]

    if show:
        rprint("[bold]Current Configuration:[/bold]")
        rprint(f" Ollama Host: {config.ollama_host}")
        rprint(f" Ollama Model: {config.ollama_model}")
        rprint(f" Prompt Directory: {config.prompt_dir}")
        return

    if host or model:
        config_path = config.config_path or "./config.yaml"
        import yaml

        # NOTE(review): only host/model are written, so any other settings in
        # an existing config file are overwritten -- confirm this is intended.
        config_data = {
            "ollama_host": host or config.ollama_host,
            "ollama_model": model or config.ollama_model,
        }
        # Explicit encoding so the config file is UTF-8 regardless of locale.
        with open(config_path, "w", encoding="utf-8") as f:
            yaml.dump(config_data, f)
        rprint(f"[green]Configuration saved to {config_path}[/green]")
    else:
        click.echo(main.get_help(ctx))
@main.command("status")
@click.pass_context
def status(ctx: click.Context) -> None:
    """Check system status and configuration."""
    config: Config = ctx.obj["config"]
    host: str = ctx.obj["host"]
    model: str = ctx.obj["model"]

    ui = get_interactive_mode()

    rprint("[bold]Git Commit Message Generator Status[/bold]\n")

    # Effective configuration (after CLI-flag overrides).
    rprint("[bold]Configuration:[/bold]")
    rprint(f" Ollama Host: {host}")
    rprint(f" Default Model: {model}")
    rprint(f" Prompt Directory: {config.prompt_dir}")

    # Ollama connectivity and installed models.
    try:
        client = get_ollama_client(host=host, model=model)
        is_up = client.check_connection()
        ui.show_connection_status(is_up, model)

        if is_up:
            available = client.list_models()
            rprint("\n[bold]Available Models:[/bold]")
            if not available:
                rprint(" No models found")
            else:
                for entry in available[:5]:
                    rprint(f" - {entry.get('name', 'unknown')}")
                hidden = len(available) - 5
                if hidden > 0:
                    rprint(f" ... and {hidden} more")

    except Exception as e:
        ui.show_error(f"Could not connect to Ollama: {e}")

    # Git repository detection.
    try:
        detected = get_git_utils().is_repo()
        rprint("\n[bold]Git Repository:[/bold]")
        rprint(f" Repository Detected: {'Yes' if detected else 'No'}")
    except Exception:
        rprint("\n[bold]Git Repository:[/bold]")
        rprint(" Not in a git repository")
@main.command("help")
def help_cmd() -> None:
    """Show help information."""
    # Delegate to the interactive UI's built-in help screen.
    get_interactive_mode().show_help()