Initial upload: Local LLM Prompt Manager CLI tool
This commit is contained in:
104
src/commands/run.py
Normal file
104
src/commands/run.py
Normal file
@@ -0,0 +1,104 @@
|
||||
"""Search and run CLI commands."""
|
||||
|
||||
from typing import Optional
|
||||
|
||||
import click
|
||||
|
||||
from ..llm import LLMClientFactory
|
||||
from ..storage import PromptStorage
|
||||
from ..templates import TemplateEngine
|
||||
|
||||
|
||||
@click.command("search")
@click.argument("query", required=False)
@click.option("--name", "-n", default=None, help="Search by prompt name")
@click.option("--content", "-c", default=None, help="Search by template content")
@click.option("--tag", "-t", default=None, help="Search by tag")
@click.option("--dir", "prompt_dir", default=None, help="Prompt directory")
def search_prompts(
    query: Optional[str],
    name: Optional[str],
    content: Optional[str],
    tag: Optional[str],
    prompt_dir: Optional[str],
):
    """Search prompts by name, content, or tag.

    A bare QUERY argument is shorthand for a name search; the
    --name/--content/--tag options allow targeted searches and may be
    combined. Results are rendered as a rich table on stdout.
    """
    from rich.console import Console
    from rich.table import Table

    storage = PromptStorage(prompt_dir)

    # Positional QUERY takes precedence and searches by name only.
    if query:
        prompts = storage.search_prompts(name=query)
    else:
        prompts = storage.search_prompts(name=name, content=content, tag=tag)

    # Tell the user explicitly when nothing matched instead of printing
    # an empty table.
    if not prompts:
        click.echo("No prompts found.")
        return

    table = Table(title="Search Results")
    table.add_column("Name", style="cyan")
    table.add_column("Tags", style="magenta")
    table.add_column("Description", style="green")

    for prompt in prompts:
        tags_str = ", ".join(prompt.tags) if prompt.tags else "-"
        # Guard against a missing description (None) before taking len();
        # truncate long descriptions so rows stay readable.
        desc = prompt.description or ""
        if len(desc) > 50:
            desc = desc[:50] + "..."
        table.add_row(prompt.name, tags_str, desc)

    console = Console()
    console.print(table)
|
||||
|
||||
|
||||
@click.command("run")
@click.argument("name")
@click.option("--var", "-v", multiple=True, help="Variable values as key=value")
@click.option("--provider", default=None, help="LLM provider")
@click.option("--model", default=None, help="Model name")
@click.option("--stream/--no-stream", default=False, help="Stream the response")
@click.option("--dir", "prompt_dir", default=None, help="Prompt directory")
def run_prompt(
    name: str,
    var: tuple,
    provider: Optional[str],
    model: Optional[str],
    stream: bool,
    prompt_dir: Optional[str],
):
    """Run a prompt with optional variable substitution.

    Renders the named prompt template with the supplied --var key=value
    pairs, shows the rendered result, and (after confirmation) sends it
    to the configured LLM. Exits with status 1 if the prompt is missing
    or rendering fails, so shell scripts can detect the error.
    """
    from rich.console import Console
    from rich.panel import Panel

    storage = PromptStorage(prompt_dir)
    prompt = storage.get_prompt(name)

    if not prompt:
        click.echo(f"Error: Prompt '{name}' not found", err=True)
        # Non-zero exit status; a plain `return` would exit 0 and make
        # the failure invisible to callers.
        raise SystemExit(1)

    # Parse --var key=value pairs; warn on malformed entries instead of
    # silently dropping them.
    variables = {}
    for v in var:
        if "=" in v:
            k, val = v.split("=", 1)
            variables[k.strip()] = val.strip()
        else:
            click.echo(f"Warning: ignoring malformed --var '{v}' (expected key=value)", err=True)

    engine = TemplateEngine()
    try:
        rendered = engine.render_prompt(prompt, variables)
    except ValueError as e:
        click.echo(f"Error: {e}", err=True)
        raise SystemExit(1)

    click.echo("\n--- Rendered Prompt ---")
    console = Console()
    console.print(Panel(rendered, title="Rendered Template"))

    if click.confirm("\nSend to LLM?"):
        client = LLMClientFactory.create(provider=provider)

        if stream:
            click.echo("\n--- LLM Response ---")
            # Emit chunks without newlines, then terminate the line once
            # the stream is exhausted.
            for chunk in client.stream_generate(rendered, model=model):
                click.echo(chunk, nl=False)
            click.echo()
        else:
            response = client.generate(rendered, model=model)
            click.echo("\n--- LLM Response ---")
            console.print(Panel(response, title="Response"))
|
||||
Reference in New Issue
Block a user