"""Export prompts to various formats."""

import json
from typing import Optional

import click
import yaml

from ..storage import PromptStorage


@click.command("export")
@click.argument("output", type=click.Path())
@click.option(
    "--format",
    "-f",
    default="yaml",
    type=click.Choice(["yaml", "json", "ollama", "lmstudio"]),
    help="Output format.",
)
@click.option("--tag", "-t", default=None, help="Export prompts with specific tag")
@click.option("--dir", "prompt_dir", default=None, help="Prompt directory")
def export_prompts(
    output: str,
    format: str,  # NOTE(review): shadows the builtin; kept — click derives the dest from --format
    tag: Optional[str],
    prompt_dir: Optional[str],
) -> None:
    """Export prompts to JSON, YAML, or LLM-specific formats.

    Args:
        output: Path of the file to write the export to.
        format: One of "yaml", "json", "ollama", "lmstudio"
            (validated by ``click.Choice``).
        tag: If given, export only prompts carrying this tag.
        prompt_dir: Optional prompt directory passed through to
            ``PromptStorage``.

    Side effects:
        Writes the serialized export to ``output`` and echoes a summary
        (or an error to stderr when there is nothing to export).
    """
    storage = PromptStorage(prompt_dir)

    if tag:
        prompts = storage.get_prompts_by_tag(tag)
    else:
        # No tag filter: export every stored prompt.
        prompts = [storage.get_prompt(name) for name in storage.list_prompts()]

    if not prompts:
        click.echo("No prompts to export", err=True)
        return

    if format == "json":
        # ensure_ascii=False keeps non-ASCII prompt text readable; the
        # file is written as UTF-8 below.
        content = json.dumps([p.to_dict() for p in prompts], indent=2, ensure_ascii=False)
    elif format == "yaml":
        content = yaml.dump(
            [p.to_dict() for p in prompts],
            default_flow_style=False,
            allow_unicode=True,
        )
    elif format == "ollama":
        content = _export_ollama(prompts)
    elif format == "lmstudio":
        content = _export_lmstudio(prompts)
    else:
        # Unreachable via the CLI (click.Choice validates the value),
        # but keeps `content` provably bound for direct callers/linters.
        raise ValueError(f"Unsupported format: {format}")

    # Explicit UTF-8 so exports are portable across platforms whose
    # default locale encoding differs (e.g. Windows cp1252).
    with open(output, "w", encoding="utf-8") as f:
        f.write(content)

    click.echo(f"Exported {len(prompts)} prompts to {output}")


def _export_ollama(prompts) -> str:
    """Serialize *prompts* as a YAML document in an Ollama-style layout.

    NOTE(review): assumes each prompt exposes ``.name``, ``.description``,
    ``.template``, ``.get_required_variables()`` and an iterable
    ``.variables`` of dicts with at least a "name" key — confirm against
    the prompt model in ``..storage``.
    """
    entries = []
    for prompt in prompts:
        entries.append(
            {
                "name": prompt.name,
                "description": prompt.description,
                # No per-prompt system message is stored; export an
                # empty one for the user to fill in.
                "system": "",
                "template": prompt.template,
                "parameters": {
                    "required": prompt.get_required_variables(),
                    "properties": {
                        v["name"]: {"description": v.get("description", "")}
                        for v in prompt.variables
                    },
                },
            }
        )
    return yaml.dump(entries, default_flow_style=False, allow_unicode=True)


def _export_lmstudio(prompts) -> str:
    """Serialize *prompts* as a YAML document in an LM Studio-style layout.

    Each prompt becomes a chat preset with an empty system message, the
    prompt template as the user message, and fixed default sampling
    parameters (temperature 0.7, max_tokens 1024).
    """
    entries = []
    for prompt in prompts:
        entries.append(
            {
                "name": prompt.name,
                "description": prompt.description,
                "messages": [
                    {"role": "system", "content": ""},
                    {"role": "user", "content": prompt.template},
                ],
                # Fixed defaults; not read from the prompt itself.
                "parameters": {
                    "temperature": 0.7,
                    "max_tokens": 1024,
                },
            }
        )
    return yaml.dump(entries, default_flow_style=False, allow_unicode=True)