Initial upload: Local LLM Prompt Manager CLI tool

2026-02-05 20:56:17 +00:00
parent c6b8697a54
commit ad25071307

src/commands/export.py Normal file

@@ -0,0 +1,83 @@
"""Export prompts to various formats."""
import json
from typing import Optional
import click
import yaml
from ..storage import PromptStorage
@click.command("export")
@click.argument("output", type=click.Path())
@click.option("--format", "-f", default="yaml", type=click.Choice(["yaml", "json", "ollama", "lmstudio"]))
@click.option("--tag", "-t", default=None, help="Export prompts with specific tag")
@click.option("--dir", "prompt_dir", default=None, help="Prompt directory")
def export_prompts(
output: str,
format: str,
tag: Optional[str],
prompt_dir: Optional[str]
):
"""Export prompts to JSON, YAML, or LLM-specific formats."""
storage = PromptStorage(prompt_dir)
if tag:
prompts = storage.get_prompts_by_tag(tag)
else:
prompt_names = storage.list_prompts()
prompts = [storage.get_prompt(n) for n in prompt_names]
if not prompts:
click.echo("No prompts to export", err=True)
return
if format in ("yaml", "json"):
data = [p.to_dict() for p in prompts]
content = json.dumps(data, indent=2) if format == "json" else yaml.dump(data, default_flow_style=False)
elif format == "ollama":
content = _export_ollama(prompts)
elif format == "lmstudio":
content = _export_lmstudio(prompts)
with open(output, "w") as f:
f.write(content)
click.echo(f"Exported {len(prompts)} prompts to {output}")
def _export_ollama(prompts) -> str:
"""Export prompts in Ollama format."""
result = []
for p in prompts:
result.append({
"name": p.name,
"description": p.description,
"system": "",
"template": p.template,
"parameters": {
"required": p.get_required_variables(),
"properties": {v["name"]: {"description": v.get("description", "")} for v in p.variables}
}
})
return yaml.dump(result, default_flow_style=False)
def _export_lmstudio(prompts) -> str:
"""Export prompts in LM Studio format."""
result = []
for p in prompts:
result.append({
"name": p.name,
"description": p.description,
"messages": [
{"role": "system", "content": ""},
{"role": "user", "content": p.template}
],
"parameters": {
"temperature": 0.7,
"max_tokens": 1024
}
})
return yaml.dump(result, default_flow_style=False)
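
A quick way to exercise this command without installing the CLI is click's built-in test runner. The sketch below is a minimal smoke test, not part of the commit: it assumes the module is importable as src.commands.export, and the prompt directory ./prompts and its contents are hypothetical placeholders for wherever PromptStorage reads prompt files from.

# Minimal smoke test using click.testing.CliRunner.
# Assumptions (not from this commit): the package imports as
# src.commands.export, and ./prompts is a directory PromptStorage
# can read prompt files from.
from click.testing import CliRunner

from src.commands.export import export_prompts

runner = CliRunner()
result = runner.invoke(
    export_prompts,
    ["prompts.json", "--format", "json", "--dir", "./prompts"],
)
print(result.output)  # e.g. "Exported 3 prompts to prompts.json"
assert result.exit_code == 0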