Add CLI commands (init, run, test, prompt)
src/promptforge/cli/commands/test.py (new file, 72 additions)
@@ -0,0 +1,72 @@
import asyncio

import click

from promptforge.core.prompt import Prompt
from promptforge.core.config import get_config
from promptforge.providers import ProviderFactory
from promptforge.testing import ABTest, ABTestConfig


@click.command()
@click.argument("prompt_names", nargs=-1, required=True)
@click.option("--provider", "-p", help="Provider to use")
@click.option("--iterations", "-i", default=3, help="Number of test iterations")
@click.option("--output", "-o", type=click.Choice(["text", "json"]), default="text")
@click.option("--parallel", is_flag=True, help="Run iterations in parallel")
@click.pass_obj
def test(ctx, prompt_names: tuple, provider: str, iterations: int, output: str, parallel: bool):
    """Test prompts with A/B testing."""
    prompts_dir = ctx["prompts_dir"]
    prompts = Prompt.list(prompts_dir)

    # Resolve each requested name to a stored prompt, failing fast on unknown names.
    selected_prompts = []
    for name in prompt_names:
        prompt = next((p for p in prompts if p.name == name), None)
        if not prompt:
            click.echo(f"Prompt '{name}' not found", err=True)
            raise click.Abort()
        selected_prompts.append(prompt)
    config = get_config()
    selected_provider = provider or config.defaults.provider

    # Build the provider, using per-provider settings from the config when present.
    provider_config = config.providers.get(selected_provider)
    try:
        provider_instance = ProviderFactory.create(
            selected_provider,
            model=provider_config.model if provider_config is not None else None,
            temperature=provider_config.temperature if provider_config is not None else 0.7,
        )
    except Exception as e:
        click.echo(f"Provider error: {e}", err=True)
        raise click.Abort()

    test_config = ABTestConfig(iterations=iterations, parallel=parallel)
    ab_test = ABTest(provider_instance, test_config)

    # run_comparison is a coroutine, so drive it from this synchronous command with asyncio.run().
    async def run_tests():
        results = await ab_test.run_comparison(selected_prompts)
        return results

    try:
        results = asyncio.run(run_tests())
    except Exception as e:
        click.echo(f"Test error: {e}", err=True)
        raise click.Abort()

    for name, summary in results.items():
        click.echo(f"\n=== {name} ===")
        click.echo(f"Successful: {summary.successful_runs}/{summary.total_runs}")
        click.echo(f"Avg Latency: {summary.avg_latency_ms:.2f}ms")
        click.echo(f"Avg Tokens: {summary.avg_tokens:.0f}")

    if output == "json":
        import json
        output_data = {
            name: {
                "successful_runs": s.successful_runs,
                "total_runs": s.total_runs,
                "avg_latency_ms": s.avg_latency_ms,
                "avg_tokens": s.avg_tokens,
            }
            for name, s in results.items()
        }
        click.echo(json.dumps(output_data, indent=2))
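For a quick local check of this command without wiring up the full CLI, click's CliRunner can invoke it directly. The snippet below is a minimal sketch, assuming the module is importable as promptforge.cli.commands.test and that the pass_obj context is a dict carrying the "prompts_dir" key the command reads; the prompt names and the prompts/ path are placeholders, not part of this commit.

from click.testing import CliRunner

from promptforge.cli.commands.test import test

runner = CliRunner()
# Hypothetical prompt names and prompts directory; 5 iterations, JSON output.
result = runner.invoke(
    test,
    ["greeting-v1", "greeting-v2", "--iterations", "5", "--output", "json"],
    obj={"prompts_dir": "prompts"},
)
print(result.exit_code)
print(result.output)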