"""Ollama client wrapper for LLM interactions."""
|
|
import ollama as ollama_lib
|
|
|
|
|
|
class OllamaClient:
|
|
"""Client for interacting with Ollama LLM."""
|
|
|
|
    def __init__(self, host: str = "http://localhost:11434", model: str = "llama3"):
        """Initialize the Ollama client.

        Args:
            host: Ollama server URL.
            model: Default model to use.
        """
        self.host = host
        self.model = model
        # Assigning to a module attribute (ollama_lib.host = host) has no
        # effect in the ollama library; a Client bound to the host is the
        # supported way to target a non-default server.
        self.client = ollama_lib.Client(host=host)

    def check_connection(self) -> bool:
        """Check whether the Ollama server is running and reachable.

        Returns:
            True if the connection succeeds, False otherwise.
        """
        try:
            self.client.list()
            return True
        except Exception:
            return False

    def check_model_available(self, model: str) -> bool:
        """Check whether a specific model is available locally.

        Args:
            model: Model name to check.

        Returns:
            True if the model is available, False otherwise.
        """
        try:
            response = self.client.list()
            # Accept an exact match or a tagged variant, e.g. "llama3"
            # matches "llama3:latest". A bare startswith(model) would also
            # match unrelated models such as "llama3.1", so require the
            # ":" tag separator.
            return any(
                m["name"] == model or m["name"].startswith(f"{model}:")
                for m in response.get("models", [])
            )
        except Exception:
            return False

    def generate_commit_message(
        self, diff: str, prompt: str, model: str | None = None
    ) -> str:
        """Generate a commit message from a git diff.

        Args:
            diff: Git diff to analyze.
            prompt: Prompt template to use.
            model: Optional model override.

        Returns:
            Generated commit message.
        """
        response = self.client.chat(
            model=model or self.model,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are a helpful assistant that generates git commit "
                        "messages following the conventional commit format. "
                        "Be concise and descriptive."
                    ),
                },
                {
                    "role": "user",
                    "content": f"{prompt}\n\nGit diff to analyze:\n\n{diff}",
                },
            ],
            stream=False,
        )
        return response["message"]["content"]

    def generate_changelog(
        self, commits: str, prompt: str, model: str | None = None
    ) -> str:
        """Generate a changelog from commit history.

        Args:
            commits: Commit history to analyze.
            prompt: Prompt template to use.
            model: Optional model override.

        Returns:
            Generated changelog.
        """
        response = self.client.chat(
            model=model or self.model,
            messages=[
                {
                    "role": "system",
                    "content": (
                        "You are a helpful assistant that generates changelogs "
                        "from git commit history. Group changes by type and "
                        "create a well-structured markdown changelog."
                    ),
                },
                {
                    "role": "user",
                    "content": f"{prompt}\n\nCommit history to analyze:\n\n{commits}",
                },
            ],
            stream=False,
        )
        return response["message"]["content"]

    def list_models(self) -> list[dict]:
        """List the models available on the Ollama server.

        Returns:
            List of available models with their details, or an empty list
            if the server cannot be reached.
        """
        try:
            # Dict-style access matches the ollama-python API this module
            # was written against; newer library versions may expose typed
            # response objects instead.
            response = self.client.list()
            return response.get("models", [])
        except Exception:
            return []

    def pull_model(self, model: str) -> bool:
        """Pull a model from the Ollama registry.

        Args:
            model: Model name to pull.

        Returns:
            True if the pull succeeds, False otherwise.
        """
        try:
            self.client.pull(model)
            return True
        except Exception:
            return False


def get_ollama_client(host: str = "http://localhost:11434", model: str = "llama3") -> OllamaClient:
    """Get an OllamaClient instance.

    Args:
        host: Ollama server URL.
        model: Default model to use.

    Returns:
        OllamaClient instance.
    """
    return OllamaClient(host=host, model=model)
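

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module's public API:
    # it assumes an Ollama server at the default host, and the diff and
    # prompt below are placeholders purely for illustration.
    client = get_ollama_client()
    if not client.check_connection():
        raise SystemExit(f"Ollama server is not reachable at {client.host}")
    if not client.check_model_available(client.model):
        # Pull the default model on first use; this may take a while.
        if not client.pull_model(client.model):
            raise SystemExit(f"Could not pull model {client.model!r}")
    sample_diff = "diff --git a/app.py b/app.py\n+print('hello')\n"
    message = client.generate_commit_message(
        diff=sample_diff,
        prompt="Write a one-line conventional commit message for this diff.",
    )
    print(message)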