Initial upload: Git AI Documentation Generator v0.1.0
src/ollama_client.py (new file)
@@ -0,0 +1,299 @@
"""Ollama client module for local LLM interactions."""

import time
from dataclasses import dataclass
from typing import Optional

import ollama

from src.config import Config
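# NOTE: Config is assumed to expose at least `ollama_host` (the server URL)
# and `model` (the default model name); both attributes are used below.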


@dataclass
class OllamaModel:
    """Represents an available Ollama model."""

    name: str
    size: int
    digest: str
    modified_at: str


class OllamaError(Exception):
    """Base exception for Ollama operations."""

    pass


class OllamaConnectionError(OllamaError):
    """Raised when connection to Ollama fails."""

    pass


class OllamaModelNotFoundError(OllamaError):
    """Raised when the requested model is not available."""

    pass


class OllamaClient:
    """Client for interacting with Ollama local LLM."""

    def __init__(self, config: Config | None = None):
        """Initialize the Ollama client.

        Args:
            config: Configuration object.
        """
        self.config = config or Config()
        self._client: Optional[ollama.Client] = None

    @property
    def client(self) -> ollama.Client:
        """Get or create the Ollama client."""
        if self._client is None:
            self._client = ollama.Client(host=self.config.ollama_host)
        return self._client
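
    # The underlying ollama.Client is created lazily by the `client` property
    # above, so constructing an OllamaClient never touches the network; the
    # first request does. connect_with_retry() below forces that first contact.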

    def connect_with_retry(self, max_retries: int = 3, delay: float = 2.0) -> bool:
        """Connect to the Ollama server with retry logic.

        Args:
            max_retries: Maximum number of retry attempts.
            delay: Delay between retries in seconds.

        Returns:
            True if connection successful.

        Raises:
            OllamaConnectionError: If connection fails after retries.
        """
        for attempt in range(max_retries):
            try:
                # ps() is a lightweight call that fails fast when the server
                # is unreachable, which makes it a convenient health check.
                self.client.ps()
                return True
            except (ollama.RequestError, ConnectionError) as e:
                # Unreachable-host failures typically surface from ollama as
                # the builtin ConnectionError, so catch it alongside
                # ollama.RequestError.
                if attempt < max_retries - 1:
                    time.sleep(delay)
                else:
                    raise OllamaConnectionError(
                        f"Failed to connect to Ollama at {self.config.ollama_host}: {e}"
                    ) from e
        return False

    def list_models(self) -> list[OllamaModel]:
        """List available Ollama models.

        Returns:
            List of available models.
        """
        try:
            # list() reports every installed model; ps() (used above as a
            # health check) only reports models currently loaded in memory.
            response = self.client.list()
            models = []
            for model in response.models:
                models.append(OllamaModel(
                    # In recent ollama-python releases the typed response
                    # names this field `model`, and modified_at is a datetime.
                    name=model.model,
                    size=model.size,
                    digest=model.digest,
                    modified_at=str(model.modified_at),
                ))
            return models
        except (ollama.RequestError, ConnectionError) as e:
            raise OllamaConnectionError(f"Failed to list models: {e}") from e

    def pull_model(self, model: str, timeout: Optional[int] = None) -> bool:
        """Pull a model from the Ollama registry.

        Args:
            model: Model name to pull.
            timeout: Timeout in seconds.

        Returns:
            True if successful.
        """
        try:
            # pull() takes no timeout argument; timeouts are configured on the
            # client itself, so build a dedicated client when one is requested,
            # and route the request through the configured host either way.
            if timeout is not None:
                client = ollama.Client(host=self.config.ollama_host, timeout=timeout)
            else:
                client = self.client
            client.pull(model)
            return True
        except (ollama.RequestError, ConnectionError) as e:
            raise OllamaConnectionError(f"Failed to pull model {model}: {e}") from e

    def ensure_model_exists(self, model: str) -> bool:
        """Ensure the model exists, pulling it if necessary.

        Args:
            model: Model name to ensure.

        Returns:
            True if model exists or was pulled successfully.
        """
        models = [m.name for m in self.list_models()]
        if model in models:
            return True
        return self.pull_model(model)
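
    # NOTE: Ollama model names include tags (e.g. "llama3:latest"), and the
    # membership check in ensure_model_exists() is exact, so callers should
    # pass the same tagged name that the server reports.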

    def generate_commit_message(
        self,
        diff_content: str,
        model: Optional[str] = None,
        language: Optional[str] = None,
    ) -> str:
        """Generate a commit message from diff content.

        Args:
            diff_content: The git diff to analyze.
            model: Model to use. Defaults to config model.
            language: Programming language context.

        Returns:
            Generated commit message.
        """
        model_name = model or self.config.model

        language_context = f"\n\nProgramming language: {language}" if language else ""
        prompt = f"""You are an expert software developer helping to write commit messages.
Your task is to analyze the following git diff and generate a concise, descriptive commit message.
Use the Conventional Commits format: <type>(<scope>): <description>

Types:
- feat: A new feature
- fix: A bug fix
- docs: Documentation changes
- style: Code style changes (formatting, semicolons, etc.)
- refactor: Code refactoring
- perf: Performance improvements
- test: Adding or modifying tests
- chore: Maintenance tasks

Please provide ONLY the commit message, nothing else.{language_context}

Diff:
```
{diff_content}
```"""

        try:
            response = self.client.chat(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.message.content.strip()
        except (ollama.RequestError, ConnectionError) as e:
            raise OllamaConnectionError(f"Failed to generate commit message: {e}") from e
        except ollama.ResponseError as e:
            if "model not found" in str(e).lower():
                raise OllamaModelNotFoundError(f"Model {model_name} not found: {e}") from e
            raise OllamaError(f"Ollama error: {e}") from e
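
    # The two generators below follow the same pattern as above: build a
    # prompt, send a single user message via chat(), and map ollama errors
    # onto this module's exception hierarchy.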

    def generate_changelog(
        self,
        commits: list[dict],
        from_version: Optional[str] = None,
        to_version: Optional[str] = None,
        model: Optional[str] = None,
    ) -> str:
        """Generate a changelog from commit history.

        Args:
            commits: List of commit dictionaries.
            from_version: Version to start from.
            to_version: Version to end at.
            model: Model to use.

        Returns:
            Generated changelog in markdown format.
        """
        model_name = model or self.config.model

        version_info = f" from {from_version or 'previous version'}"
        version_info += f" to {to_version or 'current version'}" if from_version else ""

        commits_text = "\n".join([
            f"- {c.get('sha', '')[:7]}: {c.get('message', '')} ({c.get('author', '')})"
            for c in commits
        ])

        prompt = f"""You are a technical writer creating a changelog.
Generate a well-formatted changelog in markdown format based on the following commits.{version_info}

Group changes by type:
- Features (feat)
- Bug Fixes (fix)
- Documentation (docs)
- Code Style (style)
- Refactoring (refactor)
- Performance (perf)
- Testing (test)
- Maintenance (chore)

For each group, list the changes concisely. Use present tense.

Commits:
{commits_text}

Provide ONLY the changelog content, nothing else."""

        try:
            response = self.client.chat(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.message.content.strip()
        except (ollama.RequestError, ConnectionError) as e:
            # The ollama package only defines RequestError and ResponseError;
            # connection failures surface as the builtin ConnectionError.
            raise OllamaConnectionError(f"Failed to generate changelog: {e}") from e
        except ollama.ResponseError as e:
            if "model not found" in str(e).lower():
                raise OllamaModelNotFoundError(f"Model {model_name} not found: {e}") from e
            raise OllamaError(f"Ollama error: {e}") from e

    def generate_api_docs(
        self,
        code_changes: dict[str, str],
        framework: Optional[str] = None,
        model: Optional[str] = None,
    ) -> str:
        """Generate API documentation from code changes.

        Args:
            code_changes: Dictionary of file path to diff content.
            framework: Web framework context (fastapi, flask, express, etc.).
            model: Model to use.

        Returns:
            Generated API documentation in markdown format.
        """
        model_name = model or self.config.model

        framework_context = f"\n\nWeb framework: {framework}" if framework else ""
        changes_text = "\n\n".join([
            f"File: {path}\n\n```diff\n{diff_content}\n```"
            for path, diff_content in code_changes.items()
        ])

        prompt = f"""You are an expert software developer creating API documentation.
Analyze the following code changes and generate OpenAPI-style API documentation.{framework_context}

For each API endpoint found, document:
- HTTP method (GET, POST, PUT, DELETE, etc.)
- Endpoint path
- Request parameters (path, query, body)
- Response status codes and schemas

Provide the documentation in markdown format with clear sections.
If no API endpoints are found, say so clearly.

Code changes:
{changes_text}"""

        try:
            response = self.client.chat(
                model=model_name,
                messages=[{"role": "user", "content": prompt}],
            )
            return response.message.content.strip()
        except (ollama.RequestError, ConnectionError) as e:
            raise OllamaConnectionError(f"Failed to generate API docs: {e}") from e
        except ollama.ResponseError as e:
            if "model not found" in str(e).lower():
                raise OllamaModelNotFoundError(f"Model {model_name} not found: {e}") from e
            raise OllamaError(f"Ollama error: {e}") from e
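

# --- Usage sketch -----------------------------------------------------------
# A minimal, hedged example of driving this client end to end. It assumes a
# local Ollama server is running and uses "llama3" as a placeholder model
# name; neither is guaranteed by this module.
if __name__ == "__main__":
    client = OllamaClient()
    client.connect_with_retry()
    client.ensure_model_exists("llama3")  # placeholder model name
    message = client.generate_commit_message(
        "diff --git a/app.py b/app.py\n+print('hello')",
        language="python",
    )
    print(message)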