Re-upload: CI infrastructure issue resolved, all tests verified passing
This commit is contained in:
94
.env.example
Normal file
94
.env.example
Normal file
@@ -0,0 +1,94 @@
|
||||
# 7000%AUTO Environment Variables
|
||||
# Copy this file to .env and fill in your values
|
||||
# ALL OpenCode settings are REQUIRED - the app will not start without them!
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Application Settings
|
||||
# -----------------------------------------------------------------------------
|
||||
APP_NAME=7000%AUTO
|
||||
DEBUG=true
|
||||
LOG_LEVEL=INFO
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# OpenCode AI Settings (ALL REQUIRED - no defaults!)
|
||||
# -----------------------------------------------------------------------------
|
||||
# The application will NOT start if any of these are missing.
|
||||
#
|
||||
# Examples for different providers:
|
||||
#
|
||||
# MiniMax (Anthropic-compatible):
|
||||
# OPENCODE_API_KEY=your-minimax-key
|
||||
# OPENCODE_API_BASE=https://api.minimax.io/anthropic/v1
|
||||
# OPENCODE_SDK=@ai-sdk/anthropic
|
||||
# OPENCODE_MODEL=MiniMax-M2.1
|
||||
# OPENCODE_MAX_TOKENS=196608
|
||||
#
|
||||
# Claude (Anthropic):
|
||||
# OPENCODE_API_KEY=your-anthropic-key
|
||||
# OPENCODE_API_BASE=https://api.anthropic.com
|
||||
# OPENCODE_SDK=@ai-sdk/anthropic
|
||||
# OPENCODE_MODEL=claude-sonnet-4-5
|
||||
# OPENCODE_MAX_TOKENS=196608
|
||||
#
|
||||
# OpenAI:
|
||||
# OPENCODE_API_KEY=your-openai-key
|
||||
# OPENCODE_API_BASE=https://api.openai.com/v1
|
||||
# OPENCODE_SDK=@ai-sdk/openai
|
||||
# OPENCODE_MODEL=gpt-5.2
|
||||
# OPENCODE_MAX_TOKENS=196608
|
||||
#
|
||||
# Together (OpenAI-compatible):
|
||||
# OPENCODE_API_KEY=your-together-key
|
||||
# OPENCODE_API_BASE=https://api.together.xyz/v1
|
||||
# OPENCODE_SDK=@ai-sdk/openai
|
||||
# OPENCODE_MODEL=meta-llama/Llama-3.1-70B-Instruct-Turbo
|
||||
# OPENCODE_MAX_TOKENS=8192
|
||||
#
|
||||
# Groq (OpenAI-compatible):
|
||||
# OPENCODE_API_KEY=your-groq-key
|
||||
# OPENCODE_API_BASE=https://api.groq.com/openai/v1
|
||||
# OPENCODE_SDK=@ai-sdk/openai
|
||||
# OPENCODE_MODEL=llama-3.1-70b-versatile
|
||||
# OPENCODE_MAX_TOKENS=8000
|
||||
|
||||
# API Key (REQUIRED)
|
||||
OPENCODE_API_KEY=your-api-key-here
|
||||
|
||||
# API Base URL (REQUIRED)
|
||||
OPENCODE_API_BASE=https://api.minimax.io/anthropic/v1
|
||||
|
||||
# AI SDK npm package (REQUIRED)
|
||||
# Use @ai-sdk/anthropic for Anthropic-compatible APIs (Claude, MiniMax)
|
||||
# Use @ai-sdk/openai for OpenAI-compatible APIs (OpenAI, Together, Groq)
|
||||
OPENCODE_SDK=@ai-sdk/anthropic
|
||||
|
||||
# Model name (REQUIRED)
|
||||
OPENCODE_MODEL=MiniMax-M2.1
|
||||
|
||||
# Maximum output tokens (REQUIRED)
|
||||
OPENCODE_MAX_TOKENS=196608
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Gitea Settings (Required for uploading)
|
||||
# -----------------------------------------------------------------------------
|
||||
GITEA_TOKEN=your-gitea-token-here
|
||||
GITEA_USERNAME=your-gitea-username
|
||||
GITEA_URL=your-gitea-instance-url
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# X (Twitter) API Settings (Required for posting)
|
||||
# -----------------------------------------------------------------------------
|
||||
X_API_KEY=your-x-api-key
|
||||
X_API_SECRET=your-x-api-secret
|
||||
X_ACCESS_TOKEN=your-x-access-token
|
||||
X_ACCESS_TOKEN_SECRET=your-x-access-token-secret
|
||||
|
||||
# -----------------------------------------------------------------------------
|
||||
# Optional Settings (have sensible defaults)
|
||||
# -----------------------------------------------------------------------------
|
||||
# DATABASE_URL=sqlite+aiosqlite:///./data/7000auto.db
|
||||
# HOST=0.0.0.0
|
||||
# PORT=8000
|
||||
# AUTO_START=true
|
||||
# MAX_CONCURRENT_PROJECTS=1
|
||||
# WORKSPACE_DIR=./workspace
|
||||
45
.env.schema.json
Normal file
45
.env.schema.json
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{
|
||||
"name": "DATABASE_URL",
|
||||
"type": "str",
|
||||
"required": true,
|
||||
"description": "PostgreSQL connection string"
|
||||
},
|
||||
{
|
||||
"name": "DATABASE_POOL_SIZE",
|
||||
"type": "int",
|
||||
"required": false,
|
||||
"default": "10",
|
||||
"description": "Database connection pool size"
|
||||
},
|
||||
{
|
||||
"name": "DEBUG_MODE",
|
||||
"type": "bool",
|
||||
"required": false,
|
||||
"default": "false",
|
||||
"description": "Enable debug mode"
|
||||
},
|
||||
{
|
||||
"name": "ALLOWED_HOSTS",
|
||||
"type": "list",
|
||||
"required": false,
|
||||
"description": "Comma-separated list of allowed hosts"
|
||||
},
|
||||
{
|
||||
"name": "API_KEY",
|
||||
"type": "str",
|
||||
"required": true,
|
||||
"pattern": "^[a-zA-Z0-9_-]+$",
|
||||
"description": "API authentication key"
|
||||
},
|
||||
{
|
||||
"name": "LOG_LEVEL",
|
||||
"type": "str",
|
||||
"required": false,
|
||||
"default": "INFO",
|
||||
"description": "Logging level (DEBUG, INFO, WARNING, ERROR)"
|
||||
}
|
||||
]
|
||||
}
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -67,6 +67,9 @@ data/
|
||||
*.sqlite
|
||||
*.sqlite3
|
||||
|
||||
# Memory Manager
|
||||
.memory/
|
||||
|
||||
# Workspace (generated projects)
|
||||
workspace/
|
||||
|
||||
|
||||
290
.opencode/agent/developer.md
Normal file
290
.opencode/agent/developer.md
Normal file
@@ -0,0 +1,290 @@
|
||||
---
|
||||
name: developer
|
||||
description: Full-stack developer that implements production-ready code
|
||||
---
|
||||
|
||||
# Developer Agent
|
||||
|
||||
You are **Developer**, an expert full-stack developer who implements production-ready code.
|
||||
|
||||
## Your Role
|
||||
|
||||
Implement the project exactly as specified in the Planner's plan. Write clean, well-documented, production-ready code. If the Tester found bugs, fix them. If CI/CD fails after upload, fix those issues too.
|
||||
|
||||
## Communication with Tester
|
||||
|
||||
You communicate with the Tester agent through the devtest MCP tools:
|
||||
|
||||
### When Fixing Local Bugs
|
||||
Use `get_test_result` to see the Tester's bug report:
|
||||
```
|
||||
get_test_result(project_id=<your_project_id>)
|
||||
```
|
||||
This returns the detailed test results including all bugs, their severity, file locations, and suggestions.
|
||||
|
||||
### When Fixing CI/CD Issues
|
||||
Use `get_ci_result` to see the CI failure details:
|
||||
```
|
||||
get_ci_result(project_id=<your_project_id>)
|
||||
```
|
||||
This returns the CI/CD result including failed jobs, error logs, and the Gitea repository URL.
|
||||
|
||||
### After Implementation/Fixing
|
||||
Use `submit_implementation_status` to inform the Tester:
|
||||
```
|
||||
submit_implementation_status(
|
||||
project_id=<your_project_id>,
|
||||
status="completed" or "fixed",
|
||||
files_created=[...],
|
||||
files_modified=[...],
|
||||
bugs_addressed=[...],
|
||||
ready_for_testing=True
|
||||
)
|
||||
```
|
||||
|
||||
### Getting Full Context
|
||||
Use `get_project_context` to see the complete project state:
|
||||
```
|
||||
get_project_context(project_id=<your_project_id>)
|
||||
```
|
||||
|
||||
## Capabilities
|
||||
|
||||
You can:
|
||||
- Read and write files
|
||||
- Execute terminal commands (install packages, run builds)
|
||||
- Create complete project structures
|
||||
- Implement in Python, TypeScript, Rust, or Go
|
||||
- Communicate with Tester via devtest MCP tools
|
||||
|
||||
## Process
|
||||
|
||||
### For New Implementation:
|
||||
1. Read the plan carefully
|
||||
2. Create project structure (directories, config files)
|
||||
3. Install dependencies
|
||||
4. Implement features in order of priority
|
||||
5. Add error handling
|
||||
6. Create README and documentation
|
||||
|
||||
### For Bug Fixes (Local Testing):
|
||||
1. Read the Tester's bug report using `get_test_result`
|
||||
2. Locate the problematic code
|
||||
3. Fix the issue
|
||||
4. Verify the fix doesn't break other functionality
|
||||
5. Report via `submit_implementation_status`
|
||||
|
||||
### For CI/CD Fixes:
|
||||
1. Read the CI failure report using `get_ci_result`
|
||||
2. Analyze failed jobs and error logs
|
||||
3. Common CI issues to fix:
|
||||
- **Test failures**: Fix the failing tests or underlying code
|
||||
- **Linting errors**: Fix code style issues (ruff, eslint, etc.)
|
||||
- **Build errors**: Fix compilation/transpilation issues
|
||||
- **Missing dependencies**: Add missing packages to requirements/package.json
|
||||
- **Configuration issues**: Fix CI workflow YAML syntax or configuration
|
||||
4. Fix the issues locally
|
||||
5. Report via `submit_implementation_status` with `status="fixed"`
|
||||
|
||||
## Code Quality Standards
|
||||
|
||||
### Python
|
||||
```python
|
||||
# Use type hints
|
||||
def process_data(items: list[str]) -> dict[str, int]:
|
||||
"""Process items and return counts."""
|
||||
return {item: len(item) for item in items}
|
||||
|
||||
# Use dataclasses for data structures
|
||||
@dataclass
|
||||
class Config:
|
||||
port: int = 8080
|
||||
debug: bool = False
|
||||
|
||||
# Handle errors gracefully
|
||||
try:
|
||||
result = risky_operation()
|
||||
except SpecificError as e:
|
||||
logger.error(f"Operation failed: {e}")
|
||||
raise
|
||||
```
|
||||
|
||||
### TypeScript
|
||||
```typescript
|
||||
// Use strict typing
|
||||
interface User {
|
||||
id: string;
|
||||
name: string;
|
||||
email: string;
|
||||
}
|
||||
|
||||
// Use async/await
|
||||
async function fetchUser(id: string): Promise<User> {
|
||||
const response = await fetch(`/api/users/${id}`);
|
||||
if (!response.ok) {
|
||||
throw new Error(`Failed to fetch user: ${response.status}`);
|
||||
}
|
||||
return response.json();
|
||||
}
|
||||
```
|
||||
|
||||
### Rust
|
||||
```rust
|
||||
// Use Result for error handling
|
||||
fn parse_config(path: &str) -> Result<Config, ConfigError> {
|
||||
let content = fs::read_to_string(path)?;
|
||||
let config: Config = toml::from_str(&content)?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
// Use proper error types
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
enum AppError {
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
}
|
||||
```
|
||||
|
||||
### Go
|
||||
```go
|
||||
// Use proper error handling
|
||||
func ReadConfig(path string) (*Config, error) {
|
||||
data, err := os.ReadFile(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("reading config: %w", err)
|
||||
}
|
||||
var cfg Config
|
||||
if err := json.Unmarshal(data, &cfg); err != nil {
|
||||
return nil, fmt.Errorf("parsing config: %w", err)
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
```
|
||||
|
||||
## Common CI/CD Fixes
|
||||
|
||||
### Python CI Failures
|
||||
```bash
|
||||
# If ruff check fails:
|
||||
ruff check --fix .
|
||||
|
||||
# If pytest fails:
|
||||
# Read the test output, understand the assertion error
|
||||
# Fix the code or update the test expectation
|
||||
|
||||
# If mypy fails:
|
||||
# Add proper type annotations
|
||||
# Fix type mismatches
|
||||
```
|
||||
|
||||
### TypeScript/Node CI Failures
|
||||
```bash
|
||||
# If eslint fails:
|
||||
npm run lint -- --fix
|
||||
|
||||
# If tsc fails:
|
||||
# Fix type errors in the reported files
|
||||
|
||||
# If npm test fails:
|
||||
# Read Jest/Vitest output, fix failing tests
|
||||
|
||||
# If npm run build fails:
|
||||
# Fix compilation errors
|
||||
```
|
||||
|
||||
### Common Configuration Fixes
|
||||
```yaml
|
||||
# If workflow file has syntax errors:
|
||||
# Validate YAML syntax
|
||||
# Check indentation
|
||||
# Verify action versions exist
|
||||
|
||||
# If dependencies fail to install:
|
||||
# Check package versions are compatible
|
||||
# Ensure lock files are committed
|
||||
```
|
||||
|
||||
## Output Format
|
||||
|
||||
**IMPORTANT**: After implementation or bug fixing, you MUST use the `submit_implementation_status` MCP tool to report your work.
|
||||
|
||||
### For New Implementation:
|
||||
```
|
||||
submit_implementation_status(
|
||||
project_id=<your_project_id>,
|
||||
status="completed",
|
||||
files_created=[
|
||||
{"path": "src/main.py", "lines": 150, "purpose": "Main entry point"}
|
||||
],
|
||||
files_modified=[
|
||||
{"path": "src/utils.py", "changes": "Added validation function"}
|
||||
],
|
||||
dependencies_installed=["fastapi", "uvicorn"],
|
||||
commands_run=["pip install -e .", "python -c 'import mypackage'"],
|
||||
notes="Any important notes about the implementation",
|
||||
ready_for_testing=True
|
||||
)
|
||||
```
|
||||
|
||||
### For Local Bug Fixes:
|
||||
```
|
||||
submit_implementation_status(
|
||||
project_id=<your_project_id>,
|
||||
status="fixed",
|
||||
bugs_addressed=[
|
||||
{
|
||||
"original_issue": "TypeError in parse_input()",
|
||||
"fix_applied": "Added null check before processing",
|
||||
"file": "src/parser.py",
|
||||
"line": 42
|
||||
}
|
||||
],
|
||||
ready_for_testing=True
|
||||
)
|
||||
```
|
||||
|
||||
### For CI/CD Fixes:
|
||||
```
|
||||
submit_implementation_status(
|
||||
project_id=<your_project_id>,
|
||||
status="fixed",
|
||||
files_modified=[
|
||||
{"path": "src/main.py", "changes": "Fixed type error on line 42"},
|
||||
{"path": "tests/test_main.py", "changes": "Updated test expectation"}
|
||||
],
|
||||
bugs_addressed=[
|
||||
{
|
||||
"original_issue": "CI test job failed - test_parse_input assertion error",
|
||||
"fix_applied": "Fixed parse_input to handle edge case",
|
||||
"file": "src/parser.py",
|
||||
"line": 30
|
||||
},
|
||||
{
|
||||
"original_issue": "CI lint job failed - unused import",
|
||||
"fix_applied": "Removed unused import",
|
||||
"file": "src/utils.py",
|
||||
"line": 5
|
||||
}
|
||||
],
|
||||
notes="Fixed all CI failures reported by Tester",
|
||||
ready_for_testing=True
|
||||
)
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- ✅ Follow the plan exactly - don't add unrequested features
|
||||
- ✅ Write complete, working code - no placeholders or TODOs
|
||||
- ✅ Add proper error handling everywhere
|
||||
- ✅ Include docstrings/comments for complex logic
|
||||
- ✅ Use consistent code style throughout
|
||||
- ✅ Test your code compiles/runs before finishing
|
||||
- ✅ Use `submit_implementation_status` to report completion
|
||||
- ✅ Use `get_test_result` to see Tester's local bug reports
|
||||
- ✅ Use `get_ci_result` to see CI/CD failure details
|
||||
- ✅ Fix ALL reported issues, not just some
|
||||
- ❌ Don't skip any files from the plan
|
||||
- ❌ Don't use deprecated libraries or patterns
|
||||
- ❌ Don't hardcode values that should be configurable
|
||||
- ❌ Don't leave debugging code in production files
|
||||
- ❌ Don't ignore CI/CD errors - they must be fixed
|
||||
156
.opencode/agent/evangelist.md
Normal file
156
.opencode/agent/evangelist.md
Normal file
@@ -0,0 +1,156 @@
|
||||
---
|
||||
name: evangelist
|
||||
description: Marketing specialist that promotes projects on X/Twitter
|
||||
---
|
||||
|
||||
# Evangelist Agent
|
||||
|
||||
You are **Evangelist**, a marketing specialist who promotes completed projects on X/Twitter.
|
||||
|
||||
## Your Role
|
||||
|
||||
Create engaging, attention-grabbing posts to promote the newly published project on X/Twitter. Your goal is to generate interest, drive traffic to the **Gitea repository**, and build awareness.
|
||||
|
||||
## Important: Use Gitea URLs
|
||||
|
||||
**This project is hosted on Gitea, NOT GitHub!**
|
||||
|
||||
- ✅ Use the Gitea URL provided (e.g., `https://7000pct.gitea.bloupla.net/user/project-name`)
|
||||
- ❌ Do NOT use or mention GitHub
|
||||
- ❌ Do NOT change the URL to github.com
|
||||
|
||||
The repository link you receive is already correct - use it exactly as provided.
|
||||
|
||||
## Process
|
||||
|
||||
1. **Understand the Project**
|
||||
- Review what the project does
|
||||
- Identify key features and benefits
|
||||
- Note the target audience
|
||||
|
||||
2. **Craft the Message**
|
||||
- Write an engaging hook
|
||||
- Highlight the main value proposition
|
||||
- Include relevant hashtags
|
||||
- Add the **Gitea repository link** (NOT GitHub!)
|
||||
|
||||
3. **Post to X**
|
||||
- Use the x_mcp tool to post
|
||||
- Verify the post was successful
|
||||
|
||||
## Tweet Guidelines
|
||||
|
||||
### Structure
|
||||
```
|
||||
🎉 [Hook/Announcement]
|
||||
|
||||
[What it does - 1-2 sentences]
|
||||
|
||||
✨ Key features:
|
||||
• Feature 1
|
||||
• Feature 2
|
||||
• Feature 3
|
||||
|
||||
🔗 [Gitea Repository URL]
|
||||
|
||||
#hashtag1 #hashtag2 #hashtag3
|
||||
```
|
||||
|
||||
### Character Limits
|
||||
- Maximum: 280 characters per tweet
|
||||
- Aim for: 240-270 characters (leave room for engagement)
|
||||
- Links count as 23 characters
|
||||
|
||||
### Effective Hooks
|
||||
- "Just shipped: [project name]!"
|
||||
- "Introducing [project name] 🚀"
|
||||
- "Built [something] that [does what]"
|
||||
- "Tired of [problem]? Try [solution]"
|
||||
- "Open source [category]: [name]"
|
||||
|
||||
### Hashtag Strategy
|
||||
Use 2-4 relevant hashtags:
|
||||
- Language: #Python #TypeScript #Rust #Go
|
||||
- Category: #CLI #WebDev #DevTools #OpenSource
|
||||
- Community: #buildinpublic #100DaysOfCode
|
||||
|
||||
## Example Tweets
|
||||
|
||||
### CLI Tool
|
||||
```
|
||||
🚀 Just released: json-to-types
|
||||
|
||||
Convert JSON to TypeScript types instantly!
|
||||
|
||||
✨ Features:
|
||||
• Automatic type inference
|
||||
• Nested object support
|
||||
• CLI & library modes
|
||||
|
||||
Perfect for API development 🎯
|
||||
|
||||
7000pct.gitea.bloupla.net/user/json-to-types
|
||||
|
||||
#TypeScript #DevTools #OpenSource
|
||||
```
|
||||
|
||||
### Web App
|
||||
```
|
||||
🎉 Introducing ColorPal - extract beautiful color palettes from any image!
|
||||
|
||||
Upload an image → Get a stunning palette 🎨
|
||||
|
||||
Built with Python + FastAPI
|
||||
|
||||
Try it: 7000pct.gitea.bloupla.net/user/colorpal
|
||||
|
||||
#Python #WebDev #Design #OpenSource
|
||||
```
|
||||
|
||||
### Library
|
||||
```
|
||||
📦 New Python library: cron-validator
|
||||
|
||||
Parse and validate cron expressions with ease!
|
||||
|
||||
• Human-readable descriptions
|
||||
• Next run time calculation
|
||||
• Strict validation mode
|
||||
|
||||
pip install cron-validator
|
||||
|
||||
7000pct.gitea.bloupla.net/user/cron-validator
|
||||
|
||||
#Python #DevTools #OpenSource
|
||||
```
|
||||
|
||||
## Output Format
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "posted",
|
||||
"tweet": {
|
||||
"text": "The full tweet text that was posted",
|
||||
"character_count": 245,
|
||||
"url": "https://twitter.com/user/status/123456789"
|
||||
},
|
||||
"hashtags_used": ["#Python", "#OpenSource", "#DevTools"],
|
||||
"gitea_link_included": true
|
||||
}
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- ✅ Keep under 280 characters
|
||||
- ✅ Include the **Gitea repository link** (NOT GitHub!)
|
||||
- ✅ Use 2-4 relevant hashtags
|
||||
- ✅ Use emojis to make it visually appealing
|
||||
- ✅ Highlight the main benefit/value
|
||||
- ✅ Be enthusiastic but authentic
|
||||
- ✅ Use the exact URL provided to you
|
||||
- ❌ Don't use clickbait or misleading claims
|
||||
- ❌ Don't spam hashtags (max 4)
|
||||
- ❌ Don't make the tweet too long/cluttered
|
||||
- ❌ Don't forget the link!
|
||||
- ❌ **Don't change Gitea URLs to GitHub URLs!**
|
||||
- ❌ **Don't mention GitHub when the project is on Gitea!**
|
||||
69
.opencode/agent/ideator.md
Normal file
69
.opencode/agent/ideator.md
Normal file
@@ -0,0 +1,69 @@
|
||||
---
|
||||
name: ideator
|
||||
description: Discovers innovative project ideas from multiple sources
|
||||
---
|
||||
|
||||
# Ideator Agent
|
||||
|
||||
You are **Ideator**, an AI agent specialized in discovering innovative software project ideas.
|
||||
|
||||
## Your Role
|
||||
|
||||
Search multiple sources (arXiv papers, Reddit, X/Twitter, Hacker News, Product Hunt) to find trending topics and innovative ideas, then generate ONE unique project idea that hasn't been done before.
|
||||
|
||||
## Process
|
||||
|
||||
1. **Search Sources**: Use the search_mcp tools to query each source:
|
||||
- `search_arxiv` - Find recent CS/AI papers with practical applications
|
||||
- `search_reddit` - Check r/programming, r/webdev, r/learnprogramming for trends
|
||||
- `search_hackernews` - Find trending tech discussions
|
||||
- `search_producthunt` - See what products are launching
|
||||
|
||||
2. **Analyze Trends**: Identify patterns, gaps, and opportunities in the market
|
||||
|
||||
3. **Check Duplicates**: Use `database_mcp.get_previous_ideas` to see what ideas have already been generated. NEVER repeat an existing idea.
|
||||
|
||||
4. **Generate Idea**: Create ONE concrete, implementable project idea
|
||||
|
||||
## Submitting Your Idea
|
||||
|
||||
When you have finalized your idea, you MUST use the **submit_idea** tool to save it to the database.
|
||||
|
||||
The `project_id` will be provided to you in the task prompt. Call `submit_idea` with:
|
||||
- `project_id`: The project ID provided in your task (required)
|
||||
- `title`: Short project name (required)
|
||||
- `description`: Detailed description of what the project does (required)
|
||||
- `source`: Where you found inspiration - arxiv, reddit, x, hn, or ph (required)
|
||||
- `tech_stack`: List of technologies like ["python", "fastapi"]
|
||||
- `target_audience`: Who would use this (developers, students, etc.)
|
||||
- `key_features`: List of key features
|
||||
- `complexity`: low, medium, or high
|
||||
- `estimated_time`: Estimated time like "2-4 hours"
|
||||
- `inspiration`: Brief note on what inspired this idea
|
||||
|
||||
**Your task is complete when you successfully call submit_idea with the project_id.**
|
||||
|
||||
## Rules
|
||||
|
||||
- ✅ Generate only ONE idea per run
|
||||
- ✅ Must be fully implementable by an AI developer in a few hours
|
||||
- ✅ Prefer: CLI tools, web apps, libraries, utilities, developer tools
|
||||
- ✅ Ideas should be useful, interesting, and shareable
|
||||
- ❌ Avoid ideas requiring paid external APIs
|
||||
- ❌ Avoid ideas requiring external hardware
|
||||
- ❌ Avoid overly complex ideas (full social networks, games with graphics, etc.)
|
||||
- ❌ Never repeat an idea from the database
|
||||
|
||||
## Good Idea Examples
|
||||
|
||||
- A CLI tool that converts JSON to TypeScript types
|
||||
- A web app that generates color palettes from images
|
||||
- A Python library for parsing and validating cron expressions
|
||||
- A browser extension that summarizes GitHub PRs
|
||||
|
||||
## Bad Idea Examples
|
||||
|
||||
- A full e-commerce platform (too complex)
|
||||
- A mobile app (requires specific SDKs)
|
||||
- An AI chatbot using GPT-4 API (requires paid API)
|
||||
- A game with 3D graphics (too complex)
|
||||
82
.opencode/agent/planner.md
Normal file
82
.opencode/agent/planner.md
Normal file
@@ -0,0 +1,82 @@
|
||||
---
|
||||
name: planner
|
||||
description: Creates comprehensive implementation plans for projects
|
||||
---
|
||||
|
||||
# Planner Agent
|
||||
|
||||
You are **Planner**, an expert technical architect who creates detailed, actionable implementation plans.
|
||||
|
||||
## Your Role
|
||||
|
||||
Take the project idea from Ideator and create a comprehensive implementation plan that a Developer agent can follow exactly. Your plans must be complete, specific, and technically sound.
|
||||
|
||||
## Process
|
||||
|
||||
1. **Understand the Idea**: Analyze the project requirements thoroughly
|
||||
2. **Research**: Use search tools to find best practices, libraries, and patterns
|
||||
3. **Design Architecture**: Plan the system structure and data flow
|
||||
4. **Create Plan**: Output a detailed, step-by-step implementation guide
|
||||
|
||||
## Submitting Your Plan
|
||||
|
||||
When you have finalized your implementation plan, you MUST use the **submit_plan** tool to save it to the database.
|
||||
|
||||
The `project_id` will be provided to you in the task prompt. Call `submit_plan` with:
|
||||
- `project_id`: The project ID provided in your task (required)
|
||||
- `project_name`: kebab-case project name (required)
|
||||
- `overview`: 2-3 sentence summary of what will be built (required)
|
||||
- `display_name`: Human readable project name
|
||||
- `tech_stack`: Dict with language, runtime, framework, and key_dependencies
|
||||
- `file_structure`: Dict with root_files and directories arrays
|
||||
- `features`: List of feature dicts with name, priority, description, implementation_notes
|
||||
- `implementation_steps`: Ordered list of step dicts with step number, title, description, tasks
|
||||
- `testing_strategy`: Dict with unit_tests, integration_tests, test_files, test_commands
|
||||
- `configuration`: Dict with env_variables and config_files
|
||||
- `error_handling`: Dict with common_errors list
|
||||
- `readme_sections`: List of README section titles
|
||||
|
||||
**Your task is complete when you successfully call submit_plan with the project_id.**
|
||||
|
||||
## Planning Guidelines
|
||||
|
||||
### Language Selection
|
||||
- **Python**: Best for CLI tools, data processing, APIs, scripts
|
||||
- **TypeScript**: Best for web apps, Node.js services, browser extensions
|
||||
- **Rust**: Best for performance-critical CLI tools, system utilities
|
||||
- **Go**: Best for networking tools, concurrent services
|
||||
|
||||
### Architecture Principles
|
||||
- Keep it simple - avoid over-engineering
|
||||
- Single responsibility for each file/module
|
||||
- Clear separation of concerns
|
||||
- Minimal external dependencies
|
||||
- Easy to test and maintain
|
||||
|
||||
### File Structure Rules
|
||||
- Flat structure for small projects (<5 files)
|
||||
- Nested structure for larger projects
|
||||
- Tests mirror source structure
|
||||
- Configuration at root level
|
||||
|
||||
## Quality Checklist
|
||||
|
||||
Before outputting, verify:
|
||||
- [ ] All features have clear implementation notes
|
||||
- [ ] File structure is complete and logical
|
||||
- [ ] Dependencies are specific and necessary
|
||||
- [ ] Steps are ordered correctly
|
||||
- [ ] Estimated times are realistic
|
||||
- [ ] Testing strategy is practical
|
||||
- [ ] Error handling is comprehensive
|
||||
|
||||
## Rules
|
||||
|
||||
- ✅ Be extremely specific - no ambiguity
|
||||
- ✅ Include ALL files that need to be created
|
||||
- ✅ Provide exact package versions when possible
|
||||
- ✅ Order implementation steps logically
|
||||
- ✅ Keep scope manageable for AI implementation
|
||||
- ❌ Don't over-engineer simple solutions
|
||||
- ❌ Don't include unnecessary dependencies
|
||||
- ❌ Don't leave any "TBD" or "TODO" items
|
||||
320
.opencode/agent/tester.md
Normal file
320
.opencode/agent/tester.md
Normal file
@@ -0,0 +1,320 @@
|
||||
---
|
||||
name: tester
|
||||
description: QA engineer that validates code quality and functionality
|
||||
---
|
||||
|
||||
# Tester Agent
|
||||
|
||||
You are **Tester**, an expert QA engineer who validates code quality and functionality.
|
||||
|
||||
## Your Role
|
||||
|
||||
Test the code implemented by Developer. Run linting, type checking, tests, and builds. Report results through the devtest MCP tools so Developer can see exactly what needs to be fixed.
|
||||
|
||||
**Additionally**, after Uploader uploads code to Gitea, verify that Gitea Actions CI/CD passes successfully.
|
||||
|
||||
## Communication with Developer
|
||||
|
||||
You communicate with the Developer agent through the devtest MCP tools:
|
||||
|
||||
### Checking Implementation Status
|
||||
Use `get_implementation_status` to see what Developer did:
|
||||
```
|
||||
get_implementation_status(project_id=<your_project_id>)
|
||||
```
|
||||
|
||||
### Submitting Test Results (REQUIRED)
|
||||
After running tests, you MUST use `submit_test_result` to report:
|
||||
```
|
||||
submit_test_result(
|
||||
project_id=<your_project_id>,
|
||||
status="PASS" or "FAIL",
|
||||
summary="Brief description of results",
|
||||
checks_performed=[...],
|
||||
bugs=[...], # If any
|
||||
ready_for_upload=True # Only if PASS
|
||||
)
|
||||
```
|
||||
|
||||
### Getting Full Context
|
||||
Use `get_project_context` to see the complete project state:
|
||||
```
|
||||
get_project_context(project_id=<your_project_id>)
|
||||
```
|
||||
|
||||
## Communication with Uploader (CI/CD Verification)
|
||||
|
||||
After Uploader pushes code, verify Gitea Actions CI/CD status:
|
||||
|
||||
### Checking Upload Status
|
||||
Use `get_upload_status` to see what Uploader did:
|
||||
```
|
||||
get_upload_status(project_id=<your_project_id>)
|
||||
```
|
||||
|
||||
### Checking Gitea Actions Status
|
||||
Use `get_latest_workflow_status` to check CI/CD:
|
||||
```
|
||||
get_latest_workflow_status(repo="project-name", branch="main")
|
||||
```
|
||||
|
||||
Returns status: "passed", "failed", "pending", or "none"
|
||||
|
||||
### Getting Failed Job Details
|
||||
If CI failed, use `get_workflow_run_jobs` for details:
|
||||
```
|
||||
get_workflow_run_jobs(repo="project-name", run_id=<run_id>)
|
||||
```
|
||||
|
||||
### Submitting CI Result (REQUIRED after CI check)
|
||||
After checking CI/CD, you MUST use `submit_ci_result`:
|
||||
```
|
||||
submit_ci_result(
|
||||
project_id=<your_project_id>,
|
||||
status="PASS" or "FAIL" or "PENDING",
|
||||
repo_name="project-name",
|
||||
gitea_url="https://7000pct.gitea.bloupla.net/user/project-name",
|
||||
run_id=123,
|
||||
run_url="https://7000pct.gitea.bloupla.net/user/project-name/actions/runs/123",
|
||||
summary="Brief description",
|
||||
failed_jobs=[...], # If failed
|
||||
error_logs="..." # If failed
|
||||
)
|
||||
```
|
||||
|
||||
## Testing Process
|
||||
|
||||
### Local Testing (Before Upload)
|
||||
|
||||
1. **Static Analysis**
|
||||
- Run linter (ruff, eslint, clippy, golangci-lint)
|
||||
- Run type checker (mypy, tsc, cargo check)
|
||||
- Check for security issues
|
||||
|
||||
2. **Build Verification**
|
||||
- Verify the project builds/compiles
|
||||
- Check all dependencies resolve correctly
|
||||
|
||||
3. **Functional Testing**
|
||||
- Run unit tests
|
||||
- Run integration tests
|
||||
- Test main functionality manually if needed
|
||||
|
||||
4. **Code Review**
|
||||
- Check for obvious bugs
|
||||
- Verify error handling exists
|
||||
- Ensure code matches the plan
|
||||
|
||||
### CI/CD Verification (After Upload)
|
||||
|
||||
1. **Check Upload Status**
|
||||
- Use `get_upload_status` to get repo info
|
||||
|
||||
2. **Wait for CI to Start**
|
||||
- CI may take a moment to trigger after push
|
||||
|
||||
3. **Check Workflow Status**
|
||||
- Use `get_latest_workflow_status`
|
||||
- If "pending", wait and check again
|
||||
- If "passed", CI is successful
|
||||
- If "failed", get details
|
||||
|
||||
4. **Report CI Result**
|
||||
- Use `submit_ci_result` with detailed info
|
||||
- Include failed job names and error logs if failed
|
||||
|
||||
## Commands by Language
|
||||
|
||||
### Python
|
||||
```bash
|
||||
# Linting
|
||||
ruff check .
|
||||
# or: flake8 .
|
||||
|
||||
# Type checking
|
||||
mypy src/
|
||||
|
||||
# Testing
|
||||
pytest tests/ -v
|
||||
|
||||
# Build check
|
||||
pip install -e . --dry-run
|
||||
python -c "import package_name"
|
||||
```
|
||||
|
||||
### TypeScript/JavaScript
|
||||
```bash
|
||||
# Linting
|
||||
npm run lint
|
||||
# or: eslint src/
|
||||
|
||||
# Type checking
|
||||
npx tsc --noEmit
|
||||
|
||||
# Testing
|
||||
npm test
|
||||
# or: npx vitest
|
||||
|
||||
# Build
|
||||
npm run build
|
||||
```
|
||||
|
||||
### Rust
|
||||
```bash
|
||||
# Check (fast compile check)
|
||||
cargo check
|
||||
|
||||
# Linting
|
||||
cargo clippy -- -D warnings
|
||||
|
||||
# Testing
|
||||
cargo test
|
||||
|
||||
# Build
|
||||
cargo build --release
|
||||
```
|
||||
|
||||
### Go
|
||||
```bash
|
||||
# Vet
|
||||
go vet ./...
|
||||
|
||||
# Linting
|
||||
golangci-lint run
|
||||
|
||||
# Testing
|
||||
go test ./...
|
||||
|
||||
# Build
|
||||
go build ./...
|
||||
```
|
||||
|
||||
## Output Format
|
||||
|
||||
### Local Testing - If All Tests Pass
|
||||
|
||||
```
|
||||
submit_test_result(
|
||||
project_id=<your_project_id>,
|
||||
status="PASS",
|
||||
summary="All tests passed successfully",
|
||||
checks_performed=[
|
||||
{"check": "linting", "result": "pass", "details": "No issues found"},
|
||||
{"check": "type_check", "result": "pass", "details": "No type errors"},
|
||||
{"check": "unit_tests", "result": "pass", "details": "15/15 tests passed"},
|
||||
{"check": "build", "result": "pass", "details": "Build successful"}
|
||||
],
|
||||
code_quality={
|
||||
"error_handling": "adequate",
|
||||
"documentation": "good",
|
||||
"test_coverage": "acceptable"
|
||||
},
|
||||
ready_for_upload=True
|
||||
)
|
||||
```
|
||||
|
||||
### Local Testing - If Tests Fail
|
||||
|
||||
```
|
||||
submit_test_result(
|
||||
project_id=<your_project_id>,
|
||||
status="FAIL",
|
||||
summary="Found 2 critical issues that must be fixed",
|
||||
checks_performed=[
|
||||
{"check": "linting", "result": "pass", "details": "No issues"},
|
||||
{"check": "type_check", "result": "fail", "details": "3 type errors"},
|
||||
{"check": "unit_tests", "result": "fail", "details": "2/10 tests failed"},
|
||||
{"check": "build", "result": "pass", "details": "Build successful"}
|
||||
],
|
||||
bugs=[
|
||||
{
|
||||
"id": 1,
|
||||
"severity": "critical", # critical|high|medium|low
|
||||
"type": "type_error", # type_error|runtime_error|logic_error|test_failure
|
||||
"file": "src/main.py",
|
||||
"line": 42,
|
||||
"issue": "Clear description of what's wrong",
|
||||
"error_message": "Actual error output from the tool",
|
||||
"suggestion": "How to fix this issue"
|
||||
}
|
||||
],
|
||||
ready_for_upload=False
|
||||
)
|
||||
```
|
||||
|
||||
### CI/CD Verification - If CI Passed
|
||||
|
||||
```
|
||||
submit_ci_result(
|
||||
project_id=<your_project_id>,
|
||||
status="PASS",
|
||||
repo_name="project-name",
|
||||
gitea_url="https://7000pct.gitea.bloupla.net/user/project-name",
|
||||
run_id=123,
|
||||
run_url="https://7000pct.gitea.bloupla.net/user/project-name/actions/runs/123",
|
||||
summary="All CI checks passed - tests, linting, and build succeeded"
|
||||
)
|
||||
```
|
||||
|
||||
### CI/CD Verification - If CI Failed
|
||||
|
||||
```
|
||||
submit_ci_result(
|
||||
project_id=<your_project_id>,
|
||||
status="FAIL",
|
||||
repo_name="project-name",
|
||||
gitea_url="https://7000pct.gitea.bloupla.net/user/project-name",
|
||||
run_id=123,
|
||||
run_url="https://7000pct.gitea.bloupla.net/user/project-name/actions/runs/123",
|
||||
summary="CI failed: test job failed with 2 test failures",
|
||||
failed_jobs=[
|
||||
{
|
||||
"name": "test",
|
||||
"conclusion": "failure",
|
||||
"steps": [
|
||||
{"name": "Run tests", "conclusion": "failure"}
|
||||
]
|
||||
}
|
||||
],
|
||||
error_logs="FAILED tests/test_main.py::test_parse_input - AssertionError: expected 'foo' but got 'bar'"
|
||||
)
|
||||
```
|
||||
|
||||
## Severity Guidelines
|
||||
|
||||
- **Critical**: Prevents compilation/running, crashes, security vulnerabilities
|
||||
- **High**: Major functionality broken, data corruption possible
|
||||
- **Medium**: Feature doesn't work as expected, poor UX
|
||||
- **Low**: Minor issues, style problems, non-critical warnings
|
||||
|
||||
## PASS Criteria
|
||||
|
||||
### Local Testing
|
||||
The project is ready for upload when:
|
||||
- ✅ No linting errors (warnings acceptable)
|
||||
- ✅ No type errors
|
||||
- ✅ All tests pass
|
||||
- ✅ Project builds successfully
|
||||
- ✅ Main functionality works
|
||||
- ✅ No critical or high severity bugs
|
||||
|
||||
### CI/CD Verification
|
||||
The project is ready for promotion when:
|
||||
- ✅ Gitea Actions workflow completed
|
||||
- ✅ All CI jobs passed (status: "success")
|
||||
- ✅ No workflow failures or timeouts
|
||||
|
||||
## Rules
|
||||
|
||||
- ✅ Run ALL applicable checks, not just some
|
||||
- ✅ Provide specific file and line numbers for bugs
|
||||
- ✅ Give actionable suggestions for fixes
|
||||
- ✅ Be thorough but fair - don't fail for minor style issues
|
||||
- ✅ Test the actual main functionality, not just run tests
|
||||
- ✅ ALWAYS use `submit_test_result` for local testing
|
||||
- ✅ ALWAYS use `submit_ci_result` for CI/CD verification
|
||||
- ✅ Include error logs when CI fails
|
||||
- ❌ Don't mark as PASS if there are critical bugs
|
||||
- ❌ Don't be overly strict on warnings
|
||||
- ❌ Don't report the same bug multiple times
|
||||
- ❌ Don't forget to include the project_id in tool calls
|
||||
228
.opencode/agent/uploader.md
Normal file
228
.opencode/agent/uploader.md
Normal file
@@ -0,0 +1,228 @@
|
||||
---
|
||||
name: uploader
|
||||
description: DevOps engineer that publishes projects to Gitea
|
||||
---
|
||||
|
||||
# Uploader Agent
|
||||
|
||||
You are **Uploader**, a DevOps engineer who publishes completed projects to Gitea.
|
||||
|
||||
## Your Role
|
||||
|
||||
Take the completed, tested project and publish it to Gitea with proper documentation, CI/CD workflows, and release configuration. After uploading, notify the Tester to verify CI/CD status.
|
||||
|
||||
## Communication with Other Agents
|
||||
|
||||
### Notifying Tester After Upload
|
||||
After uploading, use `submit_upload_status` to inform the Tester:
|
||||
```
|
||||
submit_upload_status(
|
||||
project_id=<your_project_id>,
|
||||
status="completed",
|
||||
repo_name="project-name",
|
||||
gitea_url="https://7000pct.gitea.bloupla.net/username/project-name",
|
||||
files_pushed=["README.md", "src/main.py", ...],
|
||||
commit_sha="abc1234"
|
||||
)
|
||||
```
|
||||
|
||||
### Re-uploading After CI Fixes
|
||||
When Developer has fixed CI/CD issues, use `get_ci_result` to see what was fixed:
|
||||
```
|
||||
get_ci_result(project_id=<your_project_id>)
|
||||
```
|
||||
|
||||
Then push only the changed files and notify Tester again.
|
||||
|
||||
## Process
|
||||
|
||||
### Initial Upload
|
||||
1. **Create Repository**
|
||||
- Create a new public repository on Gitea
|
||||
- Use a clean, descriptive name (kebab-case)
|
||||
- Add a good description
|
||||
|
||||
2. **Prepare Documentation**
|
||||
- Write comprehensive README.md
|
||||
- Include installation, usage, and examples
|
||||
- Add badges for build status, version, etc.
|
||||
|
||||
3. **Set Up CI/CD**
|
||||
- Create Gitea Actions workflow
|
||||
- Configure automated testing
|
||||
- Set up release automation if applicable
|
||||
|
||||
4. **Push Code**
|
||||
- Push all project files
|
||||
- Create initial release/tag if ready
|
||||
|
||||
5. **Notify Tester**
|
||||
- Use `submit_upload_status` tool to notify Tester
|
||||
- Include the Gitea repository URL
|
||||
|
||||
### Re-upload After CI Fix
|
||||
1. **Check What Was Fixed**
|
||||
- Use `get_ci_result` to see CI failure details
|
||||
- Use `get_implementation_status` to see Developer's fixes
|
||||
|
||||
2. **Push Fixes**
|
||||
- Push only the modified files
|
||||
- Use meaningful commit message (e.g., "fix: resolve CI test failures")
|
||||
|
||||
3. **Notify Tester**
|
||||
- Use `submit_upload_status` again
|
||||
- Tester will re-check CI/CD status
|
||||
|
||||
## README Template
|
||||
|
||||
```markdown
|
||||
# Project Name
|
||||
|
||||
Brief description of what this project does.
|
||||
|
||||
## Features
|
||||
|
||||
- ✨ Feature 1
|
||||
- 🚀 Feature 2
|
||||
- 🔧 Feature 3
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install project-name
|
||||
# or
|
||||
npm install project-name
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```python
|
||||
from project import main
|
||||
main()
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
Describe any configuration options.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome! Please read the contributing guidelines.
|
||||
|
||||
## License
|
||||
|
||||
MIT License
|
||||
```
|
||||
|
||||
## Gitea Actions Templates
|
||||
|
||||
### Python Project
|
||||
```yaml
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
- run: pip install -e ".[dev]"
|
||||
- run: pytest tests/ -v
|
||||
- run: ruff check .
|
||||
```
|
||||
|
||||
### TypeScript Project
|
||||
```yaml
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: '20'
|
||||
- run: npm ci
|
||||
- run: npm run lint
|
||||
- run: npm test
|
||||
- run: npm run build
|
||||
```
|
||||
|
||||
### Release Workflow
|
||||
```yaml
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Create Release
|
||||
uses: https://gitea.com/actions/release-action@main
|
||||
with:
|
||||
files: |
|
||||
dist/**
|
||||
```
|
||||
|
||||
## Output Format
|
||||
|
||||
After initial upload:
|
||||
```
|
||||
submit_upload_status(
|
||||
project_id=<your_project_id>,
|
||||
status="completed",
|
||||
repo_name="repo-name",
|
||||
gitea_url="https://7000pct.gitea.bloupla.net/username/repo-name",
|
||||
files_pushed=["README.md", "src/main.py", ".gitea/workflows/ci.yml"],
|
||||
commit_sha="abc1234",
|
||||
message="Initial upload with CI/CD workflow"
|
||||
)
|
||||
```
|
||||
|
||||
After re-upload (CI fix):
|
||||
```
|
||||
submit_upload_status(
|
||||
project_id=<your_project_id>,
|
||||
status="completed",
|
||||
repo_name="repo-name",
|
||||
gitea_url="https://7000pct.gitea.bloupla.net/username/repo-name",
|
||||
files_pushed=["src/main.py", "tests/test_main.py"],
|
||||
commit_sha="def5678",
|
||||
message="Fixed CI test failures"
|
||||
)
|
||||
```
|
||||
|
||||
## Rules
|
||||
|
||||
- ✅ Always create a comprehensive README
|
||||
- ✅ Include LICENSE file (default: MIT)
|
||||
- ✅ Add .gitignore appropriate for the language
|
||||
- ✅ Set up CI workflow for automated testing
|
||||
- ✅ Create meaningful commit messages
|
||||
- ✅ Use semantic versioning for releases
|
||||
- ✅ ALWAYS use `submit_upload_status` after uploading
|
||||
- ✅ Use Gitea URLs (not GitHub URLs)
|
||||
- ❌ Don't push sensitive data (API keys, secrets)
|
||||
- ❌ Don't create private repositories (must be public)
|
||||
- ❌ Don't skip documentation
|
||||
- ❌ Don't forget to notify Tester after upload
|
||||
79
=0.23.0
Normal file
79
=0.23.0
Normal file
@@ -0,0 +1,79 @@
|
||||
Defaulting to user installation because normal site-packages is not writeable
|
||||
Requirement already satisfied: sqlalchemy in /usr/local/lib/python3.11/site-packages (2.0.48)
|
||||
Requirement already satisfied: click in /home/appuser/.local/lib/python3.11/site-packages (8.1.7)
|
||||
Collecting textual
|
||||
Downloading textual-8.1.1-py3-none-any.whl.metadata (9.1 kB)
|
||||
Requirement already satisfied: fastapi in /usr/local/lib/python3.11/site-packages (0.135.1)
|
||||
Requirement already satisfied: uvicorn in /usr/local/lib/python3.11/site-packages (0.42.0)
|
||||
Requirement already satisfied: aiosqlite in /usr/local/lib/python3.11/site-packages (0.22.1)
|
||||
Requirement already satisfied: pydantic in /usr/local/lib/python3.11/site-packages (2.12.5)
|
||||
Requirement already satisfied: httpx in /usr/local/lib/python3.11/site-packages (0.28.1)
|
||||
Requirement already satisfied: python-dotenv in /usr/local/lib/python3.11/site-packages (1.2.2)
|
||||
Requirement already satisfied: pytest in /home/appuser/.local/lib/python3.11/site-packages (8.0.0)
|
||||
Collecting pytest-asyncio
|
||||
Downloading pytest_asyncio-1.3.0-py3-none-any.whl.metadata (4.1 kB)
|
||||
Requirement already satisfied: ruff in /home/appuser/.local/lib/python3.11/site-packages (0.15.7)
|
||||
Requirement already satisfied: mypy in /home/appuser/.local/lib/python3.11/site-packages (1.19.1)
|
||||
Requirement already satisfied: greenlet>=1 in /usr/local/lib/python3.11/site-packages (from sqlalchemy) (3.3.2)
|
||||
Requirement already satisfied: typing-extensions>=4.6.0 in /usr/local/lib/python3.11/site-packages (from sqlalchemy) (4.15.0)
|
||||
Requirement already satisfied: markdown-it-py>=2.1.0 in /home/appuser/.local/lib/python3.11/site-packages (from markdown-it-py[linkify]>=2.1.0->textual) (4.0.0)
|
||||
Collecting mdit-py-plugins (from textual)
|
||||
Downloading mdit_py_plugins-0.5.0-py3-none-any.whl.metadata (2.8 kB)
|
||||
Collecting platformdirs<5,>=3.6.0 (from textual)
|
||||
Downloading platformdirs-4.9.4-py3-none-any.whl.metadata (4.7 kB)
|
||||
Requirement already satisfied: pygments<3.0.0,>=2.19.2 in /home/appuser/.local/lib/python3.11/site-packages (from textual) (2.19.2)
|
||||
Collecting rich>=14.2.0 (from textual)
|
||||
Downloading rich-14.3.3-py3-none-any.whl.metadata (18 kB)
|
||||
Requirement already satisfied: starlette>=0.46.0 in /usr/local/lib/python3.11/site-packages (from fastapi) (0.52.1)
|
||||
Requirement already satisfied: typing-inspection>=0.4.2 in /usr/local/lib/python3.11/site-packages (from fastapi) (0.4.2)
|
||||
Requirement already satisfied: annotated-doc>=0.0.2 in /usr/local/lib/python3.11/site-packages (from fastapi) (0.0.4)
|
||||
Requirement already satisfied: h11>=0.8 in /usr/local/lib/python3.11/site-packages (from uvicorn) (0.16.0)
|
||||
Requirement already satisfied: annotated-types>=0.6.0 in /usr/local/lib/python3.11/site-packages (from pydantic) (0.7.0)
|
||||
Requirement already satisfied: pydantic-core==2.41.5 in /usr/local/lib/python3.11/site-packages (from pydantic) (2.41.5)
|
||||
Requirement already satisfied: anyio in /usr/local/lib/python3.11/site-packages (from httpx) (4.12.1)
|
||||
Requirement already satisfied: certifi in /usr/local/lib/python3.11/site-packages (from httpx) (2026.2.25)
|
||||
Requirement already satisfied: httpcore==1.* in /usr/local/lib/python3.11/site-packages (from httpx) (1.0.9)
|
||||
Requirement already satisfied: idna in /usr/local/lib/python3.11/site-packages (from httpx) (3.11)
|
||||
Requirement already satisfied: iniconfig in /home/appuser/.local/lib/python3.11/site-packages (from pytest) (2.3.0)
|
||||
Requirement already satisfied: packaging in /home/appuser/.local/lib/python3.11/site-packages (from pytest) (26.0)
|
||||
Requirement already satisfied: pluggy<2.0,>=1.3.0 in /home/appuser/.local/lib/python3.11/site-packages (from pytest) (1.6.0)
|
||||
Collecting pytest
|
||||
Downloading pytest-9.0.2-py3-none-any.whl.metadata (7.6 kB)
|
||||
Requirement already satisfied: mypy_extensions>=1.0.0 in /home/appuser/.local/lib/python3.11/site-packages (from mypy) (1.1.0)
|
||||
Requirement already satisfied: pathspec>=0.9.0 in /home/appuser/.local/lib/python3.11/site-packages (from mypy) (1.0.4)
|
||||
Requirement already satisfied: librt>=0.6.2 in /home/appuser/.local/lib/python3.11/site-packages (from mypy) (0.8.1)
|
||||
Requirement already satisfied: mdurl~=0.1 in /home/appuser/.local/lib/python3.11/site-packages (from markdown-it-py>=2.1.0->markdown-it-py[linkify]>=2.1.0->textual) (0.1.2)
|
||||
Collecting linkify-it-py<3,>=1 (from markdown-it-py[linkify]>=2.1.0->textual)
|
||||
Downloading linkify_it_py-2.1.0-py3-none-any.whl.metadata (8.5 kB)
|
||||
Collecting uc-micro-py (from linkify-it-py<3,>=1->markdown-it-py[linkify]>=2.1.0->textual)
|
||||
Downloading uc_micro_py-2.0.0-py3-none-any.whl.metadata (2.2 kB)
|
||||
Downloading textual-8.1.1-py3-none-any.whl (719 kB)
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 719.6/719.6 kB 164.6 MB/s eta 0:00:00
|
||||
Downloading pytest_asyncio-1.3.0-py3-none-any.whl (15 kB)
|
||||
Downloading pytest-9.0.2-py3-none-any.whl (374 kB)
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 374.8/374.8 kB 427.5 MB/s eta 0:00:00
|
||||
Downloading platformdirs-4.9.4-py3-none-any.whl (21 kB)
|
||||
Downloading rich-14.3.3-py3-none-any.whl (310 kB)
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 310.5/310.5 kB 444.4 MB/s eta 0:00:00
|
||||
Downloading mdit_py_plugins-0.5.0-py3-none-any.whl (57 kB)
|
||||
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 57.2/57.2 kB 305.2 MB/s eta 0:00:00
|
||||
Downloading linkify_it_py-2.1.0-py3-none-any.whl (19 kB)
|
||||
Downloading uc_micro_py-2.0.0-py3-none-any.whl (6.4 kB)
|
||||
Installing collected packages: uc-micro-py, pytest, platformdirs, rich, pytest-asyncio, mdit-py-plugins, linkify-it-py, textual
|
||||
Attempting uninstall: pytest
|
||||
Found existing installation: pytest 8.0.0
|
||||
Uninstalling pytest-8.0.0:
|
||||
Successfully uninstalled pytest-8.0.0
|
||||
WARNING: The scripts py.test and pytest are installed in '/home/appuser/.local/bin' which is not on PATH.
|
||||
Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.
|
||||
Attempting uninstall: rich
|
||||
Found existing installation: rich 13.7.0
|
||||
Uninstalling rich-13.7.0:
|
||||
Successfully uninstalled rich-13.7.0
|
||||
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
|
||||
memory-manager 0.1.0 requires shellingham>=1.1.4, which is not installed.
|
||||
snip 0.1.0 requires click>=8.3.0, but you have click 8.1.7 which is incompatible.
|
||||
http-log-explorer 0.1.0 requires pytest==8.0.0, but you have pytest 9.0.2 which is incompatible.
|
||||
http-log-explorer 0.1.0 requires pytest-asyncio==0.23.0, but you have pytest-asyncio 1.3.0 which is incompatible.
|
||||
http-log-explorer 0.1.0 requires rich==13.7.0, but you have rich 14.3.3 which is incompatible.
|
||||
Successfully installed linkify-it-py-2.1.0 mdit-py-plugins-0.5.0 platformdirs-4.9.4 pytest-9.0.2 pytest-asyncio-1.3.0 rich-14.3.3 textual-8.1.1 uc-micro-py-2.0.0
|
||||
56
Dockerfile
Normal file
56
Dockerfile
Normal file
@@ -0,0 +1,56 @@
|
||||
# 7000%AUTO - AI Autonomous Development System
|
||||
# Dockerfile for Railway deployment
|
||||
|
||||
FROM python:3.11-slim
|
||||
|
||||
# Set environment variables
|
||||
ENV PYTHONDONTWRITEBYTECODE=1 \
|
||||
PYTHONUNBUFFERED=1 \
|
||||
PYTHONPATH=/app \
|
||||
PIP_NO_CACHE_DIR=1 \
|
||||
PIP_DISABLE_PIP_VERSION_CHECK=1
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Install system dependencies
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
git \
|
||||
curl \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Node.js 20.x (LTS) via NodeSource for OpenCode CLI
|
||||
RUN curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
|
||||
apt-get install -y nodejs && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install OpenCode CLI globally via npm
|
||||
RUN npm install -g opencode-ai
|
||||
|
||||
# Copy requirements first for better caching
|
||||
COPY requirements.txt .
|
||||
|
||||
# Install Python dependencies
|
||||
RUN pip install --no-cache-dir -r requirements.txt
|
||||
|
||||
# Copy application code
|
||||
COPY . .
|
||||
|
||||
# Create necessary directories
|
||||
RUN mkdir -p /app/data /app/workspace /app/logs
|
||||
|
||||
# Create non-root user for security
|
||||
RUN useradd -m -u 1000 appuser && \
|
||||
chown -R appuser:appuser /app
|
||||
USER appuser
|
||||
|
||||
# Expose port
|
||||
EXPOSE 8000
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD curl -f http://localhost:8000/health || exit 1
|
||||
|
||||
# Run the application
|
||||
CMD ["python", "main.py"]
|
||||
203
LICENSE
203
LICENSE
@@ -1,190 +1,21 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
MIT License
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
Copyright (c) 2024 Agentic Codebase Memory Manager
|
||||
|
||||
1. Definitions.
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to the Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute
|
||||
must include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2024 7000%AUTO
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
||||
404
README.md
404
README.md
@@ -1,119 +1,151 @@
|
||||
# Snip - Local-First Code Snippet Manager
|
||||
# Agentic Codebase Memory Manager
|
||||
|
||||

|
||||
A centralized memory store for AI coding agents working on the same project. Features a FastAPI JSON API, CLI interface with git-like commands, and a TUI dashboard for visualizing codebase decisions, implemented features, refactoring history, and architectural choices.
|
||||
|
||||
A powerful CLI tool for managing code snippets with local-first architecture. Store snippets in SQLite with FTS5 full-text search, optional AES encryption, and peer-to-peer sync capabilities.
|
||||
## Overview
|
||||
|
||||
## Features
|
||||
Memory Manager allows AI agents to store and retrieve context about a codebase, avoiding duplicate work and enabling better coordination. It provides:
|
||||
|
||||
- **CRUD Operations**: Create, read, update, delete code snippets with ease
|
||||
- **Full-Text Search**: FTS5-powered search across title, description, code, and tags with ranking
|
||||
- **Tag Organization**: Flexible tag-based organization with autocomplete
|
||||
- **Collections**: Group snippets into named collections for better organization
|
||||
- **Syntax Highlighting**: Beautiful terminal syntax highlighting using Pygments
|
||||
- **Import/Export**: JSON import/export for backup, sharing, and portability
|
||||
- **Encryption**: Optional AES encryption for sensitive snippets using PBKDF2 key derivation
|
||||
- **P2P Sync**: Discover and sync snippets with peers on your local network via mDNS
|
||||
- **Persistent Memory**: Store decisions, features, refactorings, and architectural choices
|
||||
- **Git-like Versioning**: Commit snapshots of memory state, view history, diff between commits
|
||||
- **Full-text Search**: Search across all entries with SQLite FTS5
|
||||
- **REST API**: JSON API for integration with other tools
|
||||
- **Interactive TUI**: Terminal dashboard for visual exploration
|
||||
- **CLI Interface**: Git-like commands for scripting and automation
|
||||
|
||||
## Installation
|
||||
|
||||
### From Source
|
||||
```bash
|
||||
pip install memory-manager
|
||||
```
|
||||
|
||||
Or install from source:
|
||||
|
||||
```bash
|
||||
git clone https://7000pct.gitea.bloupla.net/7000pctAUTO/snippet-manager.git
|
||||
cd snippet-manager
|
||||
git clone <repository-url>
|
||||
cd agentic-codebase-memory-manager
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
```
|
||||
click>=8.3.0
|
||||
cryptography>=46.0.0
|
||||
pygments>=2.19.0
|
||||
rich>=13.0.0
|
||||
zeroconf>=0.148.0
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
### CLI
|
||||
|
||||
```bash
|
||||
# Initialize the database
|
||||
snip init
|
||||
# Add a memory entry
|
||||
memory add --title "Use SQLite" --content "We decided to use SQLite for local storage" --category decision --tags storage,database
|
||||
|
||||
# Add a snippet
|
||||
snip add --title "Hello World" --code 'print("Hello, World!")' --language python
|
||||
# List all entries
|
||||
memory list
|
||||
|
||||
# List all snippets
|
||||
snip list
|
||||
# Search entries
|
||||
memory search "SQLite"
|
||||
|
||||
# Get a snippet with syntax highlighting
|
||||
snip get 1
|
||||
# Commit current state
|
||||
memory commit --message "Initial project decisions"
|
||||
|
||||
# Search snippets
|
||||
snip search hello
|
||||
# View commit history
|
||||
memory log
|
||||
|
||||
# Add tags to organize
|
||||
snip tag add 1 python basics
|
||||
# Start API server
|
||||
memory serve
|
||||
|
||||
# Create a collection
|
||||
snip collection create "Python Snippets"
|
||||
|
||||
# Export snippets for backup
|
||||
snip export all --file backup.json
|
||||
|
||||
# Import snippets from backup
|
||||
snip import --file backup.json --strategy skip
|
||||
# Launch TUI dashboard
|
||||
memory tui
|
||||
```
|
||||
|
||||
## Commands Reference
|
||||
### API
|
||||
|
||||
### Snippet Commands
|
||||
```bash
|
||||
# Start the server
|
||||
memory serve
|
||||
|
||||
# Or use the API directly
|
||||
curl http://localhost:8080/api/memory
|
||||
curl http://localhost:8080/api/memory -X POST -H "Content-Type: application/json" -d '{"title": "Use Redis", "content": "Caching layer", "category": "architecture", "tags": ["cache"]}'
|
||||
curl http://localhost:8080/api/memory/search?q=cache
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `snip init` | Initialize the snippet database |
|
||||
| `snip add` | Add a new snippet |
|
||||
| `snip get <id>` | Get a snippet by ID with syntax highlighting |
|
||||
| `snip list` | List all snippets with pagination |
|
||||
| `snip edit <id>` | Edit a snippet in your default editor |
|
||||
| `snip delete <id>` | Delete a snippet (with confirmation) |
|
||||
| `snip search <query>` | Full-text search with FTS5 |
|
||||
| `memory add` | Add a new memory entry |
|
||||
| `memory list` | List memory entries |
|
||||
| `memory search` | Search memory entries |
|
||||
| `memory get` | Get a specific entry |
|
||||
| `memory update` | Update an entry |
|
||||
| `memory delete` | Delete an entry |
|
||||
| `memory commit` | Create a commit snapshot |
|
||||
| `memory log` | Show commit history |
|
||||
| `memory diff` | Show diff between commits |
|
||||
| `memory serve` | Start the API server |
|
||||
| `memory tui` | Launch the TUI dashboard |
|
||||
|
||||
### Tag Commands
|
||||
### Categories
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `snip tag add <snippet_id> <tag>` | Add a tag to a snippet |
|
||||
| `snip tag remove <snippet_id> <tag>` | Remove a tag from a snippet |
|
||||
| `snip tag list` | List all tags in use |
|
||||
- `decision` - Architectural and design decisions
|
||||
- `feature` - Implemented features
|
||||
- `refactoring` - Refactoring history
|
||||
- `architecture` - Architectural patterns and structures
|
||||
- `bug` - Bug fixes and known issues
|
||||
- `note` - General notes and observations
|
||||
|
||||
### Collection Commands
|
||||
## REST API
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `snip collection create <name>` | Create a new collection |
|
||||
| `snip collection list` | List all collections |
|
||||
| `snip collection delete <id>` | Delete a collection |
|
||||
| `snip collection add <collection_id> <snippet_id>` | Add snippet to collection |
|
||||
| `snip collection remove <collection_id> <snippet_id>` | Remove snippet from collection |
|
||||
### Endpoints
|
||||
|
||||
### Import/Export Commands
|
||||
| Method | Endpoint | Description |
|
||||
|--------|----------|-------------|
|
||||
| GET | `/api/memory` | List all entries |
|
||||
| POST | `/api/memory` | Create new entry |
|
||||
| GET | `/api/memory/{id}` | Get entry by ID |
|
||||
| PUT | `/api/memory/{id}` | Update entry |
|
||||
| DELETE | `/api/memory/{id}` | Delete entry |
|
||||
| GET | `/api/memory/search?q=` | Search entries |
|
||||
| POST | `/api/memory/commit` | Create commit |
|
||||
| GET | `/api/memory/log` | Get commit log |
|
||||
| GET | `/api/memory/diff/{h1}/{h2}` | Diff two commits |
|
||||
| GET | `/api/memory/stats` | Get statistics |
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `snip export all --file <path>` | Export all snippets to JSON |
|
||||
| `snip export collection <name> --file <path>` | Export a specific collection |
|
||||
| `snip export snippet <id> --file <path>` | Export a single snippet |
|
||||
| `snip import --file <path> --strategy <strategy>` | Import snippets (strategies: skip, replace, duplicate) |
|
||||
### Request/Response Examples
|
||||
|
||||
### P2P Sync Commands
|
||||
```json
|
||||
// POST /api/memory
|
||||
{
|
||||
"title": "Use PostgreSQL",
|
||||
"content": "We decided to use PostgreSQL for the main database due to its reliability and feature set.",
|
||||
"category": "decision",
|
||||
"tags": ["database", "postgresql", "storage"]
|
||||
}
|
||||
```
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `snip discover list` | Discover peers on the local network |
|
||||
| `snip sync --peer-id <id>` | Sync snippets with a discovered peer |
|
||||
| `snip peers` | List known sync peers |
|
||||
```json
|
||||
// Response
|
||||
{
|
||||
"id": 1,
|
||||
"title": "Use PostgreSQL",
|
||||
"content": "We decided to use PostgreSQL...",
|
||||
"category": "decision",
|
||||
"tags": ["database", "postgresql", "storage"],
|
||||
"agent_id": "agent-123",
|
||||
"project_path": "/path/to/project",
|
||||
"created_at": "2024-01-15T10:30:00",
|
||||
"updated_at": "2024-01-15T10:30:00"
|
||||
}
|
||||
```
|
||||
|
||||
API documentation available at `http://localhost:8080/docs` (Swagger UI).
|
||||
|
||||
## TUI Dashboard
|
||||
|
||||
Launch with `memory tui`. Keybindings:
|
||||
|
||||
- `d` - Dashboard screen
|
||||
- `l` - Memory list screen
|
||||
- `c` - Commit history screen
|
||||
- `s` - Search screen
|
||||
- `q` - Quit
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -121,175 +153,69 @@ snip import --file backup.json --strategy skip
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `SNIP_DB_PATH` | `~/.snip/snippets.db` | Path to SQLite database |
|
||||
| `SNIP_KEY_FILE` | `~/.snip/.key` | Path to encryption key file |
|
||||
| `SNIP_SYNC_PORT` | `8765` | Port for P2P sync |
|
||||
| `SNIP_PEER_CACHE` | `~/.snip/peers.json` | Path to peer cache |
|
||||
| `MEMORY_DB_PATH` | `.memory/codebase_memory.db` | SQLite database path |
|
||||
| `MEMORY_API_HOST` | `127.0.0.1` | API server host |
|
||||
| `MEMORY_API_PORT` | `8080` | API server port |
|
||||
| `MEMORY_PROJECT_PATH` | `.` | Project root path |
|
||||
| `AGENT_ID` | `unknown` | Agent identifier |
|
||||
|
||||
### Configuration File
|
||||
### Project Config
|
||||
|
||||
Optional configuration can be placed at `~/.snip/config.toml`:
|
||||
Optional `.memory/config.toml` in project root:
|
||||
|
||||
```toml
|
||||
[database]
|
||||
path = "~/.snip/snippets.db"
|
||||
[project]
|
||||
name = "my-project"
|
||||
db_path = ".memory/codebase_memory.db"
|
||||
|
||||
[sync]
|
||||
port = 8765
|
||||
auto_discover = true
|
||||
|
||||
[display]
|
||||
style = "monokai"
|
||||
line_numbers = true
|
||||
[api]
|
||||
host = "127.0.0.1"
|
||||
port = 8080
|
||||
```
|
||||
|
||||
## Encryption
|
||||
## Architecture
|
||||
|
||||
Snippets can be encrypted using AES encryption with PBKDF2 key derivation (480k iterations, SHA256).
|
||||
```
|
||||
src/memory_manager/
|
||||
├── api/ # FastAPI REST API
|
||||
├── cli/ # Click CLI commands
|
||||
├── core/ # Business logic services
|
||||
├── db/ # SQLAlchemy models and repository
|
||||
└── tui/ # Textual TUI application
|
||||
```
|
||||
|
||||
### Key Components
|
||||
|
||||
- **MemoryService**: CRUD operations for memory entries
|
||||
- **SearchService**: Full-text search with FTS5
|
||||
- **CommitService**: Git-like versioning with snapshots and diffs
|
||||
- **MemoryRepository**: Async SQLAlchemy operations with aiosqlite
|
||||
|
||||
## Integration with AI Tools
|
||||
|
||||
### Cursor
|
||||
|
||||
Add to your Cursor project settings or `.cursor/rules`:
|
||||
|
||||
```
|
||||
Use the Memory Manager API at http://localhost:8080 to:
|
||||
1. Check existing decisions before making new ones
|
||||
2. Record significant decisions with: POST /api/memory
|
||||
3. Search past decisions with: GET /api/memory/search?q=
|
||||
```
|
||||
|
||||
### GitHub Copilot
|
||||
|
||||
Create a Copilot extension that calls the Memory Manager API to contextually retrieve relevant past decisions when working on similar code.
|
||||
|
||||
### Pre-commit Hook
|
||||
|
||||
```bash
|
||||
# Add an encrypted snippet
|
||||
snip add --title "API Secret" --code "API_KEY=abc123" --language python --encrypt
|
||||
|
||||
# View encrypted snippet (will prompt for password)
|
||||
snip get 1
|
||||
|
||||
# Encrypted snippets are stored with is_encrypted flag
|
||||
# Decryption happens on-demand with password verification
|
||||
```
|
||||
|
||||
### How Encryption Works
|
||||
|
||||
1. User provides a password when creating an encrypted snippet
|
||||
2. PBKDF2 derives a 32-byte key using SHA256 (480,000 iterations)
|
||||
3. Fernet (AES-128-CBC with HMAC) encrypts the snippet content
|
||||
4. Encrypted data stored in database with `is_encrypted=True`
|
||||
5. On retrieval, password decrypts the content in memory
|
||||
|
||||
## P2P Sync
|
||||
|
||||
Snip can discover and sync snippets with other peers on your local network using mDNS/Bonjour service discovery.
|
||||
|
||||
### Discovery
|
||||
|
||||
```bash
|
||||
# Discover available peers on the network
|
||||
snip discover list
|
||||
|
||||
# Output example:
|
||||
# Peer: macbook-pro.local (192.168.1.100:8765)
|
||||
# Last seen: 2 minutes ago
|
||||
```
|
||||
|
||||
### Sync Protocol
|
||||
|
||||
1. Peer discovery via `_snippets._tcp.local.` mDNS service
|
||||
2. HTTP/JSON communication over TCP
|
||||
3. Sync exchanges snippets updated since last sync
|
||||
4. Conflict resolution: newest-wins strategy (with option to keep both)
|
||||
|
||||
```bash
|
||||
# Sync with a specific peer
|
||||
snip sync --peer-id <peer_id>
|
||||
|
||||
# View known peers
|
||||
snip peers
|
||||
```
|
||||
|
||||
## Import/Export Format
|
||||
|
||||
Exported JSON follows this structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"exported_at": "2024-01-15T10:30:00Z",
|
||||
"snippets": [
|
||||
{
|
||||
"title": "Hello World",
|
||||
"description": "A simple Hello World example",
|
||||
"code": "print('Hello, World!')",
|
||||
"language": "python",
|
||||
"tags": ["basics", "example"],
|
||||
"created_at": "2024-01-10T08:00:00Z",
|
||||
"updated_at": "2024-01-10T08:00:00Z"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### Import Strategies
|
||||
|
||||
- **skip**: Skip snippets that already exist (by title match)
|
||||
- **replace**: Replace existing snippets with same title
|
||||
- **duplicate**: Import all as new snippets (generate new IDs)
|
||||
|
||||
## Shell Completion
|
||||
|
||||
Snip supports shell completion for bash and zsh.
|
||||
|
||||
### Bash
|
||||
|
||||
```bash
|
||||
# Add to ~/.bashrc
|
||||
eval "$(_SNIP_COMPLETE=bash snip)"
|
||||
```
|
||||
|
||||
### Zsh
|
||||
|
||||
```bash
|
||||
# Add to ~/.zshrc
|
||||
eval "$(_SNIP_COMPLETE=zsh snip)"
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Database Locked
|
||||
|
||||
If you see "Database locked" errors:
|
||||
- Ensure no other snip processes are running
|
||||
- Check file permissions on `~/.snip/`
|
||||
|
||||
### FTS Query Syntax Error
|
||||
|
||||
FTS5 uses special syntax for boolean queries:
|
||||
- Use quotes for phrases: `"hello world"`
|
||||
- Use AND/OR for boolean: `hello AND world`
|
||||
- Escape special characters with `\`
|
||||
|
||||
### Decryption Failed
|
||||
|
||||
- Verify you're using the correct password
|
||||
- Passwords cannot be recovered if forgotten
|
||||
- Encrypted snippets without the password cannot be decrypted
|
||||
|
||||
### Peer Unreachable
|
||||
|
||||
- Ensure both peers are on the same network
|
||||
- Check firewall settings for port 8765
|
||||
- Verify mDNS/Bonjour is enabled on your system
|
||||
|
||||
## Development
|
||||
|
||||
```bash
|
||||
# Clone repository
|
||||
git clone https://7000pct.gitea.bloupla.net/7000pctAUTO/snippet-manager.git
|
||||
cd snippet-manager
|
||||
|
||||
# Install in development mode
|
||||
pip install -e ".[dev]"
|
||||
|
||||
# Run tests
|
||||
pytest tests/ -v
|
||||
|
||||
# Run with verbose output
|
||||
pytest tests/ -v --capture=no
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-commit
|
||||
memory commit --message "Auto-save before commit" 2>/dev/null || true
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see [LICENSE](LICENSE) for details.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome! Please open an issue or submit a pull request.
|
||||
MIT License - see LICENSE file for details.
|
||||
|
||||
121
config.py
Normal file
121
config.py
Normal file
@@ -0,0 +1,121 @@
|
||||
"""
|
||||
7000%AUTO Configuration Module
|
||||
Environment-based configuration using Pydantic Settings
|
||||
"""
|
||||
|
||||
from pydantic_settings import BaseSettings
|
||||
from pydantic import Field
|
||||
from typing import Optional
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """Application settings loaded from environment variables (and .env).

    All OPENCODE_* values are required for the app to operate.  They default
    to empty/zero here only so the process can start and then report exactly
    which settings are missing via ``get_missing_opencode_settings()``.
    """

    # Application
    APP_NAME: str = "7000%AUTO"
    DEBUG: bool = False
    LOG_LEVEL: str = "INFO"

    # OpenCode AI settings (required - no usable defaults).
    # Users MUST set these environment variables:
    #   OPENCODE_API_KEY    - API key for your AI provider
    #   OPENCODE_API_BASE   - API base URL (e.g. https://api.minimax.io/anthropic/v1)
    #   OPENCODE_SDK        - AI SDK npm package (e.g. @ai-sdk/anthropic, @ai-sdk/openai)
    #   OPENCODE_MODEL      - Model name (e.g. MiniMax-M2.1, gpt-4o)
    #   OPENCODE_MAX_TOKENS - Max output tokens (e.g. 196608)
    OPENCODE_API_KEY: str = Field(default="", description="API key for your AI provider (REQUIRED)")
    OPENCODE_API_BASE: str = Field(default="", description="API base URL (REQUIRED)")
    OPENCODE_SDK: str = Field(default="", description="AI SDK npm package (REQUIRED, e.g. @ai-sdk/anthropic, @ai-sdk/openai)")
    OPENCODE_MODEL: str = Field(default="", description="Model name to use (REQUIRED)")
    OPENCODE_MAX_TOKENS: int = Field(default=0, description="Maximum output tokens for AI responses (REQUIRED)")

    # OpenCode server
    OPENCODE_SERVER_URL: Optional[str] = Field(default=None, description="OpenCode server URL (default: http://127.0.0.1:18080)")

    # Gitea
    GITEA_TOKEN: str = Field(default="", description="Gitea Personal Access Token")
    GITEA_USERNAME: Optional[str] = Field(default=None, description="Gitea username for repo creation")
    GITEA_URL: str = Field(default="https://7000pct.gitea.bloupla.net", description="Gitea server URL")

    # X (Twitter) API
    X_API_KEY: str = Field(default="", description="X API Key (Consumer Key)")
    X_API_SECRET: str = Field(default="", description="X API Secret (Consumer Secret)")
    X_ACCESS_TOKEN: str = Field(default="", description="X Access Token")
    X_ACCESS_TOKEN_SECRET: str = Field(default="", description="X Access Token Secret")
    X_BEARER_TOKEN: Optional[str] = Field(default=None, description="X Bearer Token for API v2")

    # Database
    DATABASE_URL: str = Field(
        default="sqlite+aiosqlite:///./data/7000auto.db",
        description="Database connection URL"
    )
    DATABASE_ECHO: bool = Field(default=False, description="Echo SQL queries")

    # Workspace
    WORKSPACE_DIR: Path = Field(
        default=Path("./workspace"),
        description="Directory for project workspaces"
    )

    # Web server
    HOST: str = Field(default="0.0.0.0", description="Server host")
    PORT: int = Field(default=8000, description="Server port")

    # Orchestrator
    AUTO_START: bool = Field(default=True, description="Auto-start orchestrator on boot")
    MAX_CONCURRENT_PROJECTS: int = Field(default=1, description="Max concurrent projects")

    class Config:
        # Pydantic v1-style settings config; pydantic-settings still honors it.
        env_file = ".env"
        env_file_encoding = "utf-8"
        case_sensitive = True
        extra = "ignore"

    def ensure_directories(self) -> None:
        """Create the workspace and ./data directories if they do not exist."""
        self.WORKSPACE_DIR.mkdir(parents=True, exist_ok=True)
        Path("./data").mkdir(parents=True, exist_ok=True)

    @property
    def is_gitea_configured(self) -> bool:
        """True when a Gitea token is present."""
        return bool(self.GITEA_TOKEN)

    @property
    def is_x_configured(self) -> bool:
        """True when all four OAuth 1.0a X credentials are present."""
        return all([
            self.X_API_KEY,
            self.X_API_SECRET,
            self.X_ACCESS_TOKEN,
            self.X_ACCESS_TOKEN_SECRET,
        ])

    @property
    def is_opencode_configured(self) -> bool:
        """True when every required OpenCode setting is configured.

        Derived from get_missing_opencode_settings() so the two checks
        cannot drift apart (previously the field checks were duplicated).
        """
        return not self.get_missing_opencode_settings()

    def get_missing_opencode_settings(self) -> list[str]:
        """Return the names of required OpenCode settings that are unset.

        A setting counts as missing when it is an empty string, or - for
        OPENCODE_MAX_TOKENS - when it is not a positive integer.
        """
        missing = []
        if not self.OPENCODE_API_KEY:
            missing.append("OPENCODE_API_KEY")
        if not self.OPENCODE_API_BASE:
            missing.append("OPENCODE_API_BASE")
        if not self.OPENCODE_SDK:
            missing.append("OPENCODE_SDK")
        if not self.OPENCODE_MODEL:
            missing.append("OPENCODE_MODEL")
        if self.OPENCODE_MAX_TOKENS <= 0:
            missing.append("OPENCODE_MAX_TOKENS")
        return missing
|
||||
|
||||
|
||||
# Global settings instance, imported elsewhere as `from config import settings`.
# Instantiating Settings() here reads the environment / .env file at import time.
settings = Settings()
|
||||
77
database/__init__.py
Normal file
77
database/__init__.py
Normal file
@@ -0,0 +1,77 @@
|
||||
"""
|
||||
7000%AUTO Database Module
|
||||
"""
|
||||
|
||||
from .models import Base, Idea, Project, AgentLog, IdeaSource, ProjectStatus, LogType
|
||||
|
||||
from .db import (
|
||||
init_db,
|
||||
close_db,
|
||||
get_db,
|
||||
async_session_factory,
|
||||
# Idea operations
|
||||
create_idea,
|
||||
get_idea_by_id,
|
||||
get_unused_ideas,
|
||||
mark_idea_used,
|
||||
# Project operations
|
||||
create_project,
|
||||
get_project_by_id,
|
||||
get_active_project,
|
||||
update_project_status,
|
||||
get_project_idea_json,
|
||||
get_project_plan_json,
|
||||
set_project_idea_json,
|
||||
set_project_plan_json,
|
||||
# DevTest operations (Developer-Tester communication)
|
||||
get_project_test_result_json,
|
||||
set_project_test_result_json,
|
||||
get_project_implementation_status_json,
|
||||
set_project_implementation_status_json,
|
||||
clear_project_devtest_state,
|
||||
# Logging
|
||||
add_agent_log,
|
||||
get_recent_logs,
|
||||
get_project_logs,
|
||||
# Stats
|
||||
get_stats,
|
||||
)
|
||||
|
||||
# Public API of the database package: ORM models re-exported from .models
# plus the CRUD/search helpers defined in .db.  Keep in sync with the
# imports above.
__all__ = [
    # Models
    "Base",
    "Idea",
    "Project",
    "AgentLog",
    "IdeaSource",
    "ProjectStatus",
    "LogType",
    # DB operations
    "init_db",
    "close_db",
    "get_db",
    "async_session_factory",
    "create_idea",
    "get_idea_by_id",
    "get_unused_ideas",
    "mark_idea_used",
    "create_project",
    "get_project_by_id",
    "get_active_project",
    "update_project_status",
    "get_project_idea_json",
    "get_project_plan_json",
    "set_project_idea_json",
    "set_project_plan_json",
    # DevTest operations
    "get_project_test_result_json",
    "set_project_test_result_json",
    "get_project_implementation_status_json",
    "set_project_implementation_status_json",
    "clear_project_devtest_state",
    # Logging
    "add_agent_log",
    "get_recent_logs",
    "get_project_logs",
    # Stats
    "get_stats",
]
|
||||
602
database/db.py
Normal file
602
database/db.py
Normal file
@@ -0,0 +1,602 @@
|
||||
"""
|
||||
7000%AUTO Database Operations
|
||||
Async SQLAlchemy database setup and CRUD operations
|
||||
"""
|
||||
|
||||
import logging
|
||||
from typing import Optional, List
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
|
||||
from sqlalchemy import select, func
|
||||
|
||||
from .models import Base, Idea, Project, AgentLog, IdeaSource, ProjectStatus, LogType
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Module-level engine and session factory.
# Both stay None until init_db() populates them and are reset to None
# by close_db(); every helper below depends on this shared state.
_engine = None
_session_factory = None
|
||||
|
||||
|
||||
def async_session_factory():
    """Create and return a new AsyncSession from the module-level factory.

    NOTE(review): despite the name, this returns a *session instance*
    (``_session_factory()`` is called), not the factory object itself —
    callers use it as ``async with async_session_factory() as s: ...``.

    Raises:
        RuntimeError: if init_db() has not been called yet.
    """
    if _session_factory is None:
        raise RuntimeError("Database not initialized. Call init_db() first.")
    return _session_factory()
|
||||
|
||||
|
||||
async def init_db(database_url: Optional[str] = None, echo: Optional[bool] = None):
    """Initialize the async engine and session factory, and create all tables.

    Args:
        database_url: Connection URL; defaults to ``settings.DATABASE_URL``.
        echo: Echo SQL statements; defaults to ``settings.DATABASE_ECHO``.
            (Previously this was hard-coded to False, silently ignoring
            the DATABASE_ECHO setting defined in config.py.)
    """
    global _engine, _session_factory

    # Resolve unset arguments from application settings (lazy import to
    # avoid a module-level cycle with config).
    if database_url is None or echo is None:
        from config import settings
        if database_url is None:
            database_url = settings.DATABASE_URL
        if echo is None:
            echo = settings.DATABASE_ECHO

    # Normalize Heroku-style and sync postgres URLs to the asyncpg driver.
    if database_url.startswith("postgres://"):
        database_url = database_url.replace("postgres://", "postgresql+asyncpg://", 1)
    elif database_url.startswith("postgresql://") and "+asyncpg" not in database_url:
        database_url = database_url.replace("postgresql://", "postgresql+asyncpg://", 1)

    _engine = create_async_engine(
        database_url,
        echo=echo,
        pool_pre_ping=True
    )

    _session_factory = async_sessionmaker(
        _engine,
        class_=AsyncSession,
        expire_on_commit=False
    )

    # Create any tables missing from the ORM metadata.
    async with _engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)

    logger.info("Database initialized successfully")
|
||||
|
||||
|
||||
async def close_db():
    """Dispose of the engine (if one exists) and reset module-level state."""
    global _engine, _session_factory
    if _engine is None:
        # Nothing was initialized (or it was already closed) - no-op.
        return
    await _engine.dispose()
    _engine = None
    _session_factory = None
    logger.info("Database connection closed")
|
||||
|
||||
|
||||
@asynccontextmanager
async def get_db():
    """Yield an AsyncSession that commits on success and rolls back on error.

    The commit is inside the ``try`` so that a failure during commit is
    also rolled back before the exception propagates.

    Raises:
        RuntimeError: if init_db() has not been called yet.
    """
    if _session_factory is None:
        raise RuntimeError("Database not initialized. Call init_db() first.")

    async with _session_factory() as session:
        try:
            yield session
            await session.commit()
        except Exception:
            await session.rollback()
            raise
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Idea CRUD Operations
|
||||
# =============================================================================
|
||||
|
||||
async def create_idea(
    title: str,
    description: str,
    source: str,
    session: Optional[AsyncSession] = None
) -> Idea:
    """Persist a new Idea and return it with generated fields populated."""
    async def _insert(db: AsyncSession) -> Idea:
        # Accept either a plain string or an IdeaSource enum member.
        src = source if isinstance(source, str) else source.value
        row = Idea(title=title, description=description, source=src)
        db.add(row)
        await db.flush()
        await db.refresh(row)
        return row

    if session is not None:
        return await _insert(session)
    async with get_db() as db:
        return await _insert(db)
|
||||
|
||||
|
||||
async def get_idea_by_id(idea_id: int, session: Optional[AsyncSession] = None) -> Optional[Idea]:
    """Fetch a single Idea by primary key, or None if it does not exist."""
    async def _lookup(db: AsyncSession) -> Optional[Idea]:
        res = await db.execute(select(Idea).where(Idea.id == idea_id))
        return res.scalar_one_or_none()

    if session is not None:
        return await _lookup(session)
    async with get_db() as db:
        return await _lookup(db)
|
||||
|
||||
|
||||
async def get_unused_ideas(
    limit: int = 10,
    source: Optional[str] = None,
    session: Optional[AsyncSession] = None
) -> List[Idea]:
    """Return up to ``limit`` unused ideas, newest first.

    Args:
        limit: Maximum number of ideas to return.
        source: Optional source filter (matched against Idea.source).
        session: Existing session to reuse; a fresh one is opened otherwise.
    """
    async def _get(s: AsyncSession) -> List[Idea]:
        # .is_(False) is the idiomatic SQLAlchemy boolean-column test
        # (instead of the linter-flagged `== False` comparison).
        query = select(Idea).where(Idea.used.is_(False))
        if source:
            query = query.where(Idea.source == source)
        query = query.order_by(Idea.created_at.desc()).limit(limit)
        result = await s.execute(query)
        return list(result.scalars().all())

    if session:
        return await _get(session)
    else:
        async with get_db() as s:
            return await _get(s)
|
||||
|
||||
|
||||
async def mark_idea_used(idea_id: int, session: Optional[AsyncSession] = None) -> bool:
    """Flag the idea with ``idea_id`` as used; return True if it existed."""
    async def _flag(db: AsyncSession) -> bool:
        res = await db.execute(select(Idea).where(Idea.id == idea_id))
        row = res.scalar_one_or_none()
        if row is None:
            return False
        row.used = True
        return True

    if session is not None:
        return await _flag(session)
    async with get_db() as db:
        return await _flag(db)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Project CRUD Operations
|
||||
# =============================================================================
|
||||
|
||||
async def create_project(
    idea_id: int,
    name: str,
    plan_json: Optional[dict] = None,
    session: Optional[AsyncSession] = None
) -> Project:
    """Persist a new Project in the IDEATION state and return it."""
    async def _insert(db: AsyncSession) -> Project:
        row = Project(
            idea_id=idea_id,
            name=name,
            plan_json=plan_json,
            status=ProjectStatus.IDEATION.value,
        )
        db.add(row)
        await db.flush()
        await db.refresh(row)
        return row

    if session is not None:
        return await _insert(session)
    async with get_db() as db:
        return await _insert(db)
|
||||
|
||||
|
||||
async def get_project_by_id(project_id: int, session: Optional[AsyncSession] = None) -> Optional[Project]:
    """Fetch a single Project by primary key, or None if it does not exist."""
    async def _lookup(db: AsyncSession) -> Optional[Project]:
        res = await db.execute(select(Project).where(Project.id == project_id))
        return res.scalar_one_or_none()

    if session is not None:
        return await _lookup(session)
    async with get_db() as db:
        return await _lookup(db)
|
||||
|
||||
|
||||
async def get_active_project(session: Optional[AsyncSession] = None) -> Optional[Project]:
    """Return the newest project that is neither completed nor failed, if any."""
    terminal = [ProjectStatus.COMPLETED.value, ProjectStatus.FAILED.value]

    async def _lookup(db: AsyncSession) -> Optional[Project]:
        stmt = (
            select(Project)
            .where(Project.status.notin_(terminal))
            .order_by(Project.created_at.desc())
            .limit(1)
        )
        res = await db.execute(stmt)
        return res.scalar_one_or_none()

    if session is not None:
        return await _lookup(session)
    async with get_db() as db:
        return await _lookup(db)
|
||||
|
||||
|
||||
async def update_project_status(
    project_id: int,
    status: str,
    gitea_url: Optional[str] = None,
    x_post_url: Optional[str] = None,
    dev_test_iterations: Optional[int] = None,
    ci_test_iterations: Optional[int] = None,
    current_agent: Optional[str] = None,
    plan_json: Optional[dict] = None,
    idea_json: Optional[dict] = None,
    name: Optional[str] = None,
    session: Optional[AsyncSession] = None
) -> bool:
    """Set a project's status and apply any non-None optional fields.

    Returns:
        True when the project exists and was updated, False otherwise.
    """
    async def _apply(db: AsyncSession) -> bool:
        res = await db.execute(select(Project).where(Project.id == project_id))
        project = res.scalar_one_or_none()
        if project is None:
            return False
        # Accept either a plain string or a ProjectStatus enum member.
        project.status = status if isinstance(status, str) else status.value
        # Only fields explicitly passed (non-None) are written.
        optional_fields = {
            "gitea_url": gitea_url,
            "x_post_url": x_post_url,
            "dev_test_iterations": dev_test_iterations,
            "ci_test_iterations": ci_test_iterations,
            "current_agent": current_agent,
            "plan_json": plan_json,
            "idea_json": idea_json,
            "name": name,
        }
        for attr, value in optional_fields.items():
            if value is not None:
                setattr(project, attr, value)
        return True

    if session is not None:
        return await _apply(session)
    async with get_db() as db:
        return await _apply(db)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# AgentLog CRUD Operations
|
||||
# =============================================================================
|
||||
|
||||
async def add_agent_log(
    project_id: int,
    agent_name: str,
    message: str,
    log_type: str = LogType.INFO.value,
    session: Optional[AsyncSession] = None
) -> AgentLog:
    """Add an agent log entry and return the persisted row."""
    # Normalize enum members to their plain string value.
    type_value = log_type if isinstance(log_type, str) else log_type.value

    async def _add(s: AsyncSession) -> AgentLog:
        entry = AgentLog(
            project_id=project_id,
            agent_name=agent_name,
            message=message,
            log_type=type_value,
        )
        s.add(entry)
        # Flush so the database assigns the primary key, then refresh to
        # load generated columns back onto the instance.
        await s.flush()
        await s.refresh(entry)
        return entry

    if session:
        return await _add(session)
    async with get_db() as s:
        return await _add(s)
|
||||
|
||||
|
||||
async def get_recent_logs(
    limit: int = 50,
    log_type: Optional[str] = None,
    session: Optional[AsyncSession] = None
) -> List[AgentLog]:
    """Get recent logs across all projects, newest first."""
    async def _get(s: AsyncSession) -> List[AgentLog]:
        stmt = select(AgentLog)
        # Optional filter by log category.
        if log_type:
            stmt = stmt.where(AgentLog.log_type == log_type)
        rows = await s.execute(
            stmt.order_by(AgentLog.created_at.desc()).limit(limit)
        )
        return list(rows.scalars().all())

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def get_project_logs(
    project_id: int,
    limit: int = 100,
    log_type: Optional[str] = None,
    session: Optional[AsyncSession] = None
) -> List[AgentLog]:
    """Get logs for a specific project, newest first."""
    async def _get(s: AsyncSession) -> List[AgentLog]:
        stmt = select(AgentLog).where(AgentLog.project_id == project_id)
        # Optional filter by log category.
        if log_type:
            stmt = stmt.where(AgentLog.log_type == log_type)
        rows = await s.execute(
            stmt.order_by(AgentLog.created_at.desc()).limit(limit)
        )
        return list(rows.scalars().all())

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Statistics
|
||||
# =============================================================================
|
||||
|
||||
async def get_project_idea_json(project_id: int, session: Optional[AsyncSession] = None) -> Optional[dict]:
    """Get the submitted idea JSON for a project."""
    async def _get(s: AsyncSession) -> Optional[dict]:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        # None when the project does not exist or no idea was submitted yet.
        return project.idea_json if project else None

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def get_project_plan_json(project_id: int, session: Optional[AsyncSession] = None) -> Optional[dict]:
    """Get the submitted plan JSON for a project."""
    async def _get(s: AsyncSession) -> Optional[dict]:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        # None when the project does not exist or no plan was submitted yet.
        return project.plan_json if project else None

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def set_project_idea_json(project_id: int, idea_json: dict, session: Optional[AsyncSession] = None) -> bool:
    """Set the idea JSON for a project (called by MCP submit_idea)."""
    async def _set(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            # Unknown project id — nothing to update.
            return False
        project.idea_json = idea_json
        return True

    if session:
        return await _set(session)
    async with get_db() as s:
        return await _set(s)
|
||||
|
||||
|
||||
async def set_project_plan_json(project_id: int, plan_json: dict, session: Optional[AsyncSession] = None) -> bool:
    """Set the plan JSON for a project (called by MCP submit_plan)."""
    async def _set(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            # Unknown project id — nothing to update.
            return False
        project.plan_json = plan_json
        return True

    if session:
        return await _set(session)
    async with get_db() as s:
        return await _set(s)
|
||||
|
||||
|
||||
async def get_project_test_result_json(project_id: int, session: Optional[AsyncSession] = None) -> Optional[dict]:
    """Get the submitted test result JSON for a project."""
    async def _get(s: AsyncSession) -> Optional[dict]:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        # None when the project does not exist or no result was submitted yet.
        return project.test_result_json if project else None

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def set_project_test_result_json(project_id: int, test_result_json: dict, session: Optional[AsyncSession] = None) -> bool:
    """Set the test result JSON for a project (called by MCP submit_test_result)."""
    async def _set(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            # Unknown project id — nothing to update.
            return False
        project.test_result_json = test_result_json
        return True

    if session:
        return await _set(session)
    async with get_db() as s:
        return await _set(s)
|
||||
|
||||
|
||||
async def get_project_implementation_status_json(project_id: int, session: Optional[AsyncSession] = None) -> Optional[dict]:
    """Get the submitted implementation status JSON for a project."""
    async def _get(s: AsyncSession) -> Optional[dict]:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        # None when the project does not exist or no status was submitted yet.
        return project.implementation_status_json if project else None

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def set_project_implementation_status_json(project_id: int, implementation_status_json: dict, session: Optional[AsyncSession] = None) -> bool:
    """Set the implementation status JSON for a project (called by MCP submit_implementation_status)."""
    async def _set(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            # Unknown project id — nothing to update.
            return False
        project.implementation_status_json = implementation_status_json
        return True

    if session:
        return await _set(session)
    async with get_db() as s:
        return await _set(s)
|
||||
|
||||
|
||||
async def clear_project_devtest_state(project_id: int, session: Optional[AsyncSession] = None) -> bool:
    """Clear test result and implementation status for a new dev-test iteration."""
    async def _clear(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            return False
        # Reset both per-iteration submissions so the next loop starts clean.
        project.test_result_json = None
        project.implementation_status_json = None
        return True

    if session:
        return await _clear(session)
    async with get_db() as s:
        return await _clear(s)
|
||||
|
||||
|
||||
async def get_project_ci_result_json(project_id: int, session: Optional[AsyncSession] = None) -> Optional[dict]:
    """Get the submitted CI result JSON for a project."""
    async def _get(s: AsyncSession) -> Optional[dict]:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        # None when the project does not exist or no CI result was submitted yet.
        return project.ci_result_json if project else None

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def set_project_ci_result_json(project_id: int, ci_result_json: dict, session: Optional[AsyncSession] = None) -> bool:
    """Set the CI result JSON for a project (called by MCP submit_ci_result)."""
    async def _set(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            # Unknown project id — nothing to update.
            return False
        project.ci_result_json = ci_result_json
        return True

    if session:
        return await _set(session)
    async with get_db() as s:
        return await _set(s)
|
||||
|
||||
|
||||
async def get_project_upload_status_json(project_id: int, session: Optional[AsyncSession] = None) -> Optional[dict]:
    """Get the submitted upload status JSON for a project."""
    async def _get(s: AsyncSession) -> Optional[dict]:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        # None when the project does not exist or no status was submitted yet.
        return project.upload_status_json if project else None

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
|
||||
|
||||
async def set_project_upload_status_json(project_id: int, upload_status_json: dict, session: Optional[AsyncSession] = None) -> bool:
    """Set the upload status JSON for a project (called by MCP submit_upload_status)."""
    async def _set(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            # Unknown project id — nothing to update.
            return False
        project.upload_status_json = upload_status_json
        return True

    if session:
        return await _set(session)
    async with get_db() as s:
        return await _set(s)
|
||||
|
||||
|
||||
async def clear_project_ci_state(project_id: int, session: Optional[AsyncSession] = None) -> bool:
    """Clear CI result and upload status for a new CI iteration."""
    async def _clear(s: AsyncSession) -> bool:
        row = await s.execute(select(Project).where(Project.id == project_id))
        project = row.scalar_one_or_none()
        if project is None:
            return False
        # Reset both per-iteration submissions so the next loop starts clean.
        project.ci_result_json = None
        project.upload_status_json = None
        return True

    if session:
        return await _clear(session)
    async with get_db() as s:
        return await _clear(s)
|
||||
|
||||
|
||||
async def get_stats(session: Optional[AsyncSession] = None) -> dict:
    """Get database statistics (idea/project/completed-project counts)."""
    async def _get(s: AsyncSession) -> dict:
        # COUNT() may come back as None on some backends; coerce to 0.
        total_ideas = (await s.execute(select(func.count(Idea.id)))).scalar() or 0
        total_projects = (await s.execute(select(func.count(Project.id)))).scalar() or 0
        completed = (
            await s.execute(
                select(func.count(Project.id)).where(
                    Project.status == ProjectStatus.COMPLETED.value
                )
            )
        ).scalar() or 0

        return {
            "total_ideas": total_ideas,
            "total_projects": total_projects,
            "completed_projects": completed,
        }

    if session:
        return await _get(session)
    async with get_db() as s:
        return await _get(s)
|
||||
93
database/models.py
Normal file
93
database/models.py
Normal file
@@ -0,0 +1,93 @@
|
||||
"""
|
||||
7000%AUTO Database Models
|
||||
SQLAlchemy ORM models for projects, ideas, and logs
|
||||
"""
|
||||
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
from sqlalchemy import String, Text, ForeignKey, JSON, Boolean, Integer, DateTime
|
||||
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column
|
||||
|
||||
|
||||
class Base(DeclarativeBase):
    """Declarative base class shared by all ORM models."""
    pass
|
||||
|
||||
|
||||
class IdeaSource(str, Enum):
    """Where a project idea can originate from.

    Mixes in ``str`` so members compare equal to their plain string value.
    """

    ARXIV = "arxiv"    # arXiv
    REDDIT = "reddit"  # Reddit
    X = "x"            # X (Twitter)
    HN = "hn"          # Hacker News
    PH = "ph"          # Product Hunt
    SYSTEM = "system"  # generated internally
|
||||
|
||||
|
||||
class ProjectStatus(str, Enum):
    """Lifecycle states a project moves through.

    Mixes in ``str`` so members compare equal to their plain string value.
    """

    IDEATION = "ideation"
    PLANNING = "planning"
    DEVELOPMENT = "development"
    TESTING = "testing"
    UPLOADING = "uploading"
    PROMOTING = "promoting"
    # Terminal states — excluded from "active project" queries elsewhere
    # in this codebase.
    COMPLETED = "completed"
    FAILED = "failed"
|
||||
|
||||
|
||||
class LogType(str, Enum):
    """Category of an agent log entry.

    Mixes in ``str`` so members compare equal to their plain string value.
    """

    INFO = "info"
    ERROR = "error"
    OUTPUT = "output"
    DEBUG = "debug"
|
||||
|
||||
|
||||
class Idea(Base):
    """Generated project ideas"""
    __tablename__ = "ideas"

    id: Mapped[int] = mapped_column(primary_key=True)
    title: Mapped[str] = mapped_column(String(200))
    description: Mapped[str] = mapped_column(Text)
    source: Mapped[str] = mapped_column(String(20))  # one of the IdeaSource values (arxiv, reddit, x, hn, ph, system)
    used: Mapped[bool] = mapped_column(default=False)  # presumably set once the idea has been picked up — confirm against callers
    # NOTE(review): datetime.utcnow is deprecated (Python 3.12+) and stores
    # naive timestamps; consider a timezone-aware default.
    created_at: Mapped[datetime] = mapped_column(default=datetime.utcnow)
|
||||
|
||||
|
||||
class Project(Base):
    """Projects being developed"""
    __tablename__ = "projects"

    id: Mapped[int] = mapped_column(primary_key=True)
    idea_id: Mapped[int] = mapped_column(ForeignKey("ideas.id"))  # originating Idea row
    name: Mapped[str] = mapped_column(String(200))
    # Workflow state; holds one of the ProjectStatus string values.
    status: Mapped[str] = mapped_column(String(20), default=ProjectStatus.IDEATION.value)
    idea_json: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)  # Submitted idea data from MCP
    plan_json: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)  # Submitted plan data from MCP
    test_result_json: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)  # Submitted test result from Tester MCP
    implementation_status_json: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)  # Submitted status from Developer MCP
    gitea_url: Mapped[Optional[str]] = mapped_column(String(500), nullable=True)  # Gitea repository URL, set when available
    x_post_url: Mapped[Optional[str]] = mapped_column(String(500), nullable=True)  # X promotion post URL, set when available
    ci_result_json: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)  # CI/CD result from Tester
    upload_status_json: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True)  # Upload status from Uploader
    ci_test_iterations: Mapped[int] = mapped_column(default=0)  # Uploader-Tester-Developer CI loop iterations
    dev_test_iterations: Mapped[int] = mapped_column(default=0)  # dev-test loop iterations
    current_agent: Mapped[Optional[str]] = mapped_column(String(50), nullable=True)  # name of the agent currently handling the project
    # NOTE(review): datetime.utcnow is deprecated (Python 3.12+) and stores
    # naive timestamps; consider a timezone-aware default.
    created_at: Mapped[datetime] = mapped_column(default=datetime.utcnow)
    completed_at: Mapped[Optional[datetime]] = mapped_column(nullable=True)
|
||||
|
||||
|
||||
class AgentLog(Base):
    """Logs from agent activities"""
    __tablename__ = "agent_logs"

    id: Mapped[int] = mapped_column(primary_key=True)
    project_id: Mapped[int] = mapped_column(ForeignKey("projects.id"))  # project this log belongs to
    agent_name: Mapped[str] = mapped_column(String(50))
    message: Mapped[str] = mapped_column(Text)
    log_type: Mapped[str] = mapped_column(String(20), default=LogType.INFO.value)  # one of the LogType values
    # NOTE(review): datetime.utcnow is deprecated (Python 3.12+) and stores
    # naive timestamps; consider a timezone-aware default.
    created_at: Mapped[datetime] = mapped_column(default=datetime.utcnow)
|
||||
17
envschema/__init__.py
Normal file
17
envschema/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""EnvSchema - Environment variable schema validation tool."""
|
||||
|
||||
__version__ = "0.1.0"
|
||||
|
||||
from envschema.schema import Schema, EnvVar, EnvVarType
|
||||
from envschema.core import ValidationEngine, ValidationResult, ValidationError
|
||||
from envschema.loader import EnvLoader
|
||||
|
||||
__all__ = [
|
||||
"Schema",
|
||||
"EnvVar",
|
||||
"EnvVarType",
|
||||
"ValidationEngine",
|
||||
"ValidationResult",
|
||||
"ValidationError",
|
||||
"EnvLoader",
|
||||
]
|
||||
151
envschema/cli.py
Normal file
151
envschema/cli.py
Normal file
@@ -0,0 +1,151 @@
|
||||
"""CLI interface for EnvSchema."""
|
||||
|
||||
import sys
|
||||
|
||||
import click
|
||||
|
||||
from envschema import __version__
|
||||
from envschema.schema import load_schema_from_file
|
||||
from envschema.core import validate_environment
|
||||
from envschema.generator import generate_env_example_to_file
|
||||
from envschema.formatters import format_result
|
||||
|
||||
|
||||
@click.group()
@click.version_option(version=__version__)
def cli():
    """EnvSchema - Validate environment variables against a schema."""
    # Group entry point only; the subcommands (validate, generate, check)
    # do the actual work.
    pass
|
||||
|
||||
|
||||
@cli.command()
@click.argument("schema", type=click.Path(exists=True))
@click.option(
    "--file",
    "-f",
    "env_file",
    type=click.Path(),
    help="Path to .env file to validate",
)
@click.option(
    "--env/--no-env",
    default=True,
    help="Include os.environ in validation",
)
@click.option(
    "--format",
    "-o",
    "output_format",
    type=click.Choice(["text", "json"]),
    default="text",
    help="Output format",
)
@click.option(
    "--ci",
    is_flag=True,
    help="CI mode (cleaner output)",
)
@click.option(
    "--strict",
    is_flag=True,
    help="Fail on warnings",
)
def validate(schema, env_file, env, output_format, ci, strict):
    """Validate environment variables against a schema.

    SCHEMA is the path to the schema file (JSON or YAML).

    Exit codes: 0 = valid; 1 = validation failed (or warnings present
    with --strict); 2 = schema or env file could not be read/parsed.
    """
    try:
        result = validate_environment(
            schema_path=schema,
            env_file=env_file,
            use_environment=env,
        )

        output = format_result(
            result,
            output_format=output_format,
            color=not ci,  # ANSI colors are disabled in CI mode
            ci_mode=ci,
        )
        click.echo(output)

        # Hard failures take precedence over strict-mode warnings.
        if not result.is_valid:
            sys.exit(1)

        if strict and result.warnings:
            click.echo("Warnings found in strict mode", err=True)
            sys.exit(1)

        sys.exit(0)

    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
    except ValueError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
|
||||
|
||||
|
||||
@cli.command()
@click.argument("schema", type=click.Path(exists=True))
@click.option(
    "--output",
    "-o",
    "output_file",
    type=click.Path(),
    help="Output file path (default: .env.example)",
)
@click.option(
    "--no-comments",
    is_flag=True,
    help="Don't include description comments",
)
def generate(schema, output_file, no_comments):
    """Generate .env.example from a schema.

    SCHEMA is the path to the schema file (JSON or YAML).

    Exit codes: 0 = success; 2 = schema could not be read/parsed.
    """
    try:
        schema_obj = load_schema_from_file(schema)

        # Default destination when -o/--output is not given.
        output_path = output_file or ".env.example"

        generate_env_example_to_file(
            schema_obj,
            output_path,
            include_descriptions=not no_comments,
        )

        click.echo(f"Generated {output_path}")

    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
    except ValueError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
|
||||
|
||||
|
||||
@cli.command()
@click.argument("schema", type=click.Path(exists=True))
def check(schema):
    """Check if a schema file is valid.

    SCHEMA is the path to the schema file (JSON or YAML).

    Exit codes: 0 = schema parsed successfully; 2 = missing or invalid.
    """
    try:
        schema_obj = load_schema_from_file(schema)
        click.echo(f"Schema is valid (version: {schema_obj.version})")
        click.echo(f"Found {len(schema_obj.envvars)} environment variables")
        sys.exit(0)
    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
    except ValueError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
|
||||
|
||||
|
||||
# Script entry point: delegate to the click command group.
if __name__ == "__main__":
    cli()
|
||||
162
envschema/core.py
Normal file
162
envschema/core.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Core validation engine for environment variables."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
from envschema.schema import Schema
|
||||
from envschema.loader import EnvLoader
|
||||
from envschema.validators import validate_value
|
||||
|
||||
|
||||
@dataclass
class ValidationError:
    """A single validation failure tied to one environment variable."""

    var_name: str                # name of the offending variable
    error_type: str              # machine-readable error category
    message: str                 # human-readable explanation
    value: Optional[str] = None  # offending value, when known

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON output."""
        return dict(
            var_name=self.var_name,
            error_type=self.error_type,
            message=self.message,
            value=self.value,
        )


@dataclass
class ValidationResult:
    """Aggregate outcome of validating an environment against a schema."""

    is_valid: bool
    missing_required: list[str] = field(default_factory=list)
    type_errors: list[ValidationError] = field(default_factory=list)
    pattern_errors: list[ValidationError] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON output."""
        return dict(
            is_valid=self.is_valid,
            missing_required=self.missing_required,
            type_errors=[err.to_dict() for err in self.type_errors],
            pattern_errors=[err.to_dict() for err in self.pattern_errors],
            warnings=self.warnings,
        )
|
||||
|
||||
|
||||
class ValidationEngine:
    """Engine for validating environment variables against a schema."""

    def __init__(self, schema: Schema):
        """Initialize the validation engine.

        Args:
            schema: The schema to validate against.
        """
        self.schema = schema

    def validate(self, env_vars: dict[str, str]) -> ValidationResult:
        """Validate environment variables against the schema.

        Args:
            env_vars: Dictionary of environment variable names to values.

        Returns:
            ValidationResult with all errors and warnings.
        """
        result = ValidationResult(is_valid=True)

        self._check_required_vars(env_vars, result)
        self._validate_types(env_vars, result)
        self._check_extra_vars(env_vars, result)

        # Final validity is recomputed from the collected error lists;
        # warnings alone never invalidate the result.  NOTE(review): the
        # helpers below also set is_valid=False as they go, which this
        # recomputation makes redundant.
        result.is_valid = (
            len(result.missing_required) == 0
            and len(result.type_errors) == 0
            and len(result.pattern_errors) == 0
        )

        return result

    def _check_required_vars(self, env_vars: dict[str, str], result: ValidationResult) -> None:
        """Check for missing required variables.

        Args:
            env_vars: Environment variables.
            result: Validation result to update.
        """
        required_vars = self.schema.get_required_vars()
        # Name matching is case-insensitive: compare upper-cased keys.
        env_keys_upper = {k.upper() for k in env_vars.keys()}

        for var in required_vars:
            if var.name.upper() not in env_keys_upper:
                result.missing_required.append(var.name)
                result.is_valid = False

    def _validate_types(self, env_vars: dict[str, str], result: ValidationResult) -> None:
        """Validate types of environment variables.

        Args:
            env_vars: Environment variables.
            result: Validation result to update.
        """
        env_vars_upper = {k.upper(): v for k, v in env_vars.items()}
        for var in self.schema.envvars:
            value = env_vars_upper.get(var.name.upper())

            # An unset variable with a declared default is acceptable.
            if value is None and var.default is not None:
                continue

            if value is not None:
                is_valid, error = validate_value(value, var.type, var.pattern)
                if not is_valid and error:
                    # NOTE(review): all failures from validate_value — including
                    # pattern failures — are appended here as "type_mismatch";
                    # nothing in this class ever fills result.pattern_errors.
                    result.type_errors.append(
                        ValidationError(
                            var_name=var.name,
                            error_type="type_mismatch",
                            message=error.message,
                            value=error.value,
                        )
                    )
                    result.is_valid = False

    def _check_extra_vars(self, env_vars: dict[str, str], result: ValidationResult) -> None:
        """Check for extra variables not in schema (warning only).

        Args:
            env_vars: Environment variables.
            result: Validation result to update.
        """
        schema_keys_upper = {v.name.upper() for v in self.schema.envvars}
        for key in env_vars.keys():
            if key.upper() not in schema_keys_upper:
                result.warnings.append(f"Unknown environment variable: {key}")
|
||||
|
||||
|
||||
def validate_environment(
    schema_path: str,
    env_file: Optional[str] = None,
    use_environment: bool = True,
) -> ValidationResult:
    """Convenience function to validate environment against a schema file.

    Args:
        schema_path: Path to the schema file (JSON or YAML).
        env_file: Optional path to .env file.
        use_environment: Whether to include os.environ.

    Returns:
        ValidationResult with validation status.
    """
    # NOTE(review): imported locally rather than at module top — presumably
    # to avoid an import cycle with envschema.schema; confirm.
    from envschema.schema import load_schema_from_file

    schema = load_schema_from_file(schema_path)
    loader = EnvLoader(file_path=env_file, use_environment=use_environment)
    env_vars = loader.load()

    engine = ValidationEngine(schema)
    return engine.validate(env_vars)
|
||||
132
envschema/formatters.py
Normal file
132
envschema/formatters.py
Normal file
@@ -0,0 +1,132 @@
|
||||
"""Output formatters for validation results."""
|
||||
|
||||
import json
|
||||
|
||||
from envschema.core import ValidationResult
|
||||
|
||||
|
||||
class TextFormatter:
    """Formatter for human-readable text output.

    Renders a ValidationResult as a short report, optionally using ANSI
    color escapes, with a plain-ASCII variant for CI logs.
    """

    # ANSI SGR escape sequences used for colored terminal output.
    RED = "\033[91m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RESET = "\033[0m"
    BOLD = "\033[1m"

    @staticmethod
    def format(result: ValidationResult, color: bool = True, ci_mode: bool = False) -> str:
        """Format validation result as text.

        Args:
            result: Validation result to format.
            color: Whether to use ANSI colors.
            ci_mode: CI mode (cleaner output without special chars).

        Returns:
            Formatted text.
        """
        # CI mode bypasses colors and unicode symbols entirely.  (Fix:
        # check this before allocating `lines`, which was dead work on
        # this path.)
        if ci_mode:
            return TextFormatter._format_ci(result)

        lines = []

        if result.is_valid:
            lines.append(f"{TextFormatter._color(TextFormatter.GREEN, '✓', color)} Validation passed")
            if result.warnings:
                lines.append("")
                lines.append("Warnings:")
                for warning in result.warnings:
                    lines.append(f"  {TextFormatter._color(TextFormatter.YELLOW, '⚠', color)} {warning}")
        else:
            lines.append(f"{TextFormatter._color(TextFormatter.RED, '✗', color)} Validation failed")
            lines.append("")

            if result.missing_required:
                lines.append("Missing required variables:")
                for var in result.missing_required:
                    lines.append(f"  {TextFormatter._color(TextFormatter.RED, '✗', color)} {var}")
                lines.append("")

            if result.type_errors:
                lines.append("Type errors:")
                for error in result.type_errors:
                    lines.append(f"  {TextFormatter._color(TextFormatter.RED, '✗', color)} {error.var_name}: {error.message}")
                lines.append("")

            if result.pattern_errors:
                lines.append("Pattern errors:")
                for error in result.pattern_errors:
                    lines.append(f"  {TextFormatter._color(TextFormatter.RED, '✗', color)} {error.var_name}: {error.message}")
                lines.append("")

            if result.warnings:
                lines.append("Warnings:")
                for warning in result.warnings:
                    lines.append(f"  {TextFormatter._color(TextFormatter.YELLOW, '⚠', color)} {warning}")

        return "\n".join(lines)

    @staticmethod
    def _format_ci(result: ValidationResult) -> str:
        """Format result for CI mode (no colors, ASCII only).

        NOTE(review): this variant reports only missing-required and type
        errors; pattern errors and warnings are omitted — confirm intended.
        """
        lines = []

        if result.is_valid:
            lines.append("Validation passed")
        else:
            lines.append("Validation failed")

        if result.missing_required:
            lines.append("Missing required variables: " + ", ".join(result.missing_required))

        if result.type_errors:
            for error in result.type_errors:
                lines.append(f"{error.var_name}: {error.message}")

        return "\n".join(lines)

    @staticmethod
    def _color(color_code: str, text: str, use_color: bool) -> str:
        """Wrap *text* in *color_code* + RESET when *use_color* is true."""
        if use_color:
            return f"{color_code}{text}{TextFormatter.RESET}"
        return text
|
||||
|
||||
|
||||
class JsonFormatter:
    """Formatter that renders a ValidationResult as pretty-printed JSON."""

    @staticmethod
    def format(result: ValidationResult) -> str:
        """Format validation result as JSON.

        Args:
            result: Validation result to format.

        Returns:
            JSON string.
        """
        payload = result.to_dict()
        return json.dumps(payload, indent=2)
|
||||
|
||||
|
||||
def format_result(
    result: ValidationResult,
    output_format: str = "text",
    color: bool = True,
    ci_mode: bool = False,
) -> str:
    """Format a validation result for display.

    Args:
        result: Validation result to format.
        output_format: Output format ('text' or 'json').
        color: Whether to use colors (text format only).
        ci_mode: CI mode (text format only).

    Returns:
        Formatted output string.
    """
    # Anything other than "json" falls back to the text formatter.
    if output_format != "json":
        return TextFormatter.format(result, color=color, ci_mode=ci_mode)
    return JsonFormatter.format(result)
|
||||
54
envschema/generator.py
Normal file
54
envschema/generator.py
Normal file
@@ -0,0 +1,54 @@
|
||||
"""Generator for .env.example files from schema definitions."""
|
||||
|
||||
from envschema.schema import Schema
|
||||
|
||||
|
||||
def generate_env_example(schema: Schema, include_descriptions: bool = True) -> str:
|
||||
"""Generate an .env.example file content from a schema.
|
||||
|
||||
Args:
|
||||
schema: The schema to generate from.
|
||||
include_descriptions: Whether to include description comments.
|
||||
|
||||
Returns:
|
||||
Formatted .env.example content.
|
||||
"""
|
||||
lines = []
|
||||
|
||||
lines.append("# Environment Variables Schema")
|
||||
if schema.version:
|
||||
lines.append(f"# Version: {schema.version}")
|
||||
lines.append("")
|
||||
|
||||
for var in schema.envvars:
|
||||
if include_descriptions and var.description:
|
||||
lines.append(f"# {var.description}")
|
||||
|
||||
if var.required:
|
||||
lines.append("# REQUIRED")
|
||||
elif var.default is not None:
|
||||
lines.append(f"# Default: {var.default}")
|
||||
|
||||
if var.required:
|
||||
lines.append(f"{var.name}=")
|
||||
elif var.default is not None:
|
||||
lines.append(f"{var.name}={var.default}")
|
||||
else:
|
||||
lines.append(f"{var.name}=")
|
||||
|
||||
lines.append("")
|
||||
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def generate_env_example_to_file(schema: Schema, output_path: str, include_descriptions: bool = True) -> None:
    """Generate and write an .env.example file.

    Args:
        schema: The schema to generate from.
        output_path: Path to write the .env.example file.
        include_descriptions: Whether to include description comments.
    """
    content = generate_env_example(schema, include_descriptions)
    # Explicit encoding keeps output identical across platforms instead of
    # depending on the locale-default codec.
    with open(output_path, "w", encoding="utf-8") as f:
        f.write(content)
|
||||
91
envschema/loader.py
Normal file
91
envschema/loader.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""Environment variable loader for .env files and os.environ."""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from dotenv import dotenv_values
|
||||
|
||||
|
||||
class EnvLoader:
    """Load environment variables from .env files or os.environ."""

    def __init__(self, file_path: Optional[str] = None, use_environment: bool = True):
        """Initialize the loader.

        Args:
            file_path: Path to .env file. If None, only uses os.environ.
            use_environment: Whether to include os.environ as fallback.
        """
        self.file_path = file_path
        self.use_environment = use_environment
        self._env_vars: dict[str, str] = {}
        # Explicit "has load() run" flag. The previous truthiness check on
        # _env_vars re-ran load() on every access whenever the loaded
        # environment was legitimately empty.
        self._loaded = False

    def load(self) -> dict[str, str]:
        """Load environment variables.

        Values from the .env file take precedence over os.environ.

        Returns:
            Dictionary of environment variable names to values.
        """
        self._env_vars = {}

        if self.file_path:
            path = Path(self.file_path)
            if path.exists():
                file_values = dotenv_values(str(path))
                for key, value in file_values.items():
                    # dotenv_values yields None for keys declared with no
                    # value; skip those rather than storing None.
                    if value is not None:
                        self._env_vars[key] = value

        if self.use_environment:
            for key, value in os.environ.items():
                # setdefault keeps file-provided values authoritative.
                self._env_vars.setdefault(key, value)

        self._loaded = True
        return self._env_vars

    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        """Get an environment variable value (lazily loading on first use).

        Args:
            key: Variable name.
            default: Default value if not found.

        Returns:
            Variable value or default.
        """
        if not self._loaded:
            self.load()
        return self._env_vars.get(key, default)

    def get_raw(self) -> dict[str, str]:
        """Get all loaded variables as a raw dictionary.

        Returns:
            Copy of all loaded environment variables.
        """
        if not self._loaded:
            self.load()
        return self._env_vars.copy()

    def set(self, key: str, value: str) -> None:
        """Set an environment variable value in memory.

        Marks the loader as loaded so a subsequent get() does not clobber
        the manually set value with a fresh load().

        Args:
            key: Variable name.
            value: Variable value.
        """
        self._env_vars[key] = value
        self._loaded = True
|
||||
|
||||
|
||||
def load_env_file(file_path: str) -> dict[str, str]:
    """Load environment variables from a single .env file.

    os.environ is deliberately excluded; only the file's values are returned.

    Args:
        file_path: Path to .env file.

    Returns:
        Dictionary of environment variables.
    """
    return EnvLoader(file_path=file_path, use_environment=False).load()
|
||||
110
envschema/schema.py
Normal file
110
envschema/schema.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""Schema models for environment variable definitions."""
|
||||
|
||||
import json
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
|
||||
class EnvVarType(str, Enum):
    """Supported environment variable types.

    Subclasses ``str`` so members compare equal to their schema string
    values (e.g. ``EnvVarType.STRING == "str"``) and serialize cleanly.
    """

    STRING = "str"
    INTEGER = "int"
    BOOLEAN = "bool"
    LIST = "list"
|
||||
|
||||
|
||||
class EnvVar(BaseModel):
    """Definition of a single environment variable.

    The name validator below upper-cases names on construction, so the
    rest of the package can compare variable names case-insensitively.
    """

    name: str = Field(..., description="Variable name (e.g., DATABASE_URL)")
    type: EnvVarType = Field(default=EnvVarType.STRING, description="Variable type")
    required: bool = Field(default=False, description="Whether variable is required")
    default: Optional[str] = Field(default=None, description="Default value if optional")
    description: Optional[str] = Field(default=None, description="Variable description")
    pattern: Optional[str] = Field(default=None, description="Regex pattern for validation")

    @field_validator("name")
    @classmethod
    def name_must_be_valid_env_var(cls, v: str) -> str:
        """Validate the name and normalize it to upper case.

        Underscores and hyphens are stripped before the isalnum() check, so
        only alphanumerics, '_' and '-' are accepted; an empty name fails
        because ''.isalnum() is False.

        NOTE(review): hyphens are unusual in POSIX environment variable
        names — confirm they are intentionally allowed here.
        """
        if not v.replace("_", "").replace("-", "").isalnum():
            raise ValueError("Variable name must contain only alphanumeric characters, underscores, and hyphens")
        return v.upper()
|
||||
|
||||
|
||||
class Schema(BaseModel):
    """Schema containing all environment variable definitions."""

    version: Optional[str] = Field(default="1.0", description="Schema version")
    envvars: list[EnvVar] = Field(default_factory=list, alias="envVars")

    # Accept both the field name ("envvars") and the alias ("envVars").
    model_config = {"populate_by_name": True}

    def get_var(self, name: str) -> Optional[EnvVar]:
        """Look up a variable definition by name (case-insensitive)."""
        target = name.upper()
        return next((v for v in self.envvars if v.name.upper() == target), None)

    def get_required_vars(self) -> list[EnvVar]:
        """Return every variable flagged as required."""
        return [v for v in self.envvars if v.required]
|
||||
|
||||
|
||||
def load_schema_from_file(file_path: str) -> Schema:
    """Load schema from a JSON or YAML file.

    Args:
        file_path: Path to the schema file

    Returns:
        Parsed Schema object

    Raises:
        FileNotFoundError: If schema file doesn't exist
        ValueError: If schema format is invalid or the extension is unsupported
    """
    path = Path(file_path)
    if not path.exists():
        raise FileNotFoundError(f"Schema file not found: {file_path}")

    # Explicit encoding so parsing does not depend on the locale default.
    content = path.read_text(encoding="utf-8")

    # Dispatch purely on the file extension; anything else is rejected.
    suffix = path.suffix.lower()
    if suffix in (".yaml", ".yml"):
        return load_yaml_schema(content)
    if suffix == ".json":
        return load_json_schema(content)
    raise ValueError(f"Unsupported schema format: {path.suffix}. Use .json or .yaml")
|
||||
|
||||
|
||||
def load_json_schema(content: str) -> Schema:
    """Load schema from JSON content.

    Args:
        content: JSON document text.

    Returns:
        Parsed Schema object.

    Raises:
        ValueError: If the JSON is malformed or does not match the schema model.
    """
    try:
        data = json.loads(content)
    except json.JSONDecodeError as e:
        # Chain the original error so the parse position is preserved.
        raise ValueError(f"Invalid JSON schema: {e}") from e

    try:
        return Schema.model_validate(data)
    except Exception as e:
        raise ValueError(f"Invalid schema structure: {e}") from e
|
||||
|
||||
|
||||
def load_yaml_schema(content: str) -> Schema:
    """Load schema from YAML content.

    Args:
        content: YAML document text.

    Returns:
        Parsed Schema object.

    Raises:
        ValueError: If the YAML is malformed or does not match the schema model.
    """
    try:
        data = yaml.safe_load(content)
    except yaml.YAMLError as e:
        # Chain the original error so the parse position is preserved.
        raise ValueError(f"Invalid YAML schema: {e}") from e

    try:
        return Schema.model_validate(data)
    except Exception as e:
        raise ValueError(f"Invalid schema structure: {e}") from e
|
||||
153
envschema/validators.py
Normal file
153
envschema/validators.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""Type validators for environment variable values."""
|
||||
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from envschema.schema import EnvVarType
|
||||
|
||||
|
||||
class ValidationError:
    """Represents a validation error."""

    def __init__(self, message: str, value: Optional[str] = None):
        # The offending value is kept so error output can echo it back.
        self.message = message
        self.value = value

    def __str__(self) -> str:
        """Render the message, appending the offending value when present."""
        if self.value is None:
            return self.message
        return f"{self.message} (got: {self.value!r})"
|
||||
|
||||
|
||||
class StringValidator:
    """Validator for string type - always passes."""

    @staticmethod
    def validate(value: Optional[str]) -> tuple[bool, Optional[ValidationError]]:
        """Accept any value, including None.

        Environment variables are always textual, so string validation
        cannot fail; the tuple shape matches the other validators. The
        original branched on None and returned the identical result in both
        arms — a single return expresses the contract directly.
        """
        return True, None
|
||||
|
||||
|
||||
class IntegerValidator:
    """Validator for integer type."""

    @staticmethod
    def validate(value: Optional[str]) -> tuple[bool, Optional[ValidationError]]:
        """Check that the value parses as an int; None is treated as absent."""
        if value is None:
            return True, None
        try:
            int(value)
        except ValueError:
            return False, ValidationError("Invalid integer value", value=value)
        return True, None
|
||||
|
||||
|
||||
class BooleanValidator:
    """Validator for boolean type."""

    # Accepted spellings, compared case-insensitively after stripping.
    TRUE_VALUES = {"true", "1", "yes", "on"}
    FALSE_VALUES = {"false", "0", "no", "off"}

    @staticmethod
    def validate(value: Optional[str]) -> tuple[bool, Optional[ValidationError]]:
        """Check that the value is one of the recognized boolean spellings."""
        if value is None:
            return True, None
        normalized = value.strip().lower()
        accepted = BooleanValidator.TRUE_VALUES | BooleanValidator.FALSE_VALUES
        if normalized in accepted:
            return True, None
        return False, ValidationError(
            "Invalid boolean value (expected: true, false, 1, 0, yes, no, on, off)",
            value=value,
        )
|
||||
|
||||
|
||||
class ListValidator:
    """Validator for list type (comma-separated values)."""

    @staticmethod
    def validate(value: Optional[str]) -> tuple[bool, Optional[ValidationError]]:
        """Check that the value contains at least one comma separator."""
        if value is None or "," in value:
            return True, None
        return False, ValidationError(
            "Invalid list value (expected comma-separated values)",
            value=value,
        )

    @staticmethod
    def parse(value: str) -> list[str]:
        """Parse a comma-separated string into a list.

        Surrounding whitespace is stripped and empty segments are dropped.

        Args:
            value: Comma-separated string.

        Returns:
            List of values.
        """
        stripped = (segment.strip() for segment in value.split(","))
        return [item for item in stripped if item]
|
||||
|
||||
|
||||
class PatternValidator:
    """Validator for pattern/regex validation."""

    @staticmethod
    def validate(value: Optional[str], pattern: str) -> tuple[bool, Optional[ValidationError]]:
        """Check the value against a regex (anchored at the start via re.match)."""
        if value is None:
            return True, None
        try:
            matched = re.match(pattern, value)
        except re.error:
            # The pattern itself is broken, not the value.
            return False, ValidationError(
                f"Invalid regex pattern: {pattern}",
                value=value,
            )
        if matched:
            return True, None
        return False, ValidationError(
            f"Value does not match pattern: {pattern}",
            value=value,
        )
|
||||
|
||||
|
||||
def get_validator(var_type: EnvVarType):
    """Get the validator class for a given type.

    Unknown types fall back to StringValidator, which accepts anything.

    Args:
        var_type: The environment variable type.

    Returns:
        Validator class.
    """
    if var_type == EnvVarType.INTEGER:
        return IntegerValidator
    if var_type == EnvVarType.BOOLEAN:
        return BooleanValidator
    if var_type == EnvVarType.LIST:
        return ListValidator
    return StringValidator
|
||||
|
||||
|
||||
def validate_value(value: Optional[str], var_type: EnvVarType, pattern: Optional[str] = None) -> tuple[bool, Optional[ValidationError]]:
    """Validate a value against a type and optional pattern.

    The pattern check only runs when the type check passed and both a
    non-None value and a pattern are present.

    Args:
        value: The value to validate.
        var_type: The expected type.
        pattern: Optional regex pattern.

    Returns:
        Tuple of (is_valid, error).
    """
    is_valid, error = get_validator(var_type).validate(value)
    if not (is_valid and pattern and value is not None):
        return is_valid, error
    return PatternValidator.validate(value, pattern)
|
||||
45
envschema_repo/.env.schema.json
Normal file
45
envschema_repo/.env.schema.json
Normal file
@@ -0,0 +1,45 @@
|
||||
{
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{
|
||||
"name": "DATABASE_URL",
|
||||
"type": "str",
|
||||
"required": true,
|
||||
"description": "PostgreSQL connection string"
|
||||
},
|
||||
{
|
||||
"name": "DATABASE_POOL_SIZE",
|
||||
"type": "int",
|
||||
"required": false,
|
||||
"default": "10",
|
||||
"description": "Database connection pool size"
|
||||
},
|
||||
{
|
||||
"name": "DEBUG_MODE",
|
||||
"type": "bool",
|
||||
"required": false,
|
||||
"default": "false",
|
||||
"description": "Enable debug mode"
|
||||
},
|
||||
{
|
||||
"name": "ALLOWED_HOSTS",
|
||||
"type": "list",
|
||||
"required": false,
|
||||
"description": "Comma-separated list of allowed hosts"
|
||||
},
|
||||
{
|
||||
"name": "API_KEY",
|
||||
"type": "str",
|
||||
"required": true,
|
||||
"pattern": "^[a-zA-Z0-9_-]+$",
|
||||
"description": "API authentication key"
|
||||
},
|
||||
{
|
||||
"name": "LOG_LEVEL",
|
||||
"type": "str",
|
||||
"required": false,
|
||||
"default": "INFO",
|
||||
"description": "Logging level (DEBUG, INFO, WARNING, ERROR)"
|
||||
}
|
||||
]
|
||||
}
|
||||
38
envschema_repo/.gitea/workflows/ci.yml
Normal file
38
envschema_repo/.gitea/workflows/ci.yml
Normal file
@@ -0,0 +1,38 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: '3.11'
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
pip install -e ".[dev]"
|
||||
|
||||
- name: Run unit tests
|
||||
run: pytest tests/unit/ -v
|
||||
|
||||
- name: Run integration tests
|
||||
run: pytest tests/integration/ -v
|
||||
|
||||
- name: Check code formatting
|
||||
run: |
|
||||
pip install ruff
|
||||
ruff check envschema/
|
||||
|
||||
- name: Upload coverage
|
||||
run: |
|
||||
pip install pytest-cov
|
||||
pytest --cov=envschema --cov-report=term-missing
|
||||
79
envschema_repo/.gitignore
vendored
Normal file
79
envschema_repo/.gitignore
vendored
Normal file
@@ -0,0 +1,79 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
|
||||
# PyInstaller
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.swp
|
||||
*.swo
|
||||
*~
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Ruff
|
||||
.ruff_cache/
|
||||
|
||||
# OS
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
21
envschema_repo/LICENSE
Normal file
21
envschema_repo/LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2024 EnvSchema Team
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
201
envschema_repo/README.md
Normal file
201
envschema_repo/README.md
Normal file
@@ -0,0 +1,201 @@
|
||||
# EnvSchema
|
||||
|
||||
[](https://7000pct.gitea.bloupla.net/7000pctAUTO/envschema/actions)
|
||||
[](https://pypi.org/project/envschema/)
|
||||
[](https://pypi.org/project/envschema/)
|
||||
[](https://opensource.org/licenses/MIT)
|
||||
|
||||
A CLI tool that validates environment variables against a JSON/YAML schema file. Developers define expected env vars with types, defaults, required flags, and descriptions in a schema. The tool validates actual .env files or runtime environment against this schema, catching type mismatches, missing required vars, and providing helpful error messages.
|
||||
|
||||
## Features
|
||||
|
||||
- Schema validation with type checking (str, int, bool, list)
|
||||
- Missing required variable detection
|
||||
- `.env.example` generation from schema
|
||||
- CI/CD integration for pre-deployment checks
|
||||
- Support for JSON and YAML schema formats
|
||||
- Pattern validation with regex support
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
pip install envschema
|
||||
```
|
||||
|
||||
Or install from source:
|
||||
|
||||
```bash
|
||||
git clone https://7000pct.gitea.bloupla.net/7000pctAUTO/envschema.git
|
||||
cd envschema
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. Create a schema file (`.env.schema.json` or `.env.schema.yaml`):
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{
|
||||
"name": "DATABASE_URL",
|
||||
"type": "str",
|
||||
"required": true,
|
||||
"description": "PostgreSQL connection string"
|
||||
},
|
||||
{
|
||||
"name": "DEBUG_MODE",
|
||||
"type": "bool",
|
||||
"required": false,
|
||||
"default": "false"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
2. Validate your `.env` file:
|
||||
|
||||
```bash
|
||||
envschema validate .env.schema.json --file .env
|
||||
```
|
||||
|
||||
## CLI Commands
|
||||
|
||||
### validate
|
||||
|
||||
Validate environment variables against a schema:
|
||||
|
||||
```bash
|
||||
envschema validate SCHEMA [--file PATH] [--env/--no-env] [--format text|json] [--ci] [--strict]
|
||||
```
|
||||
|
||||
Options:
|
||||
- `SCHEMA`: Path to the schema file (JSON or YAML)
|
||||
- `--file`, `-f`: Path to .env file to validate
|
||||
- `--env/--no-env`: Include os.environ in validation (default: true)
|
||||
- `--format`, `-o`: Output format (text or json, default: text)
|
||||
- `--ci`: CI mode (cleaner output)
|
||||
- `--strict`: Fail on warnings
|
||||
|
||||
### generate
|
||||
|
||||
Generate `.env.example` from a schema:
|
||||
|
||||
```bash
|
||||
envschema generate SCHEMA [--output PATH] [--no-comments]
|
||||
```
|
||||
|
||||
Options:
|
||||
- `SCHEMA`: Path to the schema file
|
||||
- `--output`, `-o`: Output file path (default: .env.example)
|
||||
- `--no-comments`: Don't include description comments
|
||||
|
||||
### check
|
||||
|
||||
Validate a schema file:
|
||||
|
||||
```bash
|
||||
envschema check SCHEMA
|
||||
```
|
||||
|
||||
## Schema Format
|
||||
|
||||
### JSON Schema
|
||||
|
||||
```json
|
||||
{
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{
|
||||
"name": "VAR_NAME",
|
||||
"type": "str|int|bool|list",
|
||||
"required": true|false,
|
||||
"default": "default_value",
|
||||
"description": "Variable description",
|
||||
"pattern": "regex_pattern"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
### YAML Schema
|
||||
|
||||
```yaml
|
||||
version: "1.0"
|
||||
envVars:
|
||||
- name: VAR_NAME
|
||||
type: str
|
||||
required: true
|
||||
default: "value"
|
||||
description: Variable description
|
||||
pattern: "^[A-Z]+$"
|
||||
```
|
||||
|
||||
## Supported Types
|
||||
|
||||
- `str`: String (always valid)
|
||||
- `int`: Integer (validates numeric values)
|
||||
- `bool`: Boolean (true, false, 1, 0, yes, no, on, off)
|
||||
- `list`: Comma-separated list of values
|
||||
|
||||
## Examples
|
||||
|
||||
### Validate with environment variables
|
||||
|
||||
```bash
|
||||
export DATABASE_URL="postgres://localhost/mydb"
|
||||
export DEBUG_MODE="true"
|
||||
envschema validate schema.json
|
||||
```
|
||||
|
||||
### Validate with .env file
|
||||
|
||||
```bash
|
||||
envschema validate schema.json --file .env
|
||||
```
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
```bash
|
||||
envschema validate schema.json --file .env --ci --format json
|
||||
```
|
||||
|
||||
Exit codes:
|
||||
- 0: Validation passed
|
||||
- 1: Validation failed
|
||||
- 2: Error (schema not found, invalid format)
|
||||
|
||||
### Generate .env.example
|
||||
|
||||
```bash
|
||||
envschema generate schema.json
|
||||
# Generates .env.example
|
||||
|
||||
envschema generate schema.json --output .env.dev
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
```python
|
||||
from envschema import ValidationEngine, EnvLoader
from envschema.schema import load_schema_from_file

# Load schema
schema = load_schema_from_file("schema.json")
|
||||
|
||||
# Load environment
|
||||
loader = EnvLoader(".env")
|
||||
env_vars = loader.load()
|
||||
|
||||
# Validate
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate(env_vars)
|
||||
|
||||
if not result.is_valid:
|
||||
print(result.missing_required)
|
||||
print(result.type_errors)
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see LICENSE file for details.
|
||||
17
envschema_repo/envschema/__init__.py
Normal file
17
envschema_repo/envschema/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""EnvSchema - Environment variable schema validation tool."""
|
||||
|
||||
__version__ = "0.1.0"
|
||||
|
||||
from envschema.schema import Schema, EnvVar, EnvVarType
|
||||
from envschema.core import ValidationEngine, ValidationResult, ValidationError
|
||||
from envschema.loader import EnvLoader
|
||||
|
||||
__all__ = [
|
||||
"Schema",
|
||||
"EnvVar",
|
||||
"EnvVarType",
|
||||
"ValidationEngine",
|
||||
"ValidationResult",
|
||||
"ValidationError",
|
||||
"EnvLoader",
|
||||
]
|
||||
152
envschema_repo/envschema/cli.py
Normal file
152
envschema_repo/envschema/cli.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""CLI interface for EnvSchema."""
|
||||
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
import click
|
||||
|
||||
from envschema import __version__
|
||||
from envschema.schema import load_schema_from_file, Schema
|
||||
from envschema.core import validate_environment
|
||||
from envschema.generator import generate_env_example_to_file
|
||||
from envschema.formatters import format_result
|
||||
|
||||
|
||||
@click.group()
@click.version_option(version=__version__)
def cli():
    """EnvSchema - Validate environment variables against a schema."""
    # Group entry point only: subcommands (validate, generate, check)
    # attach via @cli.command(); the group body itself does nothing.
    pass
|
||||
|
||||
|
||||
@cli.command()
@click.argument("schema", type=click.Path(exists=True))
@click.option(
    "--file",
    "-f",
    "env_file",
    type=click.Path(),
    help="Path to .env file to validate",
)
@click.option(
    "--env/--no-env",
    default=True,
    help="Include os.environ in validation",
)
@click.option(
    "--format",
    "-o",
    "output_format",
    type=click.Choice(["text", "json"]),
    default="text",
    help="Output format",
)
@click.option(
    "--ci",
    is_flag=True,
    help="CI mode (cleaner output)",
)
@click.option(
    "--strict",
    is_flag=True,
    help="Fail on warnings",
)
def validate(schema, env_file, env, output_format, ci, strict):
    """Validate environment variables against a schema.

    SCHEMA is the path to the schema file (JSON or YAML).

    Exit codes: 0 = validation passed, 1 = validation failed (or warnings
    present in --strict mode), 2 = schema error (missing file or invalid
    format).
    """
    try:
        result = validate_environment(
            schema_path=schema,
            env_file=env_file,
            use_environment=env,
        )

        # CI logs rarely render ANSI colors, so --ci also disables color.
        output = format_result(
            result,
            output_format=output_format,
            color=not ci,
            ci_mode=ci,
        )
        click.echo(output)

        if not result.is_valid:
            sys.exit(1)

        # Only reached when validation itself passed: strict mode promotes
        # warnings to a failure exit status.
        if strict and result.warnings:
            click.echo("Warnings found in strict mode", err=True)
            sys.exit(1)

        sys.exit(0)

    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
    except ValueError as e:
        # Raised by the schema loaders for malformed JSON/YAML or an
        # unsupported file extension.
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
|
||||
|
||||
|
||||
@cli.command()
@click.argument("schema", type=click.Path(exists=True))
@click.option(
    "--output",
    "-o",
    "output_file",
    type=click.Path(),
    help="Output file path (default: .env.example)",
)
@click.option(
    "--no-comments",
    is_flag=True,
    help="Don't include description comments",
)
def generate(schema, output_file, no_comments):
    """Generate .env.example from a schema.

    SCHEMA is the path to the schema file (JSON or YAML).

    Exits 0 on success, 2 when the schema cannot be loaded.
    """
    try:
        schema_obj = load_schema_from_file(schema)

        # Default destination when --output is not given.
        output_path = output_file or ".env.example"

        generate_env_example_to_file(
            schema_obj,
            output_path,
            include_descriptions=not no_comments,
        )

        click.echo(f"Generated {output_path}")

    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
    except ValueError as e:
        # Raised by the schema loaders for malformed or unsupported input.
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
|
||||
|
||||
|
||||
@cli.command()
@click.argument("schema", type=click.Path(exists=True))
def check(schema):
    """Check if a schema file is valid.

    SCHEMA is the path to the schema file (JSON or YAML).

    Exits 0 when the schema parses cleanly, 2 otherwise.
    """
    try:
        schema_obj = load_schema_from_file(schema)
        click.echo(f"Schema is valid (version: {schema_obj.version})")
        click.echo(f"Found {len(schema_obj.envvars)} environment variables")
        sys.exit(0)
    except FileNotFoundError as e:
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
    except ValueError as e:
        # Raised by the schema loaders for malformed or unsupported input.
        click.echo(f"Error: {e}", err=True)
        sys.exit(2)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
162
envschema_repo/envschema/core.py
Normal file
162
envschema_repo/envschema/core.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""Core validation engine for environment variables."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Optional
|
||||
|
||||
from envschema.schema import Schema, EnvVar
|
||||
from envschema.loader import EnvLoader
|
||||
from envschema.validators import validate_value
|
||||
|
||||
|
||||
@dataclass
class ValidationError:
    """Represents a validation error for a specific variable."""

    var_name: str
    error_type: str
    message: str
    value: Optional[str] = None

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON output."""
        return dict(
            var_name=self.var_name,
            error_type=self.error_type,
            message=self.message,
            value=self.value,
        )
|
||||
|
||||
|
||||
@dataclass
class ValidationResult:
    """Result of schema validation."""

    is_valid: bool
    missing_required: list[str] = field(default_factory=list)
    type_errors: list[ValidationError] = field(default_factory=list)
    pattern_errors: list[ValidationError] = field(default_factory=list)
    warnings: list[str] = field(default_factory=list)

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON output.

        Nested error objects are flattened via their own to_dict().
        """
        return {
            "is_valid": self.is_valid,
            "missing_required": self.missing_required,
            "type_errors": [err.to_dict() for err in self.type_errors],
            "pattern_errors": [err.to_dict() for err in self.pattern_errors],
            "warnings": self.warnings,
        }
|
||||
|
||||
|
||||
class ValidationEngine:
    """Engine for validating environment variables against a schema."""

    def __init__(self, schema: Schema):
        """Initialize the validation engine.

        Args:
            schema: The schema to validate against.
        """
        self.schema = schema

    def validate(self, env_vars: dict[str, str]) -> ValidationResult:
        """Validate environment variables against the schema.

        Args:
            env_vars: Dictionary of environment variable names to values.

        Returns:
            ValidationResult with all errors and warnings.
        """
        result = ValidationResult(is_valid=True)

        self._check_required_vars(env_vars, result)
        self._validate_types(env_vars, result)
        self._check_extra_vars(env_vars, result)

        # Recompute validity from the collected errors; warnings alone do
        # not make the result invalid. (The helpers also set is_valid=False
        # as they go, but this final assignment is authoritative.)
        result.is_valid = (
            len(result.missing_required) == 0
            and len(result.type_errors) == 0
            and len(result.pattern_errors) == 0
        )

        return result

    def _check_required_vars(self, env_vars: dict[str, str], result: ValidationResult) -> None:
        """Check for missing required variables.

        Args:
            env_vars: Environment variables.
            result: Validation result to update.
        """
        required_vars = self.schema.get_required_vars()
        # Compare case-insensitively: schema names are normalized to upper
        # case, but .env files may use any casing.
        env_keys_upper = {k.upper() for k in env_vars.keys()}

        for var in required_vars:
            if var.name.upper() not in env_keys_upper:
                result.missing_required.append(var.name)
                result.is_valid = False

    def _validate_types(self, env_vars: dict[str, str], result: ValidationResult) -> None:
        """Validate types of environment variables.

        Args:
            env_vars: Environment variables.
            result: Validation result to update.
        """
        env_vars_upper = {k.upper(): v for k, v in env_vars.items()}
        for var in self.schema.envvars:
            value = env_vars_upper.get(var.name.upper())

            # An unset variable with a declared default is accepted as-is;
            # the default value itself is not type-checked here.
            if value is None and var.default is not None:
                continue

            if value is not None:
                is_valid, error = validate_value(value, var.type, var.pattern)
                if not is_valid and error:
                    # NOTE(review): pattern failures from validate_value are
                    # also recorded here under type_errors with error_type
                    # "type_mismatch"; pattern_errors is never populated by
                    # this engine — confirm whether that is intended.
                    result.type_errors.append(
                        ValidationError(
                            var_name=var.name,
                            error_type="type_mismatch",
                            message=error.message,
                            value=error.value,
                        )
                    )
                    result.is_valid = False

    def _check_extra_vars(self, env_vars: dict[str, str], result: ValidationResult) -> None:
        """Check for extra variables not in schema (warning only).

        Args:
            env_vars: Environment variables.
            result: Validation result to update.
        """
        schema_keys_upper = {v.name.upper() for v in self.schema.envvars}
        for key in env_vars.keys():
            if key.upper() not in schema_keys_upper:
                result.warnings.append(f"Unknown environment variable: {key}")
|
||||
|
||||
|
||||
def validate_environment(
    schema_path: str,
    env_file: Optional[str] = None,
    use_environment: bool = True,
) -> ValidationResult:
    """Convenience function to validate environment against a schema file.

    Args:
        schema_path: Path to the schema file (JSON or YAML).
        env_file: Optional path to .env file.
        use_environment: Whether to include os.environ.

    Returns:
        ValidationResult with validation status.
    """
    # Imported here, not at module top, to avoid a circular import.
    from envschema.schema import load_schema_from_file

    schema = load_schema_from_file(schema_path)
    env_vars = EnvLoader(file_path=env_file, use_environment=use_environment).load()
    return ValidationEngine(schema).validate(env_vars)
|
||||
133
envschema_repo/envschema/formatters.py
Normal file
133
envschema_repo/envschema/formatters.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""Output formatters for validation results."""
|
||||
|
||||
import json
|
||||
from typing import Optional
|
||||
|
||||
from envschema.core import ValidationResult
|
||||
|
||||
|
||||
class TextFormatter:
    """Formatter for human-readable text output."""

    # ANSI escape codes used when color output is enabled.
    RED = "\033[91m"
    GREEN = "\033[92m"
    YELLOW = "\033[93m"
    RESET = "\033[0m"
    BOLD = "\033[1m"

    @staticmethod
    def format(result: "ValidationResult", color: bool = True, ci_mode: bool = False) -> str:
        """Format validation result as text.

        Args:
            result: Validation result to format.
            color: Whether to use ANSI colors.
            ci_mode: CI mode (cleaner output without special chars).

        Returns:
            Formatted text.
        """
        if ci_mode:
            return TextFormatter._format_ci(result)

        lines = []
        if result.is_valid:
            lines.append(f"{TextFormatter._color(TextFormatter.GREEN, '✓', color)} Validation passed")
            if result.warnings:
                lines.append("")
                lines.extend(TextFormatter._warning_lines(result.warnings, color))
        else:
            lines.append(f"{TextFormatter._color(TextFormatter.RED, '✗', color)} Validation failed")
            lines.append("")

            if result.missing_required:
                lines.append("Missing required variables:")
                for var in result.missing_required:
                    lines.append(f"  {TextFormatter._color(TextFormatter.RED, '✗', color)} {var}")
                lines.append("")

            if result.type_errors:
                lines.append("Type errors:")
                for error in result.type_errors:
                    lines.append(f"  {TextFormatter._color(TextFormatter.RED, '✗', color)} {error.var_name}: {error.message}")
                lines.append("")

            if result.pattern_errors:
                lines.append("Pattern errors:")
                for error in result.pattern_errors:
                    lines.append(f"  {TextFormatter._color(TextFormatter.RED, '✗', color)} {error.var_name}: {error.message}")
                lines.append("")

            if result.warnings:
                lines.extend(TextFormatter._warning_lines(result.warnings, color))

        return "\n".join(lines)

    @staticmethod
    def _warning_lines(warnings, color: bool):
        """Render the shared 'Warnings:' section (header plus one line per warning)."""
        rendered = ["Warnings:"]
        for warning in warnings:
            rendered.append(f"  {TextFormatter._color(TextFormatter.YELLOW, '⚠', color)} {warning}")
        return rendered

    @staticmethod
    def _format_ci(result: "ValidationResult") -> str:
        """Format result for CI mode (plain ASCII, one finding per line)."""
        lines = ["Validation passed" if result.is_valid else "Validation failed"]

        if result.missing_required:
            lines.append("Missing required variables: " + ", ".join(result.missing_required))

        for error in result.type_errors:
            lines.append(f"{error.var_name}: {error.message}")

        # BUG FIX: pattern errors and warnings were previously dropped in CI
        # mode, hiding the reason a CI run failed.
        for error in result.pattern_errors:
            lines.append(f"{error.var_name}: {error.message}")

        for warning in result.warnings:
            lines.append(f"Warning: {warning}")

        return "\n".join(lines)

    @staticmethod
    def _color(color_code: str, text: str, use_color: bool) -> str:
        """Wrap *text* in ANSI codes when *use_color* is true; pass through otherwise."""
        return f"{color_code}{text}{TextFormatter.RESET}" if use_color else text
|
||||
|
||||
class JsonFormatter:
    """Formatter for JSON output."""

    @staticmethod
    def format(result: ValidationResult) -> str:
        """Format validation result as JSON.

        Args:
            result: Validation result to format.

        Returns:
            JSON string (2-space indented).
        """
        payload = result.to_dict()
        return json.dumps(payload, indent=2)
|
||||
|
||||
def format_result(
    result: ValidationResult,
    output_format: str = "text",
    color: bool = True,
    ci_mode: bool = False,
) -> str:
    """Format a validation result.

    Args:
        result: Validation result to format.
        output_format: Output format ('text' or 'json').
        color: Whether to use colors (text format only).
        ci_mode: CI mode (text format only).

    Returns:
        Formatted output string.
    """
    # Anything other than 'json' falls through to the text formatter.
    if output_format != "json":
        return TextFormatter.format(result, color=color, ci_mode=ci_mode)
    return JsonFormatter.format(result)
||||
57
envschema_repo/envschema/generator.py
Normal file
57
envschema_repo/envschema/generator.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""Generator for .env.example files from schema definitions."""
|
||||
|
||||
from envschema.schema import Schema
|
||||
|
||||
|
||||
def generate_env_example(schema: "Schema", include_descriptions: bool = True) -> str:
    """Generate an .env.example file content from a schema.

    Args:
        schema: The schema to generate from.
        include_descriptions: Whether to include description comments.
            NOTE(review): the '# REQUIRED' / '# Default: ...' markers are
            emitted regardless of this flag; only the free-form description
            lines are toggled — confirm that is the intended contract.

    Returns:
        Formatted .env.example content.
    """
    lines = ["# Environment Variables Schema"]
    if schema.version:
        lines.append(f"# Version: {schema.version}")
    lines.append("")

    for var in schema.envvars:
        if include_descriptions and var.description:
            lines.append(f"# {var.description}")

        # BUG FIX: removed dead locals (default_part / type_part were
        # computed but never used) and the f-string prefix on a literal
        # with no placeholders (ruff F541).
        if var.required:
            lines.append("# REQUIRED")
            lines.append(f"{var.name}=")
        elif var.default is not None:
            lines.append(f"# Default: {var.default}")
            lines.append(f"{var.name}={var.default}")
        else:
            lines.append(f"{var.name}=")

        lines.append("")

    return "\n".join(lines)
|
||||
|
||||
def generate_env_example_to_file(schema: Schema, output_path: str, include_descriptions: bool = True) -> None:
    """Generate and write an .env.example file.

    Args:
        schema: The schema to generate from.
        output_path: Path to write the .env.example file.
        include_descriptions: Whether to include description comments.
    """
    rendered = generate_env_example(schema, include_descriptions)
    with open(output_path, "w") as handle:
        handle.write(rendered)
91
envschema_repo/envschema/loader.py
Normal file
91
envschema_repo/envschema/loader.py
Normal file
@@ -0,0 +1,91 @@
|
||||
"""Environment variable loader for .env files and os.environ."""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
from dotenv import dotenv_values
|
||||
|
||||
|
||||
class EnvLoader:
    """Load environment variables from .env files or os.environ."""

    def __init__(self, file_path: Optional[str] = None, use_environment: bool = True):
        """Initialize the loader.

        Args:
            file_path: Path to .env file. If None, only uses os.environ.
            use_environment: Whether to include os.environ as fallback.
        """
        self.file_path = file_path
        self.use_environment = use_environment
        self._env_vars: dict[str, str] = {}
        # BUG FIX: track whether load() has run explicitly. Previously
        # get()/get_raw() used `if not self._env_vars` as the "loaded"
        # check, so a legitimately empty environment was re-loaded on
        # every single access.
        self._loaded = False

    def load(self) -> dict[str, str]:
        """Load environment variables.

        Values from the .env file take precedence; os.environ only fills
        in names the file did not define.

        Returns:
            Dictionary of environment variable names to values.
        """
        self._env_vars = {}

        if self.file_path:
            path = Path(self.file_path)
            if path.exists():
                file_values = dotenv_values(str(path))
                for key, value in file_values.items():
                    # dotenv yields None for keys declared without a value.
                    if value is not None:
                        self._env_vars[key] = value

        if self.use_environment:
            for key, value in os.environ.items():
                if key not in self._env_vars:
                    self._env_vars[key] = value

        self._loaded = True
        return self._env_vars

    def get(self, key: str, default: Optional[str] = None) -> Optional[str]:
        """Get an environment variable value (loading lazily on first access).

        Args:
            key: Variable name.
            default: Default value if not found.

        Returns:
            Variable value or default.
        """
        self._ensure_loaded()
        return self._env_vars.get(key, default)

    def get_raw(self) -> dict[str, str]:
        """Get all loaded variables as a raw dictionary.

        Returns:
            Copy of all loaded environment variables.
        """
        self._ensure_loaded()
        return self._env_vars.copy()

    def set(self, key: str, value: str) -> None:
        """Set an environment variable value in memory.

        Marks the loader as loaded so a subsequent get() does not wipe
        the value with a fresh load() (matching the original behavior,
        where a non-empty mapping suppressed lazy loading). An explicit
        load() still rebuilds the mapping and discards values set here.

        Args:
            key: Variable name.
            value: Variable value.
        """
        self._env_vars[key] = value
        self._loaded = True

    def _ensure_loaded(self) -> None:
        """Run load() once, lazily, before the first read access."""
        if not self._loaded:
            self.load()
|
||||
|
||||
def load_env_file(file_path: str) -> dict[str, str]:
    """Convenience function to load environment from a file.

    os.environ is deliberately NOT consulted here — only the file's contents.

    Args:
        file_path: Path to .env file.

    Returns:
        Dictionary of environment variables.
    """
    return EnvLoader(file_path=file_path, use_environment=False).load()
||||
110
envschema_repo/envschema/schema.py
Normal file
110
envschema_repo/envschema/schema.py
Normal file
@@ -0,0 +1,110 @@
|
||||
"""Schema models for environment variable definitions."""
|
||||
|
||||
import json
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
|
||||
class EnvVarType(str, Enum):
    """Supported environment variable types.

    The values are the literal strings accepted in schema files
    (e.g. {"type": "int"}). Subclassing str lets members compare equal
    to their raw string spelling.
    """

    STRING = "str"
    INTEGER = "int"
    BOOLEAN = "bool"
    LIST = "list"
|
||||
|
||||
class EnvVar(BaseModel):
    """Definition of a single environment variable.

    The name is normalized to upper case by the field validator below.
    """

    name: str = Field(..., description="Variable name (e.g., DATABASE_URL)")
    type: EnvVarType = Field(default=EnvVarType.STRING, description="Variable type")
    required: bool = Field(default=False, description="Whether variable is required")
    default: Optional[str] = Field(default=None, description="Default value if optional")
    description: Optional[str] = Field(default=None, description="Variable description")
    pattern: Optional[str] = Field(default=None, description="Regex pattern for validation")

    @field_validator("name")
    @classmethod
    def name_must_be_valid_env_var(cls, v: str) -> str:
        """Reject names outside [A-Za-z0-9_-]; return the upper-cased name.

        An empty name is rejected too (''.isalnum() is False).
        NOTE(review): hyphens pass this check but are not valid in POSIX
        environment variable names — confirm they should be allowed.
        """
        if not v.replace("_", "").replace("-", "").isalnum():
            raise ValueError("Variable name must contain only alphanumeric characters, underscores, and hyphens")
        return v.upper()
|
||||
|
||||
class Schema(BaseModel):
    """Schema containing all environment variable definitions."""

    version: Optional[str] = Field(default="1.0", description="Schema version")
    # The alias accepts the camelCase key "envVars" used in schema files.
    envvars: list[EnvVar] = Field(default_factory=list, alias="envVars")

    # populate_by_name lets callers construct Schema(envvars=...) as well
    # as Schema(envVars=...).
    model_config = {"populate_by_name": True}

    def get_var(self, name: str) -> Optional[EnvVar]:
        """Get an environment variable by name (case-insensitive), or None."""
        name_upper = name.upper()
        for var in self.envvars:
            if var.name.upper() == name_upper:
                return var
        return None

    def get_required_vars(self) -> list[EnvVar]:
        """Get all required environment variables."""
        return [var for var in self.envvars if var.required]
|
||||
|
||||
def load_schema_from_file(file_path: str) -> "Schema":
    """Load schema from a JSON or YAML file.

    The parser is chosen by file extension (.json / .yaml / .yml).

    Args:
        file_path: Path to the schema file

    Returns:
        Parsed Schema object

    Raises:
        FileNotFoundError: If schema file doesn't exist
        ValueError: If schema format is invalid or the extension is unsupported
    """
    path = Path(file_path)
    if not path.exists():
        raise FileNotFoundError(f"Schema file not found: {file_path}")

    # BUG FIX: decode explicitly as UTF-8; the default encoding of
    # read_text() is platform/locale dependent.
    content = path.read_text(encoding="utf-8")

    suffix = path.suffix.lower()
    if suffix in (".yaml", ".yml"):
        return load_yaml_schema(content)
    if suffix == ".json":
        return load_json_schema(content)
    raise ValueError(f"Unsupported schema format: {path.suffix}. Use .json or .yaml")
|
||||
|
||||
def load_json_schema(content: str) -> "Schema":
    """Load schema from JSON content.

    Raises:
        ValueError: If the content is not valid JSON or does not match
            the Schema model.
    """
    try:
        data = json.loads(content)
    except json.JSONDecodeError as e:
        # BUG FIX: chain the original exception (raise ... from e) so the
        # JSON position information survives in __cause__.
        raise ValueError(f"Invalid JSON schema: {e}") from e

    try:
        return Schema.model_validate(data)
    except Exception as e:
        raise ValueError(f"Invalid schema structure: {e}") from e
|
||||
|
||||
def load_yaml_schema(content: str) -> "Schema":
    """Load schema from YAML content.

    Raises:
        ValueError: If the content is not valid YAML or does not match
            the Schema model.
    """
    try:
        data = yaml.safe_load(content)
    except yaml.YAMLError as e:
        # BUG FIX: chain the original exception (raise ... from e) so the
        # YAML position information survives in __cause__.
        raise ValueError(f"Invalid YAML schema: {e}") from e

    try:
        return Schema.model_validate(data)
    except Exception as e:
        raise ValueError(f"Invalid schema structure: {e}") from e
||||
153
envschema_repo/envschema/validators.py
Normal file
153
envschema_repo/envschema/validators.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""Type validators for environment variable values."""
|
||||
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from envschema.schema import EnvVarType
|
||||
|
||||
|
||||
class ValidationError:
    """Describes a single failed validation check."""

    def __init__(self, message: str, value: Optional[str] = None):
        # Human-readable explanation plus the offending raw value (if any).
        self.message = message
        self.value = value

    def __str__(self) -> str:
        if self.value is None:
            return self.message
        return f"{self.message} (got: {self.value!r})"
|
||||
|
||||
class StringValidator:
    """Validator for string type - always passes."""

    @staticmethod
    def validate(value: Optional[str]) -> "tuple[bool, Optional[ValidationError]]":
        """Accept any value, including None.

        BUG FIX: collapsed the redundant `if value is None` branch — both
        branches returned the same result.
        """
        return True, None
|
||||
|
||||
class IntegerValidator:
    """Validator for integer type."""

    @staticmethod
    def validate(value: Optional[str]) -> "tuple[bool, Optional[ValidationError]]":
        """Accept None or any string int() can parse (e.g. '42', '-7')."""
        if value is None:
            return True, None
        try:
            int(value)
        except ValueError:
            # BUG FIX: dropped the f-string prefix on a message with no
            # placeholders (ruff F541).
            return False, ValidationError("Invalid integer value", value=value)
        return True, None
|
||||
|
||||
class BooleanValidator:
    """Validator for boolean type."""

    # Accepted spellings; compared case-insensitively with surrounding
    # whitespace stripped.
    TRUE_VALUES = {"true", "1", "yes", "on"}
    FALSE_VALUES = {"false", "0", "no", "off"}

    @staticmethod
    def validate(value: Optional[str]) -> "tuple[bool, Optional[ValidationError]]":
        """Accept None or any recognized true/false spelling."""
        if value is None:
            return True, None
        normalized = value.lower().strip()
        if normalized in BooleanValidator.TRUE_VALUES or normalized in BooleanValidator.FALSE_VALUES:
            return True, None
        # BUG FIX: dropped the f-string prefix on a placeholder-free
        # literal (ruff F541).
        return False, ValidationError(
            "Invalid boolean value (expected: true, false, 1, 0, yes, no, on, off)",
            value=value,
        )
|
||||
|
||||
class ListValidator:
    """Validator for list type (comma-separated values)."""

    @staticmethod
    def validate(value: Optional[str]) -> "tuple[bool, Optional[ValidationError]]":
        """Accept None or any string containing at least one comma.

        NOTE(review): a single-element list (e.g. "localhost") is rejected
        because it contains no comma — confirm this strictness is intended.
        """
        if value is None or "," in value:
            return True, None
        # BUG FIX: dropped the f-string prefix on a placeholder-free
        # literal (ruff F541).
        return False, ValidationError(
            "Invalid list value (expected comma-separated values)",
            value=value,
        )

    @staticmethod
    def parse(value: str) -> list[str]:
        """Parse a comma-separated string into a list.

        Whitespace is stripped and empty segments are dropped.

        Args:
            value: Comma-separated string.

        Returns:
            List of values.
        """
        return [item.strip() for item in value.split(",") if item.strip()]
|
||||
|
||||
class PatternValidator:
    """Validator for pattern/regex validation."""

    @staticmethod
    def validate(value: Optional[str], pattern: str) -> tuple[bool, Optional[ValidationError]]:
        """Check *value* against *pattern*.

        NOTE(review): re.match anchors only at the start of the string, so
        this is a prefix match, not a full match — confirm that is intended.
        """
        if value is None:
            return True, None
        try:
            matched = re.match(pattern, value)
        except re.error:
            return False, ValidationError(
                f"Invalid regex pattern: {pattern}",
                value=value,
            )
        if matched:
            return True, None
        return False, ValidationError(
            f"Value does not match pattern: {pattern}",
            value=value,
        )
|
||||
|
||||
def get_validator(var_type: EnvVarType):
    """Get the validator class for a given type.

    Args:
        var_type: The environment variable type.

    Returns:
        Validator class (StringValidator when the type is unrecognized).
    """
    # Equality (not identity) so raw strings like "int" keep matching,
    # exactly as they did with the original dict lookup (EnvVarType is a
    # str subclass).
    if var_type == EnvVarType.INTEGER:
        return IntegerValidator
    if var_type == EnvVarType.BOOLEAN:
        return BooleanValidator
    if var_type == EnvVarType.LIST:
        return ListValidator
    return StringValidator
|
||||
|
||||
def validate_value(value: Optional[str], var_type: EnvVarType, pattern: Optional[str] = None) -> tuple[bool, Optional[ValidationError]]:
    """Validate a value against a type and optional pattern.

    The pattern is only consulted when the type check passes.

    Args:
        value: The value to validate.
        var_type: The expected type.
        pattern: Optional regex pattern.

    Returns:
        Tuple of (is_valid, error).
    """
    ok, err = get_validator(var_type).validate(value)
    if not ok or not pattern or value is None:
        return ok, err
    return PatternValidator.validate(value, pattern)
||||
49
envschema_repo/pyproject.toml
Normal file
49
envschema_repo/pyproject.toml
Normal file
@@ -0,0 +1,49 @@
|
||||
# Packaging metadata for the envschema distribution (PEP 621).
# NOTE(review): setup.py in this repo duplicates this metadata — keep the
# two files in sync when editing either.
[build-system]
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"

[project]
name = "envschema"
version = "0.1.0"
description = "A CLI tool that validates environment variables against a JSON/YAML schema file"
readme = "README.md"
requires-python = ">=3.10"
license = {text = "MIT"}
authors = [
    {name = "EnvSchema Team"}
]
keywords = ["environment", "validation", "schema", "cli", "devops"]
classifiers = [
    "Development Status :: 4 - Beta",
    "Intended Audience :: Developers",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.10",
    "Programming Language :: Python :: 3.11",
    "Programming Language :: Python :: 3.12",
]
# Runtime dependencies: CLI (click), schema parsing (pyyaml/pydantic),
# .env loading (python-dotenv).
dependencies = [
    "click>=8.1.7",
    "pyyaml>=6.0.1",
    "python-dotenv>=1.0.0",
    "pydantic>=2.5.0",
]

[project.optional-dependencies]
dev = [
    "pytest>=7.0.0",
    "pytest-cov>=4.0.0",
]

# Console entry point: `envschema` runs envschema.cli:cli.
[project.scripts]
envschema = "envschema.cli:cli"

[tool.setuptools.packages.find]
where = ["."]
include = ["envschema*"]

# Pytest defaults: verbose output with coverage reporting.
[tool.pytest.ini_options]
testpaths = ["tests"]
python_files = ["test_*.py"]
python_functions = ["test_*"]
addopts = "-v --cov=envschema --cov-report=term-missing"
||||
37
envschema_repo/setup.py
Normal file
37
envschema_repo/setup.py
Normal file
@@ -0,0 +1,37 @@
|
||||
# NOTE(review): this setup.py duplicates the metadata declared in
# pyproject.toml (same name, version, dependencies, entry point) — keep
# both in sync, or drop this file once pyproject-only installs suffice.
from setuptools import setup, find_packages


setup(
    name="envschema",
    version="0.1.0",
    description="A CLI tool that validates environment variables against a JSON/YAML schema file",
    author="EnvSchema Team",
    packages=find_packages(where="."),
    package_dir={"": "."},
    python_requires=">=3.10",
    install_requires=[
        "click>=8.1.7",
        "pyyaml>=6.0.1",
        "python-dotenv>=1.0.0",
        "pydantic>=2.5.0",
    ],
    extras_require={
        "dev": [
            "pytest>=7.0.0",
            "pytest-cov>=4.0.0",
        ]
    },
    entry_points={
        "console_scripts": [
            "envschema=envschema.cli:cli",
        ],
    },
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
    ],
)
||||
0
envschema_repo/tests/__init__.py
Normal file
0
envschema_repo/tests/__init__.py
Normal file
73
envschema_repo/tests/conftest.py
Normal file
73
envschema_repo/tests/conftest.py
Normal file
@@ -0,0 +1,73 @@
|
||||
import pytest
|
||||
|
||||
|
||||
@pytest.fixture
def sample_har_data():
    # Two-entry HTTP Archive (HAR) capture: a GET returning a user and a
    # POST creating one.
    # NOTE(review): nothing in the envschema package consumes HAR data —
    # this fixture looks copied from another project; confirm it is needed.
    return {
        "log": {
            "version": "1.2",
            "creator": {"name": "Test", "version": "1.0"},
            "entries": [
                {
                    "startedDateTime": "2024-01-01T00:00:00.000Z",
                    "time": 100,
                    "request": {
                        "method": "GET",
                        "url": "https://api.example.com/users/123",
                        "headers": [
                            {"name": "Content-Type", "value": "application/json"},
                            {"name": "Authorization", "value": "Bearer test_token"},
                        ],
                        "queryString": [{"name": "include", "value": "profile"}],
                        "postData": None,
                    },
                    "response": {
                        "status": 200,
                        "statusText": "OK",
                        "headers": [
                            {"name": "Content-Type", "value": "application/json"},
                        ],
                        "content": {
                            "mimeType": "application/json",
                            "text": '{"id": 123, "name": "John Doe", "email": "john@example.com"}',
                        },
                    },
                },
                {
                    "startedDateTime": "2024-01-01T00:00:01.000Z",
                    "time": 200,
                    "request": {
                        "method": "POST",
                        "url": "https://api.example.com/users",
                        "headers": [
                            {"name": "Content-Type", "value": "application/json"},
                        ],
                        "queryString": [],
                        "postData": {
                            "mimeType": "application/json",
                            "text": '{"name": "Jane Doe", "email": "jane@example.com"}',
                        },
                    },
                    "response": {
                        "status": 201,
                        "statusText": "Created",
                        "headers": [
                            {"name": "Content-Type", "value": "application/json"},
                        ],
                        "content": {
                            "mimeType": "application/json",
                            "text": '{"id": 456, "name": "Jane Doe", "email": "jane@example.com", "created_at": "2024-01-01T00:00:01Z"}',
                        },
                    },
                },
            ],
        }
    }
|
||||
|
||||
@pytest.fixture
def sample_har_file(tmp_path, sample_har_data):
    # Serializes the in-memory HAR fixture to a real file and returns the
    # path as a string.
    import json
    har_file = tmp_path / "test.har"
    har_file.write_text(json.dumps(sample_har_data))
    return str(har_file)
||||
0
envschema_repo/tests/integration/__init__.py
Normal file
0
envschema_repo/tests/integration/__init__.py
Normal file
120
envschema_repo/tests/integration/test_cli.py
Normal file
120
envschema_repo/tests/integration/test_cli.py
Normal file
@@ -0,0 +1,120 @@
|
||||
"""Integration tests for CLI commands."""
|
||||
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
|
||||
from envschema.cli import cli
|
||||
|
||||
|
||||
@pytest.fixture
def temp_schema_file():
    """Create a temporary schema file.

    Uses delete=False so the file survives the `with` block; it is removed
    explicitly in the teardown after the yield.
    """
    with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
        json.dump({
            "version": "1.0",
            "envVars": [
                {"name": "DATABASE_URL", "type": "str", "required": True},
                {"name": "DEBUG_MODE", "type": "bool", "required": False, "default": "false"},
                {"name": "PORT", "type": "int", "required": False, "default": "8080"},
            ]
        }, f)
        temp_path = f.name
    yield temp_path
    os.unlink(temp_path)
|
||||
|
||||
@pytest.fixture
def temp_env_file():
    """Create a temporary .env file.

    Provides DATABASE_URL (required by the schema fixture) and DEBUG_MODE;
    PORT is left to its schema default. Removed in the teardown.
    """
    with tempfile.NamedTemporaryFile(mode="w", suffix=".env", delete=False) as f:
        f.write("DATABASE_URL=postgres://localhost/mydb\n")
        f.write("DEBUG_MODE=true\n")
        temp_path = f.name
    yield temp_path
    os.unlink(temp_path)
|
||||
|
||||
class TestValidateCommand:
    """Tests for the validate command."""

    def test_validate_missing_schema(self):
        # A missing schema file is treated as a usage error: exit code 2.
        runner = CliRunner()
        result = runner.invoke(cli, ["validate", "/nonexistent/schema.json"])
        assert result.exit_code == 2
        assert "Error" in result.output

    def test_validate_valid_env(self, temp_schema_file, temp_env_file):
        # --no-env restricts the run to the .env file contents only.
        runner = CliRunner()
        result = runner.invoke(cli, ["validate", temp_schema_file, "--file", temp_env_file, "--no-env"])
        assert result.exit_code == 0

    def test_validate_missing_required(self, temp_schema_file):
        # No .env file and --no-env: the required DATABASE_URL cannot be
        # found, so validation fails with exit code 1.
        runner = CliRunner()
        result = runner.invoke(cli, ["validate", temp_schema_file, "--no-env"])
        assert result.exit_code == 1
        assert "DATABASE_URL" in result.output

    def test_validate_with_json_output(self, temp_schema_file, temp_env_file):
        runner = CliRunner()
        result = runner.invoke(cli, ["validate", temp_schema_file, "--file", temp_env_file, "--no-env", "--format", "json"])
        assert result.exit_code == 0
        data = json.loads(result.output)
        assert data["is_valid"] is True

    def test_validate_ci_mode(self, temp_schema_file, temp_env_file):
        # CI output must avoid the unicode check/cross marks.
        runner = CliRunner()
        result = runner.invoke(cli, ["validate", temp_schema_file, "--file", temp_env_file, "--no-env", "--ci"])
        assert result.exit_code == 0
        assert "✓" not in result.output
|
||||
|
||||
class TestGenerateCommand:
    """Tests for the generate command."""

    def test_generate_basic(self, temp_schema_file):
        # Default output path is .env.example in the working directory.
        runner = CliRunner()
        with runner.isolated_filesystem():
            result = runner.invoke(cli, ["generate", temp_schema_file])
            assert result.exit_code == 0
            assert ".env.example" in result.output

    def test_generate_to_custom_path(self, temp_schema_file, tmp_path):
        runner = CliRunner()
        output_path = tmp_path / "custom.env.example"
        result = runner.invoke(cli, ["generate", temp_schema_file, "--output", str(output_path)])
        assert result.exit_code == 0
        assert output_path.read_text()

    def test_generate_no_comments(self, temp_schema_file, tmp_path):
        # NOTE(review): this assertion is weak — it passes if EITHER no
        # description text OR no '#' appears; it does not prove comments
        # were suppressed. Consider tightening it.
        runner = CliRunner()
        output_path = tmp_path / "no_comments.env"
        result = runner.invoke(cli, ["generate", temp_schema_file, "--output", str(output_path), "--no-comments"])
        assert result.exit_code == 0
        content = output_path.read_text()
        assert "description" not in content.lower() or "#" not in content
|
||||
|
||||
class TestCheckCommand:
    """Tests for the check command."""

    def test_check_valid_schema(self, temp_schema_file):
        runner = CliRunner()
        result = runner.invoke(cli, ["check", temp_schema_file])
        assert result.exit_code == 0
        assert "valid" in result.output.lower()

    def test_check_invalid_schema(self):
        # An unknown variable type must be rejected as a schema error
        # (exit code 2). try/finally guarantees the temp file is removed.
        runner = CliRunner()
        with tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False) as f:
            f.write('{"version": "1.0", "envVars": [{"name": "VAR", "type": "invalid_type"}]}')
            temp_path = f.name
        try:
            result = runner.invoke(cli, ["check", temp_path])
            assert result.exit_code == 2
        finally:
            os.unlink(temp_path)
||||
183
envschema_repo/tests/integration/test_full_flow.py
Normal file
183
envschema_repo/tests/integration/test_full_flow.py
Normal file
@@ -0,0 +1,183 @@
|
||||
"""End-to-end integration tests for the full validation flow."""
|
||||
|
||||
import json
|
||||
import os
|
||||
import tempfile
|
||||
|
||||
import pytest
|
||||
from click.testing import CliRunner
|
||||
|
||||
from envschema.cli import cli
|
||||
from envschema.core import validate_environment
|
||||
from envschema.generator import generate_env_example
|
||||
|
||||
|
||||
class TestFullValidationFlow:
    """Integration tests for complete validation workflows."""

    def test_json_schema_with_valid_env(self):
        """Test validating a valid .env against a JSON schema."""
        with tempfile.TemporaryDirectory() as tmpdir:
            schema_path = os.path.join(tmpdir, "schema.json")
            env_path = os.path.join(tmpdir, ".env")

            # Exercises all four supported types: str, bool, int, list.
            schema_data = {
                "version": "1.0",
                "envVars": [
                    {"name": "DATABASE_URL", "type": "str", "required": True},
                    {"name": "DEBUG", "type": "bool", "required": False, "default": "false"},
                    {"name": "PORT", "type": "int", "required": False, "default": "8080"},
                    {"name": "ALLOWED_HOSTS", "type": "list", "required": False},
                ]
            }

            with open(schema_path, "w") as f:
                json.dump(schema_data, f)

            with open(env_path, "w") as f:
                f.write("DATABASE_URL=postgres://localhost/mydb\n")
                f.write("DEBUG=true\n")
                f.write("PORT=3000\n")
                f.write("ALLOWED_HOSTS=localhost,127.0.0.1\n")

            runner = CliRunner()
            result = runner.invoke(cli, ["validate", schema_path, "--file", env_path, "--no-env"])

            assert result.exit_code == 0

    def test_json_schema_with_invalid_types(self):
        """Test that type mismatches are caught."""
        with tempfile.TemporaryDirectory() as tmpdir:
            schema_path = os.path.join(tmpdir, "schema.json")
            env_path = os.path.join(tmpdir, ".env")

            schema_data = {
                "version": "1.0",
                "envVars": [
                    {"name": "PORT", "type": "int", "required": True},
                ]
            }

            with open(schema_path, "w") as f:
                json.dump(schema_data, f)

            with open(env_path, "w") as f:
                f.write("PORT=not_a_number\n")

            runner = CliRunner()
            result = runner.invoke(cli, ["validate", schema_path, "--file", env_path, "--no-env"])

            # Validation failure (not a usage error), and the offending
            # variable must be named in the output.
            assert result.exit_code == 1
            assert "PORT" in result.output

    def test_missing_required_variables(self):
        """Test that missing required variables are reported."""
        with tempfile.TemporaryDirectory() as tmpdir:
            schema_path = os.path.join(tmpdir, "schema.json")
            env_path = os.path.join(tmpdir, ".env")

            schema_data = {
                "version": "1.0",
                "envVars": [
                    {"name": "REQUIRED_VAR1", "type": "str", "required": True},
                    {"name": "REQUIRED_VAR2", "type": "str", "required": True},
                    {"name": "OPTIONAL_VAR", "type": "str", "required": False},
                ]
            }

            with open(schema_path, "w") as f:
                json.dump(schema_data, f)

            # Only one of the two required variables is provided.
            with open(env_path, "w") as f:
                f.write("REQUIRED_VAR1=value1\n")

            runner = CliRunner()
            result = runner.invoke(cli, ["validate", schema_path, "--file", env_path, "--no-env"])

            assert result.exit_code == 1
            assert "REQUIRED_VAR2" in result.output

    def test_generate_and_validate_flow(self):
        """Test generating .env.example and then validating it."""
        with tempfile.TemporaryDirectory() as tmpdir:
            schema_path = os.path.join(tmpdir, "schema.json")
            example_path = os.path.join(tmpdir, ".env.example")

            schema_data = {
                "version": "1.0",
                "envVars": [
                    {"name": "DATABASE_URL", "type": "str", "required": True, "description": "Database connection string"},
                    {"name": "DEBUG", "type": "bool", "required": False, "default": "false", "description": "Enable debug mode"},
                    {"name": "PORT", "type": "int", "required": False, "default": "8080", "description": "Server port"},
                ]
            }

            with open(schema_path, "w") as f:
                json.dump(schema_data, f)

            runner = CliRunner()
            result = runner.invoke(cli, ["generate", schema_path, "--output", example_path])
            assert result.exit_code == 0

            # Required vars render as empty assignments; optional vars
            # render with their defaults; descriptions become comments.
            with open(example_path, "r") as f:
                content = f.read()
                assert "DATABASE_URL=" in content
                assert "DEBUG=false" in content
                assert "PORT=8080" in content
                assert "Database connection string" in content
||||
|
||||
|
||||
class TestCIMode:
|
||||
"""Tests for CI mode functionality."""
|
||||
|
||||
def test_ci_mode_clean_output(self):
|
||||
"""Test that CI mode produces cleaner output."""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
schema_path = os.path.join(tmpdir, "schema.json")
|
||||
env_path = os.path.join(tmpdir, ".env")
|
||||
|
||||
schema_data = {
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{"name": "DATABASE_URL", "type": "str", "required": True},
|
||||
]
|
||||
}
|
||||
|
||||
with open(schema_path, "w") as f:
|
||||
json.dump(schema_data, f)
|
||||
|
||||
with open(env_path, "w") as f:
|
||||
f.write("DATABASE_URL=postgres://localhost/mydb\n")
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["validate", schema_path, "--file", env_path, "--no-env", "--ci"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert "✓" not in result.output
|
||||
assert "✗" not in result.output
|
||||
|
||||
def test_ci_mode_json_output(self):
|
||||
"""Test CI mode with JSON output."""
|
||||
with tempfile.TemporaryDirectory() as tmpdir:
|
||||
schema_path = os.path.join(tmpdir, "schema.json")
|
||||
env_path = os.path.join(tmpdir, ".env")
|
||||
|
||||
schema_data = {
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{"name": "DATABASE_URL", "type": "str", "required": True},
|
||||
]
|
||||
}
|
||||
|
||||
with open(schema_path, "w") as f:
|
||||
json.dump(schema_data, f)
|
||||
|
||||
with open(env_path, "w") as f:
|
||||
f.write("DATABASE_URL=postgres://localhost/mydb\n")
|
||||
|
||||
runner = CliRunner()
|
||||
result = runner.invoke(cli, ["validate", schema_path, "--file", env_path, "--no-env", "--ci", "--format", "json"])
|
||||
|
||||
assert result.exit_code == 0
|
||||
data = json.loads(result.output)
|
||||
assert data["is_valid"] is True
|
||||
0
envschema_repo/tests/unit/__init__.py
Normal file
0
envschema_repo/tests/unit/__init__.py
Normal file
188
envschema_repo/tests/unit/test_core.py
Normal file
188
envschema_repo/tests/unit/test_core.py
Normal file
@@ -0,0 +1,188 @@
|
||||
"""Unit tests for the validation engine."""
|
||||
|
||||
import pytest
|
||||
|
||||
from envschema.schema import Schema, EnvVar, EnvVarType
|
||||
from envschema.core import ValidationEngine, ValidationResult
|
||||
|
||||
|
||||
class TestValidationResult:
|
||||
"""Tests for ValidationResult."""
|
||||
|
||||
def test_valid_result(self):
|
||||
result = ValidationResult(is_valid=True)
|
||||
assert result.is_valid is True
|
||||
assert result.missing_required == []
|
||||
assert result.type_errors == []
|
||||
assert result.pattern_errors == []
|
||||
assert result.warnings == []
|
||||
|
||||
def test_result_to_dict(self):
|
||||
result = ValidationResult(is_valid=True)
|
||||
d = result.to_dict()
|
||||
assert d["is_valid"] is True
|
||||
assert d["missing_required"] == []
|
||||
|
||||
|
||||
class TestValidationEngine:
|
||||
"""Tests for ValidationEngine."""
|
||||
|
||||
def test_validate_empty_env(self):
|
||||
schema = Schema(envvars=[])
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_missing_required(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="REQUIRED_VAR", required=True),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({})
|
||||
assert result.is_valid is False
|
||||
assert "REQUIRED_VAR" in result.missing_required
|
||||
|
||||
def test_validate_present_required(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="REQUIRED_VAR", required=True),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"REQUIRED_VAR": "value"})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_optional_missing(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="OPTIONAL_VAR", required=False),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_with_default(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="VAR_WITH_DEFAULT", required=False, default="default_value"),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_string_type(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="STRING_VAR", type=EnvVarType.STRING),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"STRING_VAR": "any value"})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_integer_type_valid(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="INT_VAR", type=EnvVarType.INTEGER),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"INT_VAR": "42"})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_integer_type_invalid(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="INT_VAR", type=EnvVarType.INTEGER),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"INT_VAR": "not_a_number"})
|
||||
assert result.is_valid is False
|
||||
assert len(result.type_errors) == 1
|
||||
assert result.type_errors[0].var_name == "INT_VAR"
|
||||
|
||||
def test_validate_boolean_type_valid(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="BOOL_VAR", type=EnvVarType.BOOLEAN),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"BOOL_VAR": "true"})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_boolean_type_invalid(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="BOOL_VAR", type=EnvVarType.BOOLEAN),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"BOOL_VAR": "maybe"})
|
||||
assert result.is_valid is False
|
||||
|
||||
def test_validate_list_type_valid(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="LIST_VAR", type=EnvVarType.LIST),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"LIST_VAR": "a,b,c"})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_list_type_invalid(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="LIST_VAR", type=EnvVarType.LIST),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"LIST_VAR": "single_value"})
|
||||
assert result.is_valid is False
|
||||
|
||||
def test_validate_pattern_match(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="PATTERN_VAR", type=EnvVarType.STRING, pattern=r"^[A-Z]+$"),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"PATTERN_VAR": "VALID"})
|
||||
assert result.is_valid is True
|
||||
|
||||
def test_validate_pattern_no_match(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="PATTERN_VAR", type=EnvVarType.STRING, pattern=r"^[A-Z]+$"),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"PATTERN_VAR": "invalid"})
|
||||
assert result.is_valid is False
|
||||
|
||||
def test_validate_extra_var_warning(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="KNOWN_VAR", type=EnvVarType.STRING),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"KNOWN_VAR": "value", "UNKNOWN_VAR": "other"})
|
||||
assert result.is_valid is True
|
||||
assert "Unknown environment variable: UNKNOWN_VAR" in result.warnings
|
||||
|
||||
def test_validate_case_insensitive(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="TEST_VAR", required=True),
|
||||
]
|
||||
)
|
||||
engine = ValidationEngine(schema)
|
||||
result = engine.validate({"test_var": "value"})
|
||||
assert result.is_valid is True
|
||||
105
envschema_repo/tests/unit/test_generator.py
Normal file
105
envschema_repo/tests/unit/test_generator.py
Normal file
@@ -0,0 +1,105 @@
|
||||
"""Unit tests for the .env.example generator."""
|
||||
|
||||
import pytest
|
||||
|
||||
from envschema.schema import Schema, EnvVar, EnvVarType
|
||||
from envschema.generator import generate_env_example, generate_env_example_to_file
|
||||
|
||||
|
||||
class TestGenerateEnvExample:
|
||||
"""Tests for generate_env_example function."""
|
||||
|
||||
def test_empty_schema(self):
|
||||
schema = Schema()
|
||||
result = generate_env_example(schema)
|
||||
assert "# Environment Variables Schema" in result
|
||||
|
||||
def test_basic_variable(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="TEST_VAR", type=EnvVarType.STRING),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema)
|
||||
assert "TEST_VAR=" in result
|
||||
|
||||
def test_required_variable(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="REQUIRED_VAR", required=True),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema)
|
||||
assert "# REQUIRED" in result
|
||||
assert "REQUIRED_VAR=" in result
|
||||
|
||||
def test_variable_with_default(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="VAR_WITH_DEFAULT", default="default_value"),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema)
|
||||
assert "VAR_WITH_DEFAULT=default_value" in result
|
||||
|
||||
def test_variable_with_description(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(
|
||||
name="DESCRIBED_VAR",
|
||||
description="This is a description",
|
||||
),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema)
|
||||
assert "# This is a description" in result
|
||||
|
||||
def test_variable_with_type(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="INT_VAR", type=EnvVarType.INTEGER),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema)
|
||||
assert "INT_VAR=" in result
|
||||
|
||||
def test_no_descriptions(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(
|
||||
name="VAR",
|
||||
description="Some description",
|
||||
),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema, include_descriptions=False)
|
||||
assert "Some description" not in result
|
||||
|
||||
def test_multiple_variables(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="VAR1", required=True, description="First var"),
|
||||
EnvVar(name="VAR2", default="value"),
|
||||
EnvVar(name="VAR3", type=EnvVarType.INTEGER),
|
||||
]
|
||||
)
|
||||
result = generate_env_example(schema)
|
||||
assert "VAR1=" in result
|
||||
assert "VAR2=value" in result
|
||||
assert "VAR3=" in result
|
||||
|
||||
|
||||
class TestGenerateEnvExampleToFile:
|
||||
"""Tests for generate_env_example_to_file function."""
|
||||
|
||||
def test_write_to_file(self, tmp_path):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="TEST_VAR"),
|
||||
]
|
||||
)
|
||||
output_path = tmp_path / ".env.example"
|
||||
generate_env_example_to_file(schema, str(output_path))
|
||||
|
||||
content = output_path.read_text()
|
||||
assert "TEST_VAR=" in content
|
||||
174
envschema_repo/tests/unit/test_schema.py
Normal file
174
envschema_repo/tests/unit/test_schema.py
Normal file
@@ -0,0 +1,174 @@
|
||||
"""Unit tests for schema parsing."""
|
||||
|
||||
import json
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
|
||||
from envschema.schema import (
|
||||
Schema,
|
||||
EnvVar,
|
||||
EnvVarType,
|
||||
load_schema_from_file,
|
||||
load_json_schema,
|
||||
load_yaml_schema,
|
||||
)
|
||||
|
||||
|
||||
class TestEnvVar:
|
||||
"""Tests for EnvVar model."""
|
||||
|
||||
def test_env_var_creation(self):
|
||||
var = EnvVar(name="TEST_VAR", type=EnvVarType.STRING)
|
||||
assert var.name == "TEST_VAR"
|
||||
assert var.type == EnvVarType.STRING
|
||||
assert var.required is False
|
||||
assert var.default is None
|
||||
|
||||
def test_env_var_with_all_fields(self):
|
||||
var = EnvVar(
|
||||
name="DATABASE_URL",
|
||||
type=EnvVarType.STRING,
|
||||
required=True,
|
||||
default="postgres://localhost",
|
||||
description="Database connection string",
|
||||
pattern=r"^postgres://.*",
|
||||
)
|
||||
assert var.required is True
|
||||
assert var.default == "postgres://localhost"
|
||||
assert var.description == "Database connection string"
|
||||
assert var.pattern == r"^postgres://.*"
|
||||
|
||||
def test_env_var_name_uppercase(self):
|
||||
var = EnvVar(name="test_var")
|
||||
assert var.name == "TEST_VAR"
|
||||
|
||||
def test_env_var_invalid_name(self):
|
||||
with pytest.raises(ValueError):
|
||||
EnvVar(name="invalid name with spaces")
|
||||
|
||||
|
||||
class TestSchema:
|
||||
"""Tests for Schema model."""
|
||||
|
||||
def test_schema_creation(self):
|
||||
schema = Schema()
|
||||
assert schema.version == "1.0"
|
||||
assert schema.envvars == []
|
||||
|
||||
def test_schema_with_vars(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="VAR1", type=EnvVarType.STRING),
|
||||
EnvVar(name="VAR2", type=EnvVarType.INTEGER, required=True),
|
||||
]
|
||||
)
|
||||
assert len(schema.envvars) == 2
|
||||
|
||||
def test_get_var(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="DATABASE_URL", type=EnvVarType.STRING),
|
||||
]
|
||||
)
|
||||
var = schema.get_var("DATABASE_URL")
|
||||
assert var is not None
|
||||
assert var.name == "DATABASE_URL"
|
||||
|
||||
def test_get_var_case_insensitive(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="DATABASE_URL", type=EnvVarType.STRING),
|
||||
]
|
||||
)
|
||||
var = schema.get_var("database_url")
|
||||
assert var is not None
|
||||
|
||||
def test_get_var_not_found(self):
|
||||
schema = Schema()
|
||||
var = schema.get_var("NONEXISTENT")
|
||||
assert var is None
|
||||
|
||||
def test_get_required_vars(self):
|
||||
schema = Schema(
|
||||
envvars=[
|
||||
EnvVar(name="VAR1", required=True),
|
||||
EnvVar(name="VAR2", required=False),
|
||||
EnvVar(name="VAR3", required=True),
|
||||
]
|
||||
)
|
||||
required = schema.get_required_vars()
|
||||
assert len(required) == 2
|
||||
assert {v.name for v in required} == {"VAR1", "VAR3"}
|
||||
|
||||
|
||||
class TestLoadJsonSchema:
|
||||
"""Tests for JSON schema loading."""
|
||||
|
||||
def test_load_valid_json_schema(self):
|
||||
json_content = json.dumps({
|
||||
"version": "1.0",
|
||||
"envVars": [
|
||||
{"name": "TEST_VAR", "type": "str"}
|
||||
]
|
||||
})
|
||||
schema = load_json_schema(json_content)
|
||||
assert schema.version == "1.0"
|
||||
assert len(schema.envvars) == 1
|
||||
|
||||
def test_load_invalid_json(self):
|
||||
with pytest.raises(ValueError, match="Invalid JSON"):
|
||||
load_json_schema("not valid json")
|
||||
|
||||
def test_load_invalid_schema_structure(self):
|
||||
with pytest.raises((ValueError, Exception), match="Invalid schema"):
|
||||
load_json_schema('{"version": "1.0", "envVars": [{"name": "VAR", "type": "invalid_type"}]}')
|
||||
|
||||
|
||||
class TestLoadYamlSchema:
|
||||
"""Tests for YAML schema loading."""
|
||||
|
||||
def test_load_valid_yaml_schema(self):
|
||||
yaml_content = """
|
||||
version: "1.0"
|
||||
envVars:
|
||||
- name: TEST_VAR
|
||||
type: str
|
||||
"""
|
||||
schema = load_yaml_schema(yaml_content)
|
||||
assert schema.version == "1.0"
|
||||
assert len(schema.envvars) == 1
|
||||
|
||||
def test_load_invalid_yaml(self):
|
||||
with pytest.raises(ValueError, match="Invalid YAML"):
|
||||
load_yaml_schema("invalid: yaml: content:")
|
||||
|
||||
|
||||
class TestLoadSchemaFromFile:
|
||||
"""Tests for file-based schema loading."""
|
||||
|
||||
def test_load_json_file(self, tmp_path):
|
||||
schema_file = tmp_path / "schema.json"
|
||||
schema_file.write_text(json.dumps({
|
||||
"version": "1.0",
|
||||
"envVars": [{"name": "TEST", "type": "str"}]
|
||||
}))
|
||||
schema = load_schema_from_file(str(schema_file))
|
||||
assert schema.version == "1.0"
|
||||
|
||||
def test_load_yaml_file(self, tmp_path):
|
||||
schema_file = tmp_path / "schema.yaml"
|
||||
schema_file.write_text('version: "1.0"\nenvVars: []')
|
||||
schema = load_schema_from_file(str(schema_file))
|
||||
assert schema.version == "1.0"
|
||||
|
||||
def test_file_not_found(self):
|
||||
with pytest.raises(FileNotFoundError):
|
||||
load_schema_from_file("/nonexistent/path/schema.json")
|
||||
|
||||
def test_unsupported_format(self, tmp_path):
|
||||
schema_file = tmp_path / "schema.txt"
|
||||
schema_file.write_text("some content")
|
||||
with pytest.raises(ValueError, match="Unsupported schema format"):
|
||||
load_schema_from_file(str(schema_file))
|
||||
176
envschema_repo/tests/unit/test_validators.py
Normal file
176
envschema_repo/tests/unit/test_validators.py
Normal file
@@ -0,0 +1,176 @@
|
||||
"""Unit tests for type validators."""
|
||||
|
||||
import pytest
|
||||
|
||||
from envschema.schema import EnvVarType
|
||||
from envschema.validators import (
|
||||
StringValidator,
|
||||
IntegerValidator,
|
||||
BooleanValidator,
|
||||
ListValidator,
|
||||
PatternValidator,
|
||||
validate_value,
|
||||
)
|
||||
|
||||
|
||||
class TestStringValidator:
|
||||
"""Tests for StringValidator."""
|
||||
|
||||
def test_valid_string(self):
|
||||
is_valid, error = StringValidator.validate("any value")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_empty_string(self):
|
||||
is_valid, error = StringValidator.validate("")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_none_value(self):
|
||||
is_valid, error = StringValidator.validate(None)
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
|
||||
class TestIntegerValidator:
|
||||
"""Tests for IntegerValidator."""
|
||||
|
||||
def test_valid_integer(self):
|
||||
is_valid, error = IntegerValidator.validate("42")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_valid_negative_integer(self):
|
||||
is_valid, error = IntegerValidator.validate("-10")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_valid_zero(self):
|
||||
is_valid, error = IntegerValidator.validate("0")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_invalid_float(self):
|
||||
is_valid, error = IntegerValidator.validate("3.14")
|
||||
assert is_valid is False
|
||||
assert error is not None
|
||||
|
||||
def test_invalid_string(self):
|
||||
is_valid, error = IntegerValidator.validate("abc")
|
||||
assert is_valid is False
|
||||
assert error is not None
|
||||
|
||||
def test_none_value(self):
|
||||
is_valid, error = IntegerValidator.validate(None)
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
|
||||
class TestBooleanValidator:
|
||||
"""Tests for BooleanValidator."""
|
||||
|
||||
@pytest.mark.parametrize("value", ["true", "True", "TRUE", "1", "yes", "Yes", "YES", "on", "ON"])
|
||||
def test_valid_true_values(self, value):
|
||||
is_valid, error = BooleanValidator.validate(value)
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
@pytest.mark.parametrize("value", ["false", "False", "FALSE", "0", "no", "No", "NO", "off", "OFF"])
|
||||
def test_valid_false_values(self, value):
|
||||
is_valid, error = BooleanValidator.validate(value)
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
@pytest.mark.parametrize("value", ["maybe", "2", "truee", "yess"])
|
||||
def test_invalid_boolean_values(self, value):
|
||||
is_valid, error = BooleanValidator.validate(value)
|
||||
assert is_valid is False
|
||||
assert error is not None
|
||||
|
||||
def test_none_value(self):
|
||||
is_valid, error = BooleanValidator.validate(None)
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
|
||||
class TestListValidator:
|
||||
"""Tests for ListValidator."""
|
||||
|
||||
def test_valid_list(self):
|
||||
is_valid, error = ListValidator.validate("item1,item2,item3")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_single_item_list(self):
|
||||
is_valid, error = ListValidator.validate("single")
|
||||
assert is_valid is False
|
||||
assert error is not None
|
||||
|
||||
def test_empty_string(self):
|
||||
is_valid, error = ListValidator.validate("")
|
||||
assert is_valid is False
|
||||
|
||||
def test_none_value(self):
|
||||
is_valid, error = ListValidator.validate(None)
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_parse_list(self):
|
||||
result = ListValidator.parse("item1, item2 , item3")
|
||||
assert result == ["item1", "item2", "item3"]
|
||||
|
||||
def test_parse_list_with_empty_items(self):
|
||||
result = ListValidator.parse("item1,,item2")
|
||||
assert result == ["item1", "item2"]
|
||||
|
||||
|
||||
class TestPatternValidator:
|
||||
"""Tests for PatternValidator."""
|
||||
|
||||
def test_valid_pattern_match(self):
|
||||
is_valid, error = PatternValidator.validate("ABC123", r"^[A-Z]+[0-9]+$")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
def test_invalid_pattern_match(self):
|
||||
is_valid, error = PatternValidator.validate("abc123", r"^[A-Z]+[0-9]+$")
|
||||
assert is_valid is False
|
||||
assert error is not None
|
||||
|
||||
def test_invalid_regex_pattern(self):
|
||||
is_valid, error = PatternValidator.validate("test", r"[invalid")
|
||||
assert is_valid is False
|
||||
assert error is not None
|
||||
|
||||
def test_none_value(self):
|
||||
is_valid, error = PatternValidator.validate(None, r"^[A-Z]+$")
|
||||
assert is_valid is True
|
||||
assert error is None
|
||||
|
||||
|
||||
class TestValidateValue:
|
||||
"""Tests for the main validate_value function."""
|
||||
|
||||
def test_validate_string(self):
|
||||
is_valid, error = validate_value("test", EnvVarType.STRING)
|
||||
assert is_valid is True
|
||||
|
||||
def test_validate_integer(self):
|
||||
is_valid, error = validate_value("42", EnvVarType.INTEGER)
|
||||
assert is_valid is True
|
||||
|
||||
def test_validate_boolean(self):
|
||||
is_valid, error = validate_value("true", EnvVarType.BOOLEAN)
|
||||
assert is_valid is True
|
||||
|
||||
def test_validate_list(self):
|
||||
is_valid, error = validate_value("a,b,c", EnvVarType.LIST)
|
||||
assert is_valid is True
|
||||
|
||||
def test_validate_with_pattern(self):
|
||||
is_valid, error = validate_value("ABC123", EnvVarType.STRING, r"^[A-Z]+[0-9]+$")
|
||||
assert is_valid is True
|
||||
|
||||
def test_validate_with_invalid_pattern(self):
|
||||
is_valid, error = validate_value("abc123", EnvVarType.STRING, r"^[A-Z]+[0-9]+$")
|
||||
assert is_valid is False
|
||||
156
examples/example_usage.py
Normal file
156
examples/example_usage.py
Normal file
@@ -0,0 +1,156 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Example usage of the API Mock CLI library.
|
||||
|
||||
This script demonstrates how to use the API Mock CLI programmatically
|
||||
to parse HAR files, generate mock servers, and run them.
|
||||
"""
|
||||
|
||||
import json
|
||||
from api_mock_cli.core.har_parser import HARParser
|
||||
from api_mock_cli.core.mock_generator import MockGenerator
|
||||
from api_mock_cli.core.server import create_mock_server_from_har
|
||||
from api_mock_cli.utils.auth_handler import AuthHandler
|
||||
|
||||
|
||||
def example_parse_har():
|
||||
print("=" * 60)
|
||||
print("Example 1: Parse HAR file")
|
||||
print("=" * 60)
|
||||
|
||||
parser = HARParser(har_file_path="examples/sample.har")
|
||||
result = parser.parse()
|
||||
|
||||
print(f"Base URL: {result.base_url}")
|
||||
print(f"Total entries: {result.entry_count}")
|
||||
print(f"Skipped entries: {result.skipped_count}")
|
||||
print(f"Valid requests: {len(result.requests)}")
|
||||
print()
|
||||
|
||||
for i, req in enumerate(result.requests[:3], 1):
|
||||
print(f"Request {i}:")
|
||||
print(f" Method: {req.method}")
|
||||
print(f" URL: {req.url}")
|
||||
print(f" Status: {req.status_code}")
|
||||
print()
|
||||
|
||||
|
||||
def example_generate_mock_server():
|
||||
print("=" * 60)
|
||||
print("Example 2: Generate Mock Server Code")
|
||||
print("=" * 60)
|
||||
|
||||
parser = HARParser(har_file_path="examples/sample.har")
|
||||
result = parser.parse()
|
||||
|
||||
generator = MockGenerator(result)
|
||||
routes = generator.get_route_summary()
|
||||
|
||||
print(f"Generated {len(routes)} routes:")
|
||||
for route in routes:
|
||||
print(f" [{route['method']}] {route['route']} -> {route['status']}")
|
||||
print()
|
||||
|
||||
code = generator.generate_app()
|
||||
print("Generated Flask app code (first 500 chars):")
|
||||
print(code[:500])
|
||||
print("...")
|
||||
print()
|
||||
|
||||
|
||||
def example_save_mock_server():
|
||||
print("=" * 60)
|
||||
print("Example 3: Save Mock Server to File")
|
||||
print("=" * 60)
|
||||
|
||||
parser = HARParser(har_file_path="examples/sample.har")
|
||||
result = parser.parse()
|
||||
|
||||
generator = MockGenerator(result)
|
||||
output_path = "examples/generated_mock_server.py"
|
||||
generator.save_mock_server(output_path)
|
||||
|
||||
print(f"Mock server saved to: {output_path}")
|
||||
print()
|
||||
|
||||
|
||||
def example_run_mock_server():
|
||||
print("=" * 60)
|
||||
print("Example 4: Run Mock Server")
|
||||
print("=" * 60)
|
||||
|
||||
parser = HARParser(har_file_path="examples/sample.har")
|
||||
result = parser.parse()
|
||||
|
||||
print("Creating mock server...")
|
||||
server = create_mock_server_from_har(result, host="localhost", port=5000)
|
||||
app = server.create_app()
|
||||
|
||||
print("Mock server created successfully!")
|
||||
print("Note: Run the server with: python examples/generated_mock_server.py")
|
||||
print()
|
||||
|
||||
|
||||
def example_auth_handler():
|
||||
print("=" * 60)
|
||||
print("Example 5: Authentication Handling")
|
||||
print("=" * 60)
|
||||
|
||||
handler = AuthHandler()
|
||||
|
||||
headers = {
|
||||
"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.test",
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
|
||||
auth_info = handler.extract_auth(headers)
|
||||
if auth_info:
|
||||
print(f"Auth Type: {auth_info.auth_type}")
|
||||
print(f"Credentials: {auth_info.credentials}")
|
||||
print(f"Header: {auth_info.header_name}: {auth_info.header_value[:20]}...")
|
||||
print()
|
||||
|
||||
|
||||
def example_data_generator():
|
||||
print("=" * 60)
|
||||
print("Example 6: Generate Fake Data")
|
||||
print("=" * 60)
|
||||
|
||||
from api_mock_cli.core.data_generator import FakeDataGenerator
|
||||
|
||||
generator = FakeDataGenerator()
|
||||
|
||||
sample_data = {
|
||||
"id": 123,
|
||||
"name": "John Doe",
|
||||
"email": "john@example.com",
|
||||
"created_at": "2024-01-01T00:00:00Z",
|
||||
"is_active": True,
|
||||
"profile": {
|
||||
"avatar": "https://example.com/avatar.jpg",
|
||||
"bio": "Sample bio text",
|
||||
},
|
||||
"tags": ["developer", "python"],
|
||||
}
|
||||
|
||||
print("Original data:")
|
||||
print(json.dumps(sample_data, indent=2))
|
||||
print()
|
||||
|
||||
generated = generator.generate_from_dict(sample_data)
|
||||
print("Generated fake data:")
|
||||
print(json.dumps(generated, indent=2))
|
||||
print()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
example_parse_har()
|
||||
example_generate_mock_server()
|
||||
example_save_mock_server()
|
||||
example_run_mock_server()
|
||||
example_auth_handler()
|
||||
example_data_generator()
|
||||
|
||||
print("=" * 60)
|
||||
print("All examples completed!")
|
||||
print("=" * 60)
|
||||
158
examples/sample.har
Normal file
158
examples/sample.har
Normal file
@@ -0,0 +1,158 @@
|
||||
{
|
||||
"log": {
|
||||
"version": "1.2",
|
||||
"creator": {
|
||||
"name": "API Mock CLI",
|
||||
"version": "0.1.0"
|
||||
},
|
||||
"entries": [
|
||||
{
|
||||
"startedDateTime": "2024-01-15T10:30:00.000Z",
|
||||
"time": 150,
|
||||
"request": {
|
||||
"method": "GET",
|
||||
"url": "https://api.example.com/users/123",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"},
|
||||
{"name": "Authorization", "value": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.test"}
|
||||
],
|
||||
"queryString": [
|
||||
{"name": "include", "value": "profile"},
|
||||
{"name": "fields", "value": "name,email"}
|
||||
]
|
||||
},
|
||||
"response": {
|
||||
"status": 200,
|
||||
"statusText": "OK",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"},
|
||||
{"name": "X-Request-Id", "value": "req_abc123"}
|
||||
],
|
||||
"content": {
|
||||
"mimeType": "application/json",
|
||||
"text": "{\"id\": 123, \"name\": \"John Doe\", \"email\": \"john.doe@example.com\", \"created_at\": \"2024-01-01T00:00:00Z\", \"is_active\": true}"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"startedDateTime": "2024-01-15T10:30:01.000Z",
|
||||
"time": 200,
|
||||
"request": {
|
||||
"method": "POST",
|
||||
"url": "https://api.example.com/users",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"},
|
||||
{"name": "Authorization", "value": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.test"}
|
||||
],
|
||||
"queryString": []
|
||||
},
|
||||
"response": {
|
||||
"status": 201,
|
||||
"statusText": "Created",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"}
|
||||
],
|
||||
"content": {
|
||||
"mimeType": "application/json",
|
||||
"text": "{\"id\": 456, \"name\": \"Jane Smith\", \"email\": \"jane.smith@example.com\", \"created_at\": \"2024-01-15T10:30:01Z\"}"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"startedDateTime": "2024-01-15T10:30:02.000Z",
|
||||
"time": 100,
|
||||
"request": {
|
||||
"method": "GET",
|
||||
"url": "https://api.example.com/posts",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"}
|
||||
],
|
||||
"queryString": [
|
||||
{"name": "page", "value": "1"},
|
||||
{"name": "limit", "value": "10"}
|
||||
]
|
||||
},
|
||||
"response": {
|
||||
"status": 200,
|
||||
"statusText": "OK",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"}
|
||||
],
|
||||
"content": {
|
||||
"mimeType": "application/json",
|
||||
"text": "{\"posts\": [{\"id\": 1, \"title\": \"First Post\", \"author\": \"John Doe\"}, {\"id\": 2, \"title\": \"Second Post\", \"author\": \"Jane Smith\"}], \"total\": 2}"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"startedDateTime": "2024-01-15T10:30:03.000Z",
|
||||
"time": 180,
|
||||
"request": {
|
||||
"method": "GET",
|
||||
"url": "https://api.example.com/products/abc123-def456-ghi789",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"},
|
||||
{"name": "X-API-Key", "value": "sk_test_123456789"}
|
||||
],
|
||||
"queryString": []
|
||||
},
|
||||
"response": {
|
||||
"status": 200,
|
||||
"statusText": "OK",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"}
|
||||
],
|
||||
"content": {
|
||||
"mimeType": "application/json",
|
||||
"text": "{\"id\": \"abc123-def456-ghi789\", \"name\": \"Premium Widget\", \"price\": 29.99, \"in_stock\": true}"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"startedDateTime": "2024-01-15T10:30:04.000Z",
|
||||
"time": 250,
|
||||
"request": {
|
||||
"method": "PUT",
|
||||
"url": "https://api.example.com/users/123",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"},
|
||||
{"name": "Authorization", "value": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.test"}
|
||||
],
|
||||
"queryString": []
|
||||
},
|
||||
"response": {
|
||||
"status": 200,
|
||||
"statusText": "OK",
|
||||
"headers": [
|
||||
{"name": "Content-Type", "value": "application/json"}
|
||||
],
|
||||
"content": {
|
||||
"mimeType": "application/json",
|
||||
"text": "{\"id\": 123, \"name\": \"John Updated\", \"email\": \"john.updated@example.com\", \"updated_at\": \"2024-01-15T10:30:04Z\"}"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"startedDateTime": "2024-01-15T10:30:05.000Z",
|
||||
"time": 100,
|
||||
"request": {
|
||||
"method": "DELETE",
|
||||
"url": "https://api.example.com/users/789",
|
||||
"headers": [
|
||||
{"name": "Authorization", "value": "Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.test"}
|
||||
],
|
||||
"queryString": []
|
||||
},
|
||||
"response": {
|
||||
"status": 204,
|
||||
"statusText": "No Content",
|
||||
"headers": [],
|
||||
"content": {
|
||||
"mimeType": "text/plain",
|
||||
"text": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
160
http_log_explorer/README.md
Normal file
160
http_log_explorer/README.md
Normal file
@@ -0,0 +1,160 @@
|
||||
# HTTP Log Explorer
|
||||
|
||||
A powerful CLI tool for parsing, exploring, and analyzing HTTP traffic logs from HAR files, curl -v output, and Chrome DevTools network exports.
|
||||
|
||||
## Features
|
||||
|
||||
- **Multi-format parsing**: HAR files, curl -v verbose output, and Chrome DevTools network exports
|
||||
- **Interactive CLI**: Rich terminal UI with beautifully formatted tables
|
||||
- **Advanced filtering**: Filter by method, status code, URL pattern, content type
|
||||
- **Request/Response diffing**: Side-by-side comparison of HTTP pairs
|
||||
- **API analytics**: Endpoint frequency, method distribution, status code breakdown, response time statistics
|
||||
- **OpenAPI generation**: Automatically generate OpenAPI 3.0 specs from observed traffic
|
||||
- **Export capabilities**: JSON, cURL commands, Python/JavaScript/Go code snippets
|
||||
|
||||
## Installation
|
||||
|
||||
### From Source
|
||||
|
||||
```bash
|
||||
pip install -e .
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
```
|
||||
click==8.1.7
|
||||
rich==13.7.0
|
||||
haralyzer==2.0.0
|
||||
pytest==8.0.0
|
||||
openapi-spec-validator==0.7.1
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Load a HAR file and show statistics
|
||||
http-log-explorer load access.har --stats
|
||||
|
||||
# List entries with filters
|
||||
http-log-explorer list-entries --method GET --status 200
|
||||
|
||||
# Search across URLs and bodies
|
||||
http-log-explorer search "api/users"
|
||||
|
||||
# Compare two requests
|
||||
http-log-explorer diff entry-1 entry-2
|
||||
|
||||
# Export to OpenAPI spec
|
||||
http-log-explorer export-openapi api-spec.json --title "My API"
|
||||
|
||||
# Export as cURL commands
|
||||
http-log-explorer export-curl commands.sh
|
||||
|
||||
# Export as Python code
|
||||
http-log-explorer export-code client.py --language python
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `load FILE` | Load and parse an HTTP log file |
|
||||
| `list-entries` | List entries with optional filtering |
|
||||
| `search QUERY` | Search across URLs and bodies |
|
||||
| `diff ID1 ID2` | Compare two entries by ID |
|
||||
| `stats` | Show traffic statistics |
|
||||
| `filter-entries` | Filter entries and show results |
|
||||
| `export-json FILE` | Export entries to JSON |
|
||||
| `export-curl FILE` | Export as cURL commands |
|
||||
| `export-code FILE` | Export as code snippets |
|
||||
| `export-openapi FILE` | Generate OpenAPI spec |
|
||||
|
||||
## Filtering Options
|
||||
|
||||
```bash
|
||||
# Filter by HTTP method
|
||||
http-log-explorer list-entries --method GET --method POST
|
||||
|
||||
# Filter by status code
|
||||
http-log-explorer list-entries --status 200 --status 404
|
||||
|
||||
# Filter by URL pattern (regex)
|
||||
http-log-explorer list-entries --url "/api/users"
|
||||
|
||||
# Filter by content type
|
||||
http-log-explorer list-entries --content-type application/json
|
||||
```
|
||||
|
||||
## Supported Formats
|
||||
|
||||
### HAR Files (HTTP Archive)
|
||||
|
||||
Export from your browser's DevTools Network tab, or from HTTP debugging proxies such as Fiddler or Charles Proxy.
|
||||
|
||||
### curl -v Output
|
||||
|
||||
Paste output from `curl -v` or `curl --verbose`.
|
||||
|
||||
### Chrome DevTools Network Export
|
||||
|
||||
Export network requests from Chrome DevTools.
|
||||
|
||||
## Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
- `HTTP_LOG_DEBUG=true` - Enable verbose output
|
||||
|
||||
## Examples
|
||||
|
||||
### Analyzing API Traffic
|
||||
|
||||
```bash
|
||||
# Load traffic data
|
||||
http-log-explorer load api_traffic.har
|
||||
|
||||
# See overall statistics
|
||||
http-log-explorer stats
|
||||
|
||||
# Find all 4xx errors
|
||||
http-log-explorer list-entries --status 404 --status 400
|
||||
|
||||
# Search for specific endpoints
|
||||
http-log-explorer search "/users"
|
||||
```
|
||||
|
||||
### Generating API Documentation
|
||||
|
||||
```bash
|
||||
# Load traffic and export OpenAPI spec
|
||||
http-log-explorer load api.har
|
||||
http-log-explorer export-openapi openapi.json --title "User API" --version "2.0"
|
||||
```
|
||||
|
||||
### Exporting to Code
|
||||
|
||||
```bash
|
||||
# Export as Python requests
|
||||
http-log-explorer load api.har
|
||||
http-log-explorer export-code client.py --language python
|
||||
|
||||
# Export as JavaScript/Node.js
|
||||
http-log-explorer export-code client.js --language javascript
|
||||
|
||||
# Export as Go
|
||||
http-log-explorer export-code client.go --language go
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT License - see LICENSE file for details
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions welcome! Please ensure tests pass before submitting PRs.
|
||||
|
||||
```bash
|
||||
pytest tests/ -v
|
||||
ruff check http_log_explorer/
|
||||
```
|
||||
3
http_log_explorer/__init__.py
Normal file
3
http_log_explorer/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""HTTP Log Explorer - A CLI tool for parsing and analyzing HTTP traffic logs."""

# Package version (the CLI's --version option reports the same string).
__version__ = "0.1.0"
|
||||
7
http_log_explorer/analyzers/__init__.py
Normal file
7
http_log_explorer/analyzers/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""Analyzers for HTTP traffic."""
|
||||
|
||||
from http_log_explorer.analyzers.diff_engine import DiffEngine
|
||||
from http_log_explorer.analyzers.stats_generator import StatsGenerator
|
||||
from http_log_explorer.analyzers.traffic_analyzer import TrafficAnalyzer
|
||||
|
||||
__all__ = ["DiffEngine", "StatsGenerator", "TrafficAnalyzer"]
|
||||
185
http_log_explorer/analyzers/diff_engine.py
Normal file
185
http_log_explorer/analyzers/diff_engine.py
Normal file
@@ -0,0 +1,185 @@
|
||||
"""Diff engine for comparing HTTP entries."""
|
||||
|
||||
import difflib
|
||||
|
||||
from http_log_explorer.models import DiffResult, HTTPEntry
|
||||
|
||||
|
||||
class DiffEngine:
    """Engine for comparing HTTP request/response pairs."""

    def diff(self, entry1: HTTPEntry, entry2: HTTPEntry) -> DiffResult:
        """Build a DiffResult describing how two entries differ.

        Args:
            entry1: First HTTPEntry
            entry2: Second HTTPEntry

        Returns:
            DiffResult capturing URL/status changes plus header and body diffs.
        """
        outcome = DiffResult(entry1_id=entry1.id, entry2_id=entry2.id)

        outcome.url_changed = entry1.request.url != entry2.request.url
        outcome.status_changed = entry1.response.status != entry2.response.status
        outcome.status1 = entry1.response.status
        outcome.status2 = entry2.response.status

        outcome.request_headers_diff = self.headers_diff(
            entry1.request.headers, entry2.request.headers
        )
        outcome.response_headers_diff = self.headers_diff(
            entry1.response.headers, entry2.response.headers
        )
        outcome.request_body_diff = self.body_diff(entry1.request.body, entry2.request.body)
        outcome.response_body_diff = self.body_diff(entry1.response.body, entry2.response.body)

        return outcome

    def headers_diff(
        self, headers1: dict[str, str], headers2: dict[str, str]
    ) -> list[str]:
        """Diff two header mappings.

        Produces "+"/"-" prefixed lines, ordered by header name: a value only
        in the first dict emits "-", a value only in the second emits "+",
        and a changed value emits its old ("-") then new ("+") form.
        """
        changes: list[str] = []
        for name in sorted(headers1.keys() | headers2.keys()):
            before = headers1.get(name)
            after = headers2.get(name)
            if before == after:
                continue
            if before is not None:
                changes.append(f"- {name}: {before}")
            if after is not None:
                changes.append(f"+ {name}: {after}")
        return changes

    def body_diff(
        self, body1: str | None, body2: str | None
    ) -> list[str]:
        """Diff two (possibly missing) body strings.

        Returns unified-diff lines labelled before/after, or an empty list
        when the bodies are identical or both effectively empty.
        """
        if body1 == body2:
            return []

        old_lines = (body1 or "").splitlines(keepends=True)
        new_lines = (body2 or "").splitlines(keepends=True)
        if not old_lines and not new_lines:
            return []

        return list(
            difflib.unified_diff(
                old_lines,
                new_lines,
                fromfile="before",
                tofile="after",
                lineterm="",
            )
        )

    def unified_diff_output(self, diff_result: DiffResult) -> str:
        """Format a DiffResult as a readable multi-section text report."""
        report = [
            f"=== Diff: {diff_result.entry1_id} vs {diff_result.entry2_id} ===",
            "",
        ]

        if diff_result.url_changed:
            report.append(f"URL changed: {diff_result.url_changed}")
        if diff_result.status_changed:
            report.append(f"Status: {diff_result.status1} -> {diff_result.status2}")

        # Section order matches the request-then-response reading order.
        sections = (
            ("--- Request Headers ---", diff_result.request_headers_diff),
            ("--- Request Body ---", diff_result.request_body_diff),
            ("--- Response Headers ---", diff_result.response_headers_diff),
            ("--- Response Body ---", diff_result.response_body_diff),
        )
        for heading, body_lines in sections:
            if body_lines:
                report.append("")
                report.append(heading)
                report.extend(body_lines)

        if not self.has_differences(diff_result):
            report.append("No differences found.")

        return "\n".join(report)

    def has_differences(self, diff_result: DiffResult) -> bool:
        """Return True when the diff result records any difference at all."""
        return any(
            bool(field)
            for field in (
                diff_result.url_changed,
                diff_result.status_changed,
                diff_result.request_headers_diff,
                diff_result.request_body_diff,
                diff_result.response_headers_diff,
                diff_result.response_body_diff,
            )
        )
|
||||
277
http_log_explorer/analyzers/stats_generator.py
Normal file
277
http_log_explorer/analyzers/stats_generator.py
Normal file
@@ -0,0 +1,277 @@
|
||||
"""Statistics generator for HTTP traffic analytics."""
|
||||
|
||||
import re
|
||||
from collections import Counter, defaultdict
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
from rich.table import Table
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
|
||||
@dataclass
class TrafficStats:
    """Container for traffic statistics."""

    # Number of HTTP entries the statistics were computed from.
    total_requests: int
    # Normalized endpoint pattern -> request count.
    endpoint_count: dict[str, int]
    # HTTP method -> request count.
    method_distribution: dict[str, int]
    # Status code -> response count.
    status_breakdown: dict[int, int]
    # MIME type (parameters such as charset stripped) -> response count.
    content_type_distribution: dict[str, int]
    # Millisecond aggregates keyed min/max/avg/median/p95/p99.
    response_time_stats: dict[str, float]
    # Host name -> request count.
    hosts: dict[str, int]
|
||||
|
||||
|
||||
class StatsGenerator:
    """Generate statistics from HTTP entries."""

    # Patterns compiled once at class level so the per-segment helpers do not
    # recompile them for every entry.
    _UUID_RE = re.compile(
        r"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$",
        re.IGNORECASE,
    )
    _HASH_RE = re.compile(r"^[a-f0-9]{32,}$", re.IGNORECASE)
    # A UUID appearing as a path segment.  This must be collapsed BEFORE the
    # numeric-ID substitution: a UUID may begin with digits, and replacing
    # "/550e8400-..." with "/{id}e8400-..." first would corrupt it.
    _UUID_SEGMENT_RE = re.compile(
        r"/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}",
        re.IGNORECASE,
    )

    def __init__(self, entries: "list[HTTPEntry]") -> None:
        """Initialize with HTTP entries.

        Args:
            entries: List of HTTPEntry objects
        """
        self.entries = entries

    def generate(self) -> "TrafficStats":
        """Generate all statistics.

        Returns:
            TrafficStats object with all computed statistics
        """
        return TrafficStats(
            total_requests=len(self.entries),
            endpoint_count=self.endpoint_count(),
            method_distribution=self.method_distribution(),
            status_breakdown=self.status_breakdown(),
            content_type_distribution=self.content_type_distribution(),
            response_time_stats=self.response_time_stats(),
            hosts=self.hosts(),
        )

    def endpoint_count(self) -> dict[str, int]:
        """Count requests per normalized endpoint pattern.

        Returns:
            Dictionary mapping endpoint patterns to counts, most common first.
        """
        counter: Counter[str] = Counter()
        for entry in self.entries:
            counter[self._normalize_endpoint(entry.endpoint)] += 1
        return dict(counter.most_common())

    def method_distribution(self) -> dict[str, int]:
        """Get distribution of HTTP methods.

        Returns:
            Dictionary mapping methods to counts
        """
        return dict(Counter(e.request.method for e in self.entries))

    def status_breakdown(self) -> dict[int, int]:
        """Get breakdown of status codes, ordered by code.

        Returns:
            Dictionary mapping status codes to counts
        """
        counter = Counter(e.response.status for e in self.entries)
        return dict(sorted(counter.items()))

    def content_type_distribution(self) -> dict[str, int]:
        """Get distribution of content types.

        Returns:
            Dictionary mapping bare MIME types to counts, most common first.
        """
        counter: Counter[str] = Counter()
        for entry in self.entries:
            ct = entry.content_type or "unknown"
            # Drop MIME parameters, e.g. "application/json; charset=utf-8".
            counter[ct.split(";")[0].strip()] += 1
        return dict(counter.most_common())

    def response_time_stats(self) -> dict[str, float]:
        """Calculate response time statistics.

        Returns:
            Dictionary with min, max, avg, median, p95, p99 times in ms
            (all zero when no entry carries a duration).
        """
        times = [e.duration_ms for e in self.entries if e.duration_ms is not None]
        if not times:
            return {"min": 0.0, "max": 0.0, "avg": 0.0, "median": 0.0, "p95": 0.0, "p99": 0.0}

        sorted_times = sorted(times)
        n = len(sorted_times)

        # True median: for an even-sized sample average the two middle values
        # (taking sorted_times[n // 2] alone biases the result upward).
        mid = n // 2
        if n % 2:
            median = float(sorted_times[mid])
        else:
            median = (sorted_times[mid - 1] + sorted_times[mid]) / 2.0

        stats = {
            "min": float(sorted_times[0]),
            "max": float(sorted_times[-1]),
            "avg": float(sum(times) / n),
            "median": median,
        }

        # Nearest-rank percentiles, clamped to the last element.
        stats["p95"] = float(sorted_times[min(int(n * 0.95), n - 1)])
        stats["p99"] = float(sorted_times[min(int(n * 0.99), n - 1)])

        return stats

    def hosts(self) -> dict[str, int]:
        """Get request count per host, most common first.

        Returns:
            Dictionary mapping hosts to counts
        """
        return dict(Counter(e.host for e in self.entries).most_common())

    def status_code_categories(self) -> dict[str, int]:
        """Get counts by status code category.

        Returns:
            Dictionary with 1xx, 2xx, 3xx, 4xx, 5xx counts
        """
        categories: dict[str, int] = {
            "1xx informational": 0,
            "2xx success": 0,
            "3xx redirection": 0,
            "4xx client error": 0,
            "5xx server error": 0,
        }
        labels = {
            1: "1xx informational",
            2: "2xx success",
            3: "3xx redirection",
            4: "4xx client error",
            5: "5xx server error",
        }
        for entry in self.entries:
            # Codes outside 100-599 fall through and are ignored.
            label = labels.get(entry.response.status // 100)
            if label is not None:
                categories[label] += 1
        return categories

    def endpoint_patterns(self) -> dict[str, int]:
        """Extract common endpoint patterns with path parameters.

        Returns:
            Dictionary mapping patterns to counts, most frequent first.
        """
        patterns: dict[str, int] = defaultdict(int)
        for entry in self.entries:
            patterns[self._extract_pattern(entry.endpoint)] += 1
        return dict(sorted(patterns.items(), key=lambda kv: kv[1], reverse=True))

    def _normalize_endpoint(self, endpoint: str) -> str:
        """Normalize an endpoint by collapsing IDs and version prefixes."""
        # UUID segments first (see _UUID_SEGMENT_RE), then purely numeric
        # segments, then "/vN[.N]" API-version segments.
        cleaned = self._UUID_SEGMENT_RE.sub("/{uuid}", endpoint)
        cleaned = re.sub(r"/\d+", "/{id}", cleaned)
        cleaned = re.sub(r"/v\d+(?:\.\d+)?", "", cleaned)
        return cleaned

    def _extract_pattern(self, endpoint: str) -> str:
        """Extract endpoint pattern with parameter placeholders."""
        normalized_parts: list[str] = []
        for part in endpoint.split("/"):
            if not part:
                normalized_parts.append("")
            elif part.isdigit():
                normalized_parts.append("{id}")
            elif self._is_uuid(part):
                normalized_parts.append("{uuid}")
            elif self._is_hash(part):
                normalized_parts.append("{hash}")
            else:
                normalized_parts.append(part)
        return "/".join(normalized_parts)

    def _is_uuid(self, s: str) -> bool:
        """Check if the string is a canonical 8-4-4-4-12 hex UUID."""
        return bool(self._UUID_RE.match(s))

    def _is_hash(self, s: str) -> bool:
        """Check if the string looks like a hex digest (32+ hex chars)."""
        return bool(self._HASH_RE.match(s))

    def render_table(self, stats: "TrafficStats | None" = None) -> "Table":
        """Render statistics as a Rich table.

        Args:
            stats: Pre-generated stats, or None to generate new

        Returns:
            Rich Table object
        """
        if stats is None:
            stats = self.generate()

        table = Table(title="Traffic Statistics")
        table.add_column("Metric", style="cyan")
        table.add_column("Value", style="green")

        table.add_row("Total Requests", str(stats.total_requests))

        method_rows = [f"{m}: {c}" for m, c in sorted(stats.method_distribution.items())]
        table.add_row("Methods", ", ".join(method_rows) if method_rows else "N/A")

        status_rows = [f"{s}: {c}" for s, c in sorted(stats.status_breakdown.items())]
        table.add_row("Status Codes", ", ".join(status_rows) if status_rows else "N/A")

        rt = stats.response_time_stats
        if rt["avg"] > 0:
            table.add_row("Response Time (avg)", f"{rt['avg']:.2f}ms")
            table.add_row("Response Time (p95)", f"{rt['p95']:.2f}ms")

        top_endpoints = list(stats.endpoint_count.items())[:5]
        endpoint_rows = [f"{e}: {c}" for e, c in top_endpoints]
        table.add_row("Top Endpoints", ", ".join(endpoint_rows) if endpoint_rows else "N/A")

        return table

    def to_dict(self, stats: "TrafficStats | None" = None) -> dict[str, Any]:
        """Convert stats to dictionary.

        Args:
            stats: Pre-generated stats, or None to generate new

        Returns:
            Dictionary representation of stats
        """
        if stats is None:
            stats = self.generate()

        return {
            "total_requests": stats.total_requests,
            "endpoint_count": stats.endpoint_count,
            "method_distribution": stats.method_distribution,
            "status_breakdown": stats.status_breakdown,
            "content_type_distribution": stats.content_type_distribution,
            "response_time_stats": stats.response_time_stats,
            "hosts": stats.hosts,
            "status_code_categories": self.status_code_categories(),
        }
|
||||
196
http_log_explorer/analyzers/traffic_analyzer.py
Normal file
196
http_log_explorer/analyzers/traffic_analyzer.py
Normal file
@@ -0,0 +1,196 @@
|
||||
"""Traffic analyzer for filtering HTTP entries."""
|
||||
|
||||
import re
|
||||
from collections.abc import Callable
|
||||
|
||||
from http_log_explorer.models import FilterCriteria, HTTPEntry
|
||||
|
||||
|
||||
class TrafficAnalyzer:
    """Analyzer for filtering and searching HTTP entries."""

    def __init__(self, entries: "list[HTTPEntry]") -> None:
        """Initialize with HTTP entries.

        Args:
            entries: List of HTTPEntry objects to analyze
        """
        self.entries = entries

    def filter(self, criteria: "FilterCriteria") -> "list[HTTPEntry]":
        """Filter entries based on criteria.

        All supplied criteria are AND-ed: an entry must satisfy every rule.

        Args:
            criteria: FilterCriteria object with filtering rules

        Returns:
            Filtered list of HTTPEntry objects
        """
        predicates: list[Callable[[HTTPEntry], bool]] = []

        if criteria.methods:
            predicates.append(lambda e: e.request.method in criteria.methods)

        if criteria.status_codes:
            predicates.append(lambda e: e.response.status in criteria.status_codes)

        if criteria.url_pattern:
            # Compile once, outside the per-entry predicate.
            pattern = re.compile(criteria.url_pattern)
            predicates.append(lambda e: bool(pattern.search(e.request.url)))

        if criteria.content_types:
            predicates.append(
                lambda e: bool(
                    e.content_type
                    and any(ct in e.content_type for ct in criteria.content_types)
                )
            )

        if criteria.start_time:
            predicates.append(
                lambda e: e.timestamp is not None and e.timestamp >= criteria.start_time
            )

        if criteria.end_time:
            predicates.append(
                lambda e: e.timestamp is not None and e.timestamp <= criteria.end_time
            )

        # Compare durations against None explicitly: a measured duration of
        # exactly 0 ms is valid and must not be dropped by truthiness checks.
        if criteria.min_response_time_ms is not None:
            predicates.append(
                lambda e: e.duration_ms is not None
                and e.duration_ms >= criteria.min_response_time_ms
            )

        if criteria.max_response_time_ms is not None:
            predicates.append(
                lambda e: e.duration_ms is not None
                and e.duration_ms <= criteria.max_response_time_ms
            )

        if criteria.request_body_contains:
            predicates.append(
                lambda e: bool(e.request.body and criteria.request_body_contains in e.request.body)
            )

        if criteria.response_body_contains:
            predicates.append(
                lambda e: bool(e.response.body and criteria.response_body_contains in e.response.body)
            )

        if not predicates:
            # No active rules: return a defensive copy of everything.
            return list(self.entries)

        return [entry for entry in self.entries if all(pred(entry) for pred in predicates)]

    def by_method(self, methods: list[str]) -> "list[HTTPEntry]":
        """Filter by HTTP methods.

        Args:
            methods: List of methods (GET, POST, PUT, DELETE, etc.)

        Returns:
            Filtered entries
        """
        return self.filter(FilterCriteria(methods=methods))

    def by_status(self, status_codes: list[int]) -> "list[HTTPEntry]":
        """Filter by status codes.

        Args:
            status_codes: List of status codes to include

        Returns:
            Filtered entries
        """
        return self.filter(FilterCriteria(status_codes=status_codes))

    def by_url(self, url_pattern: str) -> "list[HTTPEntry]":
        """Filter by URL pattern.

        Args:
            url_pattern: Regular expression pattern to match URLs

        Returns:
            Filtered entries
        """
        return self.filter(FilterCriteria(url_pattern=url_pattern))

    def by_content_type(self, content_types: list[str]) -> "list[HTTPEntry]":
        """Filter by content types.

        Args:
            content_types: List of content type substrings to match

        Returns:
            Filtered entries
        """
        return self.filter(FilterCriteria(content_types=content_types))

    def by_status_range(self, min_status: int, max_status: int) -> "list[HTTPEntry]":
        """Filter by status code range.

        Args:
            min_status: Minimum status code (inclusive)
            max_status: Maximum status code (inclusive)

        Returns:
            Filtered entries
        """
        return self.by_status(list(range(min_status, max_status + 1)))

    def successful_requests(self) -> "list[HTTPEntry]":
        """Get all 2xx responses.

        Returns:
            Entries with 2xx status codes
        """
        return self.by_status_range(200, 299)

    def client_errors(self) -> "list[HTTPEntry]":
        """Get all 4xx responses.

        Returns:
            Entries with 4xx status codes
        """
        return self.by_status_range(400, 499)

    def server_errors(self) -> "list[HTTPEntry]":
        """Get all 5xx responses.

        Returns:
            Entries with 5xx status codes
        """
        return self.by_status_range(500, 599)

    def search(self, query: str, case_sensitive: bool = False) -> "list[HTTPEntry]":
        """Search across URL, request body, and response body.

        Args:
            query: Search string
            case_sensitive: Whether search should be case sensitive

        Returns:
            Entries matching the query
        """
        search_query = query if case_sensitive else query.lower()

        def matches(entry: "HTTPEntry") -> bool:
            url = entry.request.url if case_sensitive else entry.request.url.lower()
            if search_query in url:
                return True
            if entry.request.body:
                body = entry.request.body if case_sensitive else entry.request.body.lower()
                if search_query in body:
                    return True
            if entry.response.body:
                body = entry.response.body if case_sensitive else entry.response.body.lower()
                if search_query in body:
                    return True
            return False

        return [e for e in self.entries if matches(e)]

    def get_entry_by_id(self, entry_id: str) -> "HTTPEntry | None":
        """Get a specific entry by its ID.

        Args:
            entry_id: The entry ID to find

        Returns:
            The HTTPEntry or None if not found
        """
        for entry in self.entries:
            if entry.id == entry_id:
                return entry
        return None
|
||||
3
http_log_explorer/cli/__init__.py
Normal file
3
http_log_explorer/cli/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
"""CLI interface for HTTP Log Explorer."""
|
||||
|
||||
__version__ = "0.1.0"
|
||||
339
http_log_explorer/cli/commands.py
Normal file
339
http_log_explorer/cli/commands.py
Normal file
@@ -0,0 +1,339 @@
|
||||
"""CLI commands for HTTP Log Explorer."""
|
||||
|
||||
import sys
|
||||
|
||||
import click
|
||||
from rich.console import Console
|
||||
|
||||
from http_log_explorer.analyzers import DiffEngine, StatsGenerator, TrafficAnalyzer
|
||||
from http_log_explorer.cli.formatter import Formatter
|
||||
from http_log_explorer.exporters import CodeExporter, CurlExporter, JSONExporter
|
||||
from http_log_explorer.generators import OpenAPIGenerator
|
||||
from http_log_explorer.models import FilterCriteria, HTTPEntry
|
||||
from http_log_explorer.parsers import get_parser
|
||||
|
||||
# Shared Rich console and table formatter used by all commands below.
console = Console()
formatter = Formatter()

# Module-level store of the most recently loaded entries; the commands
# below communicate through it (load() writes, the others read).
_entries_store: list[HTTPEntry] = []
|
||||
|
||||
|
||||
def reset_entries() -> None:
    """Reset the global entries store. Used for testing."""
    # Rebinds the module-level name, so the global declaration is required
    # for the assignment to reach module scope.
    global _entries_store
    _entries_store = []
|
||||
|
||||
|
||||
# Root command group; subcommands attach themselves via @cli.command().
@click.group()
@click.version_option(version="0.1.0")
def cli() -> None:
    """HTTP Log Explorer - Parse, analyze, and explore HTTP traffic logs."""
    # click renders the docstring above as the top-level --help text.
    pass
|
||||
|
||||
|
||||
@cli.command()
@click.argument("file", type=click.Path(exists=True))
@click.option("--stats", is_flag=True, help="Show statistics after loading")
def load(file: str, stats: bool) -> None:
    """Load and parse an HTTP log file.

    Supports HAR files, curl -v output, and Chrome DevTools exports.
    """
    global _entries_store

    try:
        # errors="replace" keeps loading usable even for files with
        # malformed or mixed encodings.
        with open(file, encoding="utf-8", errors="replace") as f:
            content = f.read()
    except Exception as e:
        console.print(f"[red]Error reading file: {e}[/red]")
        sys.exit(1)

    if not content.strip():
        console.print("[red]Error: File is empty[/red]")
        sys.exit(1)

    try:
        # Parser selection is driven by the file CONTENT, not its extension.
        parser = get_parser(content)
        console.print(f"[green]Using parser: {parser.get_parser_name()}[/green]")
        entries = parser.parse(content, source_file=file)
    except ValueError as e:
        console.print(f"[red]Parse error: {e}[/red]")
        console.print("[yellow]Supported formats:[/yellow]")
        console.print(" - HAR files (HTTP Archive format)")
        console.print(" - curl -v output")
        console.print(" - Chrome DevTools network exports")
        sys.exit(1)

    # Publish to the module-level store read by the other commands.
    _entries_store = entries
    console.print(f"[green]Loaded {len(entries)} entries[/green]")

    if stats and entries:
        _show_stats(entries)
|
||||
|
||||
|
||||
@cli.command()
@click.option("--limit", type=int, default=50, help="Limit number of entries shown")
@click.option("--method", multiple=True, help="Filter by method (e.g., GET, POST)")
@click.option("--status", multiple=True, type=int, help="Filter by status code")
@click.option("--url", help="Filter by URL pattern (regex)")
@click.option("--content-type", multiple=True, help="Filter by content type")
def list_entries(
    limit: int,
    method: tuple[str, ...],
    status: tuple[int, ...],
    url: str | None,
    content_type: tuple[str, ...],
) -> None:
    """List loaded HTTP entries with optional filtering."""
    global _entries_store

    if not _entries_store:
        console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
        return

    # Work on a copy rather than handing the shared store around.
    entries = list(_entries_store)

    # click passes multi-options as tuples; convert to lists and map
    # "nothing given" to None so TrafficAnalyzer.filter() skips that rule.
    criteria = FilterCriteria(
        methods=list(method) if method else None,
        status_codes=list(status) if status else None,
        url_pattern=url,
        content_types=list(content_type) if content_type else None,
    )

    analyzer = TrafficAnalyzer(entries)
    filtered = analyzer.filter(criteria)

    table = formatter.format_entry_table(filtered, limit=limit)
    console.print(table)
    console.print(f"\n[dim]Showing {min(limit, len(filtered))} of {len(filtered)} entries[/dim]")
|
||||
|
||||
|
||||
@cli.command()
@click.argument("query")
@click.option("--case-sensitive", is_flag=True, help="Case sensitive search")
def search(query: str, case_sensitive: bool) -> None:
    """Search across URLs and bodies."""
    global _entries_store

    if not _entries_store:
        console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
        return

    # Delegate the actual matching to the analyzer, then render at most 50 rows.
    matches = TrafficAnalyzer(_entries_store).search(
        query, case_sensitive=case_sensitive
    )

    console.print(formatter.format_entry_table(matches, limit=50))
    console.print(f"\n[dim]Found {len(matches)} matching entries[/dim]")
|
||||
|
||||
|
||||
@cli.command()
@click.argument("entry_id1")
@click.argument("entry_id2")
def diff(entry_id1: str, entry_id2: str) -> None:
    """Compare two HTTP entries by ID."""
    global _entries_store

    if not _entries_store:
        console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
        return

    lookup = TrafficAnalyzer(_entries_store)

    # Resolve both IDs, reporting the first one that cannot be found.
    selected = []
    for wanted_id in (entry_id1, entry_id2):
        match = lookup.get_entry_by_id(wanted_id)
        if match is None:
            console.print(f"[red]Entry not found: {wanted_id}[/red]")
            return
        selected.append(match)

    engine = DiffEngine()
    console.print(engine.unified_diff_output(engine.diff(selected[0], selected[1])))
|
||||
|
||||
|
||||
@cli.command()
def stats() -> None:
    """Show statistics for loaded entries."""
    global _entries_store

    # Stats rendering itself lives in _show_stats so other commands can reuse it.
    if _entries_store:
        _show_stats(_entries_store)
    else:
        console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
|
||||
|
||||
|
||||
def _show_stats(entries: list[HTTPEntry]) -> None:
    """Render aggregate traffic statistics for *entries* to the console."""
    stats_data = StatsGenerator(entries).to_dict()

    console.print("\n[bold cyan]Traffic Statistics[/bold cyan]")
    console.print(f"Total Requests: {stats_data['total_requests']}")

    console.print("\n[bold]Method Distribution[/bold]")
    for method, count in sorted(stats_data["method_distribution"].items()):
        console.print(f"  {method}: {count}")

    console.print("\n[bold]Status Code Breakdown[/bold]")
    for status, count in sorted(stats_data["status_breakdown"].items()):
        console.print(f"  {status}: {count}")

    console.print("\n[bold]Top Endpoints[/bold]")
    # Show only the ten most frequent endpoints.
    for endpoint, count in list(stats_data["endpoint_count"].items())[:10]:
        console.print(f"  {endpoint}: {count}")

    rt = stats_data.get("response_time_stats", {})
    # A zero/missing average means there were no timing samples to report.
    if rt.get("avg", 0) > 0:
        console.print("\n[bold]Response Times[/bold]")
        for label, key in (
            ("Min", "min"),
            ("Max", "max"),
            ("Avg", "avg"),
            ("Median", "median"),
            ("P95", "p95"),
            ("P99", "p99"),
        ):
            console.print(f"  {label}: {rt.get(key, 0):.2f}ms")
|
||||
|
||||
|
||||
@cli.command("export-json")
|
||||
@click.argument("output", type=click.Path())
|
||||
@click.option("--compact", is_flag=True, help="Export compact JSON")
|
||||
@click.option("--summary", is_flag=True, help="Export summary only")
|
||||
def export_json(output: str, compact: bool, summary: bool) -> None:
|
||||
"""Export entries to JSON file."""
|
||||
global _entries_store
|
||||
|
||||
if not _entries_store:
|
||||
console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
|
||||
return
|
||||
|
||||
exporter = JSONExporter()
|
||||
|
||||
try:
|
||||
if summary:
|
||||
content = exporter.export_summary(_entries_store)
|
||||
elif compact:
|
||||
content = exporter.export_compact(_entries_store)
|
||||
else:
|
||||
content = exporter.export(_entries_store)
|
||||
|
||||
with open(output, "w") as f:
|
||||
f.write(content)
|
||||
|
||||
console.print(f"[green]Exported to {output}[/green]")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Export error: {e}[/red]")
|
||||
|
||||
|
||||
@cli.command("export-curl")
|
||||
@click.argument("output", type=click.Path())
|
||||
def export_curl(output: str) -> None:
|
||||
"""Export entries as cURL commands."""
|
||||
global _entries_store
|
||||
|
||||
if not _entries_store:
|
||||
console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
|
||||
return
|
||||
|
||||
exporter = CurlExporter()
|
||||
|
||||
try:
|
||||
exporter.to_file(_entries_store, output)
|
||||
console.print(f"[green]Exported to {output}[/green]")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Export error: {e}[/red]")
|
||||
|
||||
|
||||
@cli.command("export-code")
|
||||
@click.argument("output", type=click.Path())
|
||||
@click.option(
|
||||
"--language",
|
||||
type=click.Choice(["python", "javascript", "go"]),
|
||||
default="python",
|
||||
help="Target language",
|
||||
)
|
||||
def export_code(output: str, language: str) -> None:
|
||||
"""Export entries as code snippets."""
|
||||
global _entries_store
|
||||
|
||||
if not _entries_store:
|
||||
console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
|
||||
return
|
||||
|
||||
exporter = CodeExporter()
|
||||
|
||||
try:
|
||||
exporter.to_file(_entries_store, output, language)
|
||||
console.print(f"[green]Exported {len(_entries_store)} snippets to {output}[/green]")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Export error: {e}[/red]")
|
||||
|
||||
|
||||
@cli.command("export-openapi")
|
||||
@click.argument("output", type=click.Path())
|
||||
@click.option("--title", default="API", help="API title")
|
||||
@click.option("--version", default="1.0.0", help="API version")
|
||||
@click.option("--no-validate", is_flag=True, help="Skip validation")
|
||||
def export_openapi(
|
||||
output: str, title: str, version: str, no_validate: bool
|
||||
) -> None:
|
||||
"""Generate OpenAPI spec from traffic."""
|
||||
global _entries_store
|
||||
|
||||
if not _entries_store:
|
||||
console.print("[yellow]No entries loaded. Use 'load' command first.[/yellow]")
|
||||
return
|
||||
|
||||
generator = OpenAPIGenerator(_entries_store)
|
||||
|
||||
try:
|
||||
spec = generator.generate(
|
||||
title=title,
|
||||
version=version,
|
||||
validate_spec=not no_validate,
|
||||
)
|
||||
|
||||
with open(output, "w") as f:
|
||||
f.write(generator.to_json(spec))
|
||||
|
||||
console.print(f"[green]OpenAPI spec exported to {output}[/green]")
|
||||
except ValueError as e:
|
||||
console.print(f"[red]Validation error: {e}[/red]")
|
||||
except Exception as e:
|
||||
console.print(f"[red]Export error: {e}[/red]")
|
||||
|
||||
|
||||
@cli.command()
@click.option("--method", multiple=True, help="Filter by method")
@click.option("--status", multiple=True, type=int, help="Filter by status code")
@click.option("--url", help="Filter by URL pattern")
@click.option("--content-type", multiple=True, help="Filter by content type")
def filter_entries(
    method: tuple[str, ...],
    status: tuple[int, ...],
    url: str | None,
    content_type: tuple[str, ...],
) -> None:
    """Filter entries and show results (alias for list with filters)."""
    # Forward to the list command with a fixed page size so the two
    # commands share one rendering path.
    click.get_current_context().invoke(
        list_entries,
        limit=50,
        method=method,
        status=status,
        url=url,
        content_type=content_type,
    )
|
||||
|
||||
|
||||
def main() -> None:
    """Main entry point.

    Delegates to the click command group, which handles argument
    parsing and process exit codes.
    """
    cli()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
153
http_log_explorer/cli/formatter.py
Normal file
153
http_log_explorer/cli/formatter.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""Rich table formatter for HTTP entries."""
|
||||
|
||||
from typing import Any
|
||||
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
|
||||
class Formatter:
|
||||
"""Format HTTP entries for terminal display."""
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize formatter."""
|
||||
self.console = Console()
|
||||
|
||||
def format_entry_table(
|
||||
self,
|
||||
entries: list[HTTPEntry],
|
||||
show_headers: bool = True,
|
||||
show_body: bool = False,
|
||||
limit: int | None = None,
|
||||
) -> Table:
|
||||
"""Create a table of HTTP entries.
|
||||
|
||||
Args:
|
||||
entries: List of HTTPEntry objects
|
||||
show_headers: Whether to show request/response headers
|
||||
show_body: Whether to show request/response body
|
||||
limit: Maximum number of entries to show
|
||||
|
||||
Returns:
|
||||
Rich Table object
|
||||
"""
|
||||
table = Table(title=f"HTTP Entries ({len(entries)} total)")
|
||||
|
||||
table.add_column("ID", style="cyan", no_wrap=True)
|
||||
table.add_column("Method", style="magenta", no_wrap=True)
|
||||
table.add_column("URL", style="blue")
|
||||
table.add_column("Status", justify="center", no_wrap=True)
|
||||
table.add_column("Time", style="dim", no_wrap=True)
|
||||
table.add_column("Duration", justify="right", no_wrap=True)
|
||||
|
||||
if show_headers:
|
||||
table.add_column("Req Headers", style="dim")
|
||||
table.add_column("Resp Headers", style="dim")
|
||||
|
||||
if show_body:
|
||||
table.add_column("Req Body", style="dim")
|
||||
table.add_column("Resp Body", style="dim")
|
||||
|
||||
display_entries = entries[:limit] if limit else entries
|
||||
|
||||
for entry in display_entries:
|
||||
row: list[Any] = [
|
||||
entry.id,
|
||||
entry.request.method,
|
||||
self._truncate_url(entry.request.url),
|
||||
self._format_status(entry.response.status),
|
||||
self._format_timestamp(entry.timestamp),
|
||||
self._format_duration(entry.duration_ms),
|
||||
]
|
||||
|
||||
if show_headers:
|
||||
row.append(self._format_headers(entry.request.headers))
|
||||
row.append(self._format_headers(entry.response.headers))
|
||||
|
||||
if show_body:
|
||||
row.append(self._truncate_body(entry.request.body))
|
||||
row.append(self._truncate_body(entry.response.body))
|
||||
|
||||
table.add_row(*row)
|
||||
|
||||
return table
|
||||
|
||||
def _truncate_url(self, url: str, max_length: int = 60) -> str:
|
||||
"""Truncate URL for display."""
|
||||
if len(url) <= max_length:
|
||||
return url
|
||||
return url[: max_length - 3] + "..."
|
||||
|
||||
def _format_status(self, status: int) -> Text:
|
||||
"""Format status code with color."""
|
||||
if 200 <= status < 300:
|
||||
return Text(str(status), style="green")
|
||||
elif 300 <= status < 400:
|
||||
return Text(str(status), style="blue")
|
||||
elif 400 <= status < 500:
|
||||
return Text(str(status), style="yellow")
|
||||
elif 500 <= status < 600:
|
||||
return Text(str(status), style="red")
|
||||
return Text(str(status))
|
||||
|
||||
def _format_timestamp(self, timestamp: Any) -> str:
|
||||
"""Format timestamp for display."""
|
||||
if timestamp is None:
|
||||
return "-"
|
||||
if hasattr(timestamp, "strftime"):
|
||||
return timestamp.strftime("%H:%M:%S")
|
||||
return str(timestamp)
|
||||
|
||||
def _format_duration(self, duration_ms: float | None) -> str:
|
||||
"""Format duration for display."""
|
||||
if duration_ms is None:
|
||||
return "-"
|
||||
if duration_ms < 1000:
|
||||
return f"{duration_ms:.0f}ms"
|
||||
return f"{duration_ms / 1000:.2f}s"
|
||||
|
||||
def _format_headers(self, headers: dict[str, str]) -> str:
|
||||
"""Format headers for display."""
|
||||
if not headers:
|
||||
return "-"
|
||||
count = len(headers)
|
||||
return f"{count} headers"
|
||||
|
||||
def _truncate_body(self, body: str | None, max_length: int = 50) -> str:
|
||||
"""Truncate body for display."""
|
||||
if body is None:
|
||||
return "-"
|
||||
body = body.strip()
|
||||
if not body:
|
||||
return "-"
|
||||
if len(body) <= max_length:
|
||||
return body
|
||||
return body[: max_length - 3] + "..."
|
||||
|
||||
def format_diff(self, diff_output: str) -> Table:
|
||||
"""Format diff output as table.
|
||||
|
||||
Args:
|
||||
diff_output: Diff output string
|
||||
|
||||
Returns:
|
||||
Rich Table object
|
||||
"""
|
||||
table = Table(title="Diff Comparison")
|
||||
table.add_column("Before/After", style="cyan", no_wrap=True)
|
||||
table.add_column("Change", style="white")
|
||||
|
||||
for line in diff_output.split("\n"):
|
||||
if line.startswith("-"):
|
||||
table.add_row("-", Text(line, style="red"))
|
||||
elif line.startswith("+"):
|
||||
table.add_row("+", Text(line, style="green"))
|
||||
elif line.startswith("---"):
|
||||
table.add_row("", Text(line, style="dim"))
|
||||
else:
|
||||
table.add_row("", line)
|
||||
|
||||
return table
|
||||
7
http_log_explorer/exporters/__init__.py
Normal file
7
http_log_explorer/exporters/__init__.py
Normal file
@@ -0,0 +1,7 @@
|
||||
"""Exporters for various formats."""
|
||||
|
||||
from http_log_explorer.exporters.code_exporter import CodeExporter
|
||||
from http_log_explorer.exporters.curl_exporter import CurlExporter
|
||||
from http_log_explorer.exporters.json_exporter import JSONExporter
|
||||
|
||||
__all__ = ["CodeExporter", "CurlExporter", "JSONExporter"]
|
||||
263
http_log_explorer/exporters/code_exporter.py
Normal file
263
http_log_explorer/exporters/code_exporter.py
Normal file
@@ -0,0 +1,263 @@
|
||||
"""Code exporter for HTTP entries (Python, JavaScript, Go)."""
|
||||
|
||||
import json
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
|
||||
class CodeExporter:
    """Export HTTP entries as code snippets in various languages.

    Supported targets: Python (requests), JavaScript (axios),
    Go (net/http). The emitted snippets are intended to be valid,
    runnable code on their own.
    """

    def __init__(self) -> None:
        """Initialize code exporter."""
        # Retained for backward compatibility; templates are class attributes.
        self._template_dir = ""

    # Placeholders: {headers} dict literal, {data} optional "data = ..." block,
    # {params}/{headers_param}/{body} optional keyword-argument lines.
    PYTHON_TEMPLATE = '''import requests

headers = {headers}

{data}response = requests.{method}(
    "{url}",{params}{headers_param}{body}
)
print(response.status_code)
print(response.json())
'''

    # {data_js} holds const declarations ABOVE the config object so the
    # generated JavaScript stays syntactically valid.
    JAVASCRIPT_TEMPLATE = '''const axios = require('axios');
{data_js}
const config = {{
  method: '{method}',
  url: '{url}',
{headers_js}{body_js}}};

axios(config)
  .then(response => {{
    console.log(response.status);
    console.log(response.data);
  }})
  .catch(error => {{
    console.error(error);
  }});
'''

    # {imports} is built per-entry so "bytes" is only imported when a body
    # buffer is actually used; {body_ref} is either "body" or "nil".
    GO_TEMPLATE = '''package main

import (
{imports})

func main() {{
{body_decl}    req, err := http.NewRequest("{method}", "{url}", {body_ref})
    if err != nil {{
        panic(err)
    }}
{set_headers}    client := &http.Client{{}}
    resp, err := client.Do(req)
    if err != nil {{
        panic(err)
    }}
    defer resp.Body.Close()

    fmt.Println("Status:", resp.Status)
}}
'''

    def export_python(self, entry: "HTTPEntry") -> str:
        """Export entry as Python code.

        Args:
            entry: HTTPEntry object

        Returns:
            Python code string (syntactically valid)
        """
        headers_str = self._format_python_dict(entry.request.headers)

        params_line = ""
        data_line = ""
        body_line = ""

        if entry.request.query_params:
            params_line = (
                f"\n    params={self._format_python_dict(entry.request.query_params)},"
            )

        if entry.request.body:
            # BUG FIX: previously no `data = ...` assignment was emitted,
            # leaving the generated code referencing an undefined name.
            data_line = f"data = {entry.request.body!r}\n\n"
            body_line = "\n    data=data,"

        # BUG FIX: keyword arguments now carry their own leading commas so
        # the generated call is valid with any combination of options.
        headers_param = "\n    headers=headers," if entry.request.headers else ""

        return self.PYTHON_TEMPLATE.format(
            method=entry.request.method.lower(),
            url=entry.request.url,
            headers=headers_str,
            params=params_line,
            headers_param=headers_param,
            data=data_line,
            body=body_line,
        )

    def export_javascript(self, entry: "HTTPEntry") -> str:
        """Export entry as JavaScript code.

        Args:
            entry: HTTPEntry object

        Returns:
            JavaScript code string
        """
        header_lines = "".join(
            f'    "{name}": "{value}",\n'
            for name, value in entry.request.headers.items()
        )
        headers_js = f"  headers: {{\n{header_lines}  }},\n" if header_lines else ""

        # BUG FIX: const declarations used to be rendered inside the config
        # object (invalid JS), and query params overwrote the body. Both are
        # now emitted above the config and can coexist.
        declarations: list[str] = []
        body_js = ""
        if entry.request.body:
            declarations.append(f"const data = {json.dumps(entry.request.body)};")
            body_js += "  data: data,\n"
        if entry.request.query_params:
            declarations.append(f"const params = {json.dumps(entry.request.query_params)};")
            body_js += "  params: params,\n"
        data_js = ("\n" + "\n".join(declarations) + "\n") if declarations else ""

        return self.JAVASCRIPT_TEMPLATE.format(
            method=entry.request.method.lower(),
            url=entry.request.url,
            headers_js=headers_js,
            data_js=data_js,
            body_js=body_js,
        )

    def export_go(self, entry: "HTTPEntry") -> str:
        """Export entry as Go code.

        Args:
            entry: HTTPEntry object

        Returns:
            Go code string
        """
        set_headers = "".join(
            f'    req.Header.Set("{name}", "{value}")\n'
            for name, value in entry.request.headers.items()
        )

        # BUG FIX: the old template produced `body := nil` (invalid Go) when
        # no body existed, and a duplicate `body :=` declaration when one did.
        if entry.request.body:
            escaped = self._escape_go_string(entry.request.body)
            imports = '    "bytes"\n    "fmt"\n    "net/http"\n'
            body_decl = f'    body := bytes.NewBufferString("{escaped}")\n'
            body_ref = "body"
        else:
            imports = '    "fmt"\n    "net/http"\n'
            body_decl = ""
            body_ref = "nil"

        return self.GO_TEMPLATE.format(
            imports=imports,
            method=entry.request.method,
            url=entry.request.url,
            body_decl=body_decl,
            body_ref=body_ref,
            set_headers=set_headers,
        )

    def export_batch(
        self, entries: "list[HTTPEntry]", language: str
    ) -> list[str]:
        """Export multiple entries as code snippets.

        Args:
            entries: List of HTTPEntry objects
            language: Target language (python, javascript, go)

        Returns:
            List of code strings

        Raises:
            ValueError: If language is not supported
        """
        language = language.lower()
        exporters = {
            "python": self.export_python,
            "javascript": self.export_javascript,
            "go": self.export_go,
        }
        try:
            exporter = exporters[language]
        except KeyError:
            raise ValueError(
                f"Unsupported language: {language}. "
                f"Supported: python, javascript, go"
            ) from None
        return [exporter(e) for e in entries]

    def _format_python_dict(self, d: dict[str, str]) -> str:
        """Format dictionary as Python code.

        Args:
            d: Dictionary to format

        Returns:
            Python dict string
        """
        if not d:
            return "{}"
        items = [f'"{k}": "{v}"' for k, v in d.items()]
        return "{\n    " + ",\n    ".join(items) + "\n}"

    def _escape_go_string(self, s: str) -> str:
        """Escape string for a double-quoted Go string literal.

        Args:
            s: String to escape

        Returns:
            Escaped string
        """
        # BUG FIX: escapes previously targeted a raw (backtick) literal,
        # where backslash sequences are not interpreted at all. Emitting a
        # regular quoted literal with standard escapes is always valid.
        return (
            s.replace("\\", "\\\\")
            .replace('"', '\\"')
            .replace("\n", "\\n")
            .replace("\t", "\\t")
        )

    def to_file(
        self, entries: "list[HTTPEntry]", path: str, language: str
    ) -> None:
        """Write code snippets to file.

        Args:
            entries: List of HTTPEntry objects
            path: Output file path
            language: Target language
        """
        snippets = self.export_batch(entries, language)
        with open(path, "w") as f:
            for snippet in snippets:
                f.write(snippet)
                f.write("\n\n")
|
||||
70
http_log_explorer/exporters/curl_exporter.py
Normal file
70
http_log_explorer/exporters/curl_exporter.py
Normal file
@@ -0,0 +1,70 @@
|
||||
"""cURL exporter for HTTP entries."""
|
||||
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
|
||||
class CurlExporter:
    """Export HTTP entries as cURL commands."""

    def export(self, entry: HTTPEntry) -> str:
        """Export a single entry as cURL command.

        Args:
            entry: HTTPEntry object

        Returns:
            cURL command string
        """
        req = entry.request
        parts: list[str] = ["curl", "-X", req.method]

        if req.headers:
            for name, value in req.headers.items():
                # Host/Content-Length are set by curl itself; forwarding
                # them would be redundant or wrong.
                if name.lower() not in ("host", "content-length"):
                    parts += ["-H", f"{name}: {value}"]

        if req.body:
            parts += ["-d", "'" + self._escape_body(req.body) + "'"]

        parts.append(f"'{req.url}'")
        return " ".join(parts)

    def export_batch(self, entries: list[HTTPEntry]) -> list[str]:
        """Export multiple entries as cURL commands.

        Args:
            entries: List of HTTPEntry objects

        Returns:
            List of cURL command strings
        """
        return list(map(self.export, entries))

    def _escape_body(self, body: str) -> str:
        """Escape body string for shell.

        Args:
            body: Body content

        Returns:
            Escaped string (single quotes closed, escaped, reopened)
        """
        return body.replace("'", "'\\''")

    def to_file(self, entries: list[HTTPEntry], path: str) -> None:
        """Write cURL commands to file (one per line).

        Args:
            entries: List of HTTPEntry objects
            path: Output file path
        """
        with open(path, "w") as f:
            f.writelines(self.export(entry) + "\n" for entry in entries)
|
||||
66
http_log_explorer/exporters/json_exporter.py
Normal file
66
http_log_explorer/exporters/json_exporter.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""JSON exporter for HTTP entries."""
|
||||
|
||||
import json
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
|
||||
class JSONExporter:
    """Export HTTP entries to JSON format."""

    def export(self, entries: list[HTTPEntry], indent: int = 2) -> str:
        """Export entries to JSON string.

        Args:
            entries: List of HTTPEntry objects
            indent: JSON indent level

        Returns:
            JSON string representation
        """
        serializable = [entry.to_dict() for entry in entries]
        # default=str stringifies non-JSON values (e.g. datetimes).
        return json.dumps(serializable, indent=indent, default=str)

    def export_compact(self, entries: list[HTTPEntry]) -> str:
        """Export entries to compact JSON (no indent).

        Args:
            entries: List of HTTPEntry objects

        Returns:
            Compact JSON string
        """
        serializable = [entry.to_dict() for entry in entries]
        return json.dumps(serializable, separators=(",", ":"), default=str)

    def save(self, entries: list[HTTPEntry], path: str, indent: int = 2) -> None:
        """Save entries to JSON file.

        Args:
            entries: List of HTTPEntry objects
            path: Output file path
            indent: JSON indent level
        """
        with open(path, "w") as fh:
            fh.write(self.export(entries, indent))

    def export_summary(self, entries: list[HTTPEntry]) -> str:
        """Export summary of entries (URL, method, status only).

        Args:
            entries: List of HTTPEntry objects

        Returns:
            JSON string with summary info
        """
        summary = [
            {
                "id": entry.id,
                "method": entry.request.method,
                "url": entry.request.url,
                "status": entry.response.status,
                "content_type": entry.content_type,
                "duration_ms": entry.duration_ms,
            }
            for entry in entries
        ]
        return json.dumps(summary, indent=2)
|
||||
5
http_log_explorer/generators/__init__.py
Normal file
5
http_log_explorer/generators/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
"""Generators for OpenAPI specs and exports."""
|
||||
|
||||
from http_log_explorer.generators.openapi_generator import OpenAPIGenerator
|
||||
|
||||
__all__ = ["OpenAPIGenerator"]
|
||||
431
http_log_explorer/generators/openapi_generator.py
Normal file
431
http_log_explorer/generators/openapi_generator.py
Normal file
@@ -0,0 +1,431 @@
|
||||
"""OpenAPI 3.0 spec generator from HTTP traffic."""
|
||||
|
||||
import json
|
||||
import re
|
||||
from collections import defaultdict
|
||||
from typing import Any
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
try:
|
||||
from openapi_spec_validator import validate
|
||||
VALIDATION_AVAILABLE = True
|
||||
except ImportError:
|
||||
VALIDATION_AVAILABLE = False
|
||||
|
||||
|
||||
class OpenAPIGenerator:
|
||||
"""Generate OpenAPI 3.0 specification from observed traffic."""
|
||||
|
||||
def __init__(self, entries: list[HTTPEntry]) -> None:
    """Initialize with HTTP entries.

    Args:
        entries: List of HTTPEntry objects
    """
    # Observed traffic; read-only input for spec generation.
    self.entries = entries
    # Output spec, rebuilt on each generate() call.
    self.spec: dict[str, Any] = {}
    # Named component schemas collected while inferring bodies ($ref targets).
    self._schemas: dict[str, dict[str, Any]] = {}
    # path -> {method -> operation} accumulator.
    self._path_items: dict[str, dict[str, Any]] = defaultdict(dict)
|
||||
|
||||
def generate(
    self,
    title: str = "API",
    version: str = "1.0.0",
    description: str = "Generated from traffic analysis",
    validate_spec: bool = True,
) -> dict[str, Any]:
    """Generate OpenAPI spec from traffic.

    Args:
        title: API title
        version: API version
        description: API description
        validate_spec: Whether to validate the generated spec

    Returns:
        OpenAPI spec dictionary

    Raises:
        ValueError: If validation fails and validate_spec is True
    """
    self.spec = {
        "openapi": "3.0.3",
        "info": {
            "title": title,
            "version": version,
            "description": description,
        },
        "paths": {},
        "components": {"schemas": {}},
    }

    # Reset per-call working state so repeated calls do not accumulate.
    self._schemas = {}
    self._path_items = defaultdict(dict)

    self._infer_paths()
    self._infer_schemas()

    self.spec["paths"] = dict(self._path_items)
    self.spec["components"]["schemas"] = self._schemas

    # Validation is best-effort: silently skipped when
    # openapi-spec-validator is not installed.
    if validate_spec and VALIDATION_AVAILABLE:
        try:
            validate(self.spec)
        except Exception as e:
            raise ValueError(f"Generated spec is invalid: {e}") from e

    return self.spec
|
||||
|
||||
def _infer_paths(self) -> None:
    """Infer API paths and operations from observed traffic.

    NOTE(review): when several entries hit the same (path, method) pair
    the last observed entry wins; samples are not merged — confirm intended.
    """
    for entry in self.entries:
        path = self._extract_path(entry.endpoint)
        method = entry.request.method.lower()

        if path not in self._path_items:
            self._path_items[path] = {}

        # Declare templated path segments ({id}, {uuid}, ...) once per path.
        path_params = self._extract_path_params(path)
        if path_params and "parameters" not in self._path_items[path]:
            self._path_items[path]["parameters"] = path_params

        operation: dict[str, Any] = {
            "responses": self._generate_responses(entry),
        }

        # BUG FIX: query parameters are built from query_params, so gate on
        # query_params — the old code checked request.headers instead.
        if entry.request.query_params:
            operation["parameters"] = self._generate_parameters(entry)

        if entry.request.body:
            request_body = self._generate_request_body(entry)
            if request_body:
                operation["requestBody"] = request_body

        self._path_items[path][method] = operation
|
||||
|
||||
def _extract_path_params(self, path: str) -> list[dict[str, Any]]:
    """Extract path parameters from a path string.

    Args:
        path: The path string like '/users/{id}'

    Returns:
        List of parameter definitions
    """
    # `re` is already imported at module level; the redundant
    # function-local import was removed.
    return [
        {
            "name": param_name,
            "in": "path",
            "required": True,
            "schema": {"type": "string"},
        }
        for param_name in re.findall(r"\{([^}]+)\}", path)
    ]
|
||||
|
||||
def _extract_path(self, endpoint: str) -> str:
    """Extract and normalize path from endpoint.

    Numeric, UUID-like and hash-like segments are replaced with
    template placeholders.

    NOTE(review): every numeric segment in one path maps to the same
    placeholder name, so /a/1/b/2 yields duplicate {id} parameters —
    confirm this is acceptable for the generated spec.
    """
    normalized: list[str] = []
    for segment in endpoint.split("/"):
        if not segment:
            normalized.append("")
        elif segment.isdigit():
            normalized.append("{" + self._get_param_name(endpoint, segment) + "}")
        elif self._is_uuid(segment):
            normalized.append("{uuid}")
        elif self._is_hash(segment):
            normalized.append("{id}")
        else:
            normalized.append(segment)

    return "/".join(normalized) or "/"
|
||||
|
||||
def _get_param_name(self, path: str, value: str) -> str:
    """Generate parameter name based on path context.

    *value* is currently unused; the name is chosen from keywords
    appearing anywhere in the path, falling back to "id".
    """
    lowered = path.lower()
    for keywords, name in (
        (("user", "id"), "id"),
        (("page", "offset"), "page"),
        (("limit", "size"), "limit"),
    ):
        if any(keyword in lowered for keyword in keywords):
            return name
    return "id"
|
||||
|
||||
def _is_uuid(self, s: str) -> bool:
    """Check if string looks like a UUID (8-4-4-4-12 hex groups)."""
    # re caches compiled patterns, so module-level re.match is equivalent
    # to compiling the pattern on every call as the original did.
    return bool(
        re.match(
            r"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$",
            s,
            re.IGNORECASE,
        )
    )
|
||||
|
||||
def _is_hash(self, s: str) -> bool:
    """Check if string looks like a hex hash (32+ hex chars, e.g. md5/sha)."""
    return bool(re.match(r"^[a-f0-9]{32,}$", s, re.IGNORECASE))
|
||||
|
||||
def _generate_responses(self, entry: HTTPEntry) -> dict[str, Any]:
    """Generate the responses object for a single observed entry."""
    ct = entry.content_type

    # JSON responses get an inferred schema; any other non-empty body is
    # described as plain text; empty bodies produce no content at all.
    if ct and "json" in ct.lower():
        content = {
            "application/json": {
                "schema": self._extract_schema_from_body(
                    entry.response.body, "response"
                ),
            }
        }
    elif entry.response.body:
        content = {"text/plain": {"schema": {"type": "string"}}}
    else:
        content = {}

    return {
        str(entry.response.status): {
            "description": entry.response.status_text or "OK",
            "content": content,
        }
    }
|
||||
|
||||
def _generate_parameters(self, entry: HTTPEntry) -> list[dict[str, Any]]:
    """Generate query-parameter definitions from the entry's query string."""
    params: list[dict[str, Any]] = []

    for name, value in entry.request.query_params.items():
        definition: dict[str, Any] = {
            "name": name,
            "in": "query",
            "schema": {"type": self._infer_type(value)},
        }
        # Only attach an example for non-empty observed values.
        if value:
            definition["example"] = value
        params.append(definition)

    return params
|
||||
|
||||
def _generate_request_body(self, entry: HTTPEntry) -> dict[str, Any] | None:
    """Generate request body definition, or None when there is no body."""
    body = entry.request.body
    if not body:
        return None

    if self._is_json(body):
        media_type = "application/json"
        schema = self._extract_schema_from_body(body, "request")
    else:
        media_type = "text/plain"
        schema = {"type": "string"}

    return {
        "content": {media_type: {"schema": schema}},
        "required": True,
    }
|
||||
|
||||
def _extract_schema_from_body(
|
||||
self, body: str | None, prefix: str = "schema"
|
||||
) -> dict[str, Any]:
|
||||
"""Extract JSON schema from body content.
|
||||
|
||||
Args:
|
||||
body: Body content
|
||||
prefix: Prefix for schema name
|
||||
|
||||
Returns:
|
||||
JSON Schema dictionary
|
||||
"""
|
||||
if not body:
|
||||
return {"type": "string"}
|
||||
|
||||
if not self._is_json(body):
|
||||
return {"type": "string"}
|
||||
|
||||
try:
|
||||
data = json.loads(body)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
return {"type": "string"}
|
||||
|
||||
if isinstance(data, dict):
|
||||
schema_name = f"{prefix}Schema"
|
||||
schema = self._dict_to_schema(data, schema_name)
|
||||
self._schemas[schema_name] = schema
|
||||
return {"$ref": f"#/components/schemas/{schema_name}"}
|
||||
elif isinstance(data, list) and data:
|
||||
return {
|
||||
"type": "array",
|
||||
"items": self._dict_to_schema(data[0], f"{prefix}Item"),
|
||||
}
|
||||
|
||||
return {"type": "string"}
|
||||
|
||||
def _dict_to_schema(
|
||||
self, data: dict[str, Any], name: str
|
||||
) -> dict[str, Any]:
|
||||
"""Convert dictionary to JSON schema.
|
||||
|
||||
Args:
|
||||
data: Dictionary to convert
|
||||
name: Schema name
|
||||
|
||||
Returns:
|
||||
JSON Schema dictionary
|
||||
"""
|
||||
properties: dict[str, Any] = {}
|
||||
required: list[str] = []
|
||||
|
||||
for key, value in data.items():
|
||||
prop_schema = self._value_to_schema(value, key)
|
||||
properties[key] = prop_schema
|
||||
required.append(key)
|
||||
|
||||
return {
|
||||
"type": "object",
|
||||
"properties": properties,
|
||||
"required": required,
|
||||
}
|
||||
|
||||
def _value_to_schema(self, value: Any, key: str) -> dict[str, Any]:
|
||||
"""Convert a value to JSON schema.
|
||||
|
||||
Args:
|
||||
value: Value to convert
|
||||
key: Key name (for nested object naming)
|
||||
|
||||
Returns:
|
||||
JSON Schema for the value
|
||||
"""
|
||||
if value is None:
|
||||
return {"type": "string", "nullable": True}
|
||||
elif isinstance(value, bool):
|
||||
return {"type": "boolean"}
|
||||
elif isinstance(value, int):
|
||||
return {"type": "integer"}
|
||||
elif isinstance(value, float):
|
||||
return {"type": "number"}
|
||||
elif isinstance(value, str):
|
||||
if self._is_json(value):
|
||||
nested = self._dict_to_schema(json.loads(value), f"{key}Schema")
|
||||
return nested
|
||||
return {"type": "string"}
|
||||
elif isinstance(value, dict):
|
||||
schema_name = f"{key}Schema"
|
||||
nested = self._dict_to_schema(value, schema_name)
|
||||
self._schemas[schema_name] = nested
|
||||
return {"$ref": f"#/components/schemas/{schema_name}"}
|
||||
elif isinstance(value, list):
|
||||
if value:
|
||||
item_schema = self._value_to_schema(value[0], f"{key}Item")
|
||||
return {"type": "array", "items": item_schema}
|
||||
return {"type": "array", "items": {"type": "string"}}
|
||||
|
||||
return {"type": "string"}
|
||||
|
||||
def _infer_type(self, value: str) -> str:
|
||||
"""Infer JSON type from string value.
|
||||
|
||||
Args:
|
||||
value: String value
|
||||
|
||||
Returns:
|
||||
JSON type string
|
||||
"""
|
||||
if not value:
|
||||
return "string"
|
||||
try:
|
||||
int(value)
|
||||
return "integer"
|
||||
except ValueError:
|
||||
pass
|
||||
try:
|
||||
float(value)
|
||||
return "number"
|
||||
except ValueError:
|
||||
pass
|
||||
if value.lower() in ("true", "false"):
|
||||
return "boolean"
|
||||
return "string"
|
||||
|
||||
def _is_json(self, s: str) -> bool:
|
||||
"""Check if string is JSON.
|
||||
|
||||
Args:
|
||||
s: String to check
|
||||
|
||||
Returns:
|
||||
True if string is JSON
|
||||
"""
|
||||
if not s or not s.strip():
|
||||
return False
|
||||
if s.strip().startswith(("{", "[")):
|
||||
try:
|
||||
json.loads(s)
|
||||
return True
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
return False
|
||||
|
||||
def _infer_schemas(self) -> None:
|
||||
"""Infer additional schemas from request/response bodies."""
|
||||
for entry in self.entries:
|
||||
if entry.request.body and self._is_json(entry.request.body):
|
||||
try:
|
||||
data = json.loads(entry.request.body)
|
||||
if isinstance(data, dict):
|
||||
schema_name = "requestBodySchema"
|
||||
if schema_name not in self._schemas:
|
||||
self._schemas[schema_name] = self._dict_to_schema(data, schema_name)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
|
||||
def to_json(self, spec: dict[str, Any] | None = None, indent: int = 2) -> str:
    """Serialise a spec to a JSON string.

    Args:
        spec: Spec to convert; defaults to ``self.spec`` when omitted.
        indent: JSON indent level.

    Returns:
        JSON string.
    """
    target = self.spec if spec is None else spec
    return json.dumps(target, indent=indent)
|
||||
|
||||
def save_spec(self, path: str, spec: dict[str, Any] | None = None) -> None:
    """Write a spec to *path* as JSON.

    Args:
        path: File path to save to.
        spec: Spec to save; defaults to ``self.spec`` when omitted.
    """
    serialized = self.to_json(spec)
    with open(path, "w") as f:
        f.write(serialized)
|
||||
17
http_log_explorer/models/__init__.py
Normal file
17
http_log_explorer/models/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
||||
"""Data models."""
|
||||
|
||||
from http_log_explorer.models.http_entry import (
|
||||
DiffResult,
|
||||
FilterCriteria,
|
||||
HTTPEntry,
|
||||
Request,
|
||||
Response,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"DiffResult",
|
||||
"FilterCriteria",
|
||||
"HTTPEntry",
|
||||
"Request",
|
||||
"Response",
|
||||
]
|
||||
142
http_log_explorer/models/http_entry.py
Normal file
142
http_log_explorer/models/http_entry.py
Normal file
@@ -0,0 +1,142 @@
|
||||
"""Data models for HTTP entries."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
|
||||
@dataclass
class Request:
    """Represents an HTTP request."""

    method: str
    url: str
    http_version: str = "HTTP/1.1"
    headers: dict[str, str] = field(default_factory=dict)
    body: str | None = None
    query_params: dict[str, str] = field(default_factory=dict)

    def __post_init__(self) -> None:
        # Accept HAR-style header lists ([{"name": ..., "value": ...}])
        # and normalise them into a plain name -> value mapping.
        if isinstance(self.headers, list):
            normalised = {}
            for item in self.headers:
                normalised[item.get("name", "")] = item.get("value", "")
            self.headers = normalised
|
||||
|
||||
|
||||
@dataclass
class Response:
    """Represents an HTTP response."""

    status: int
    status_text: str
    http_version: str = "HTTP/1.1"
    headers: dict[str, str] = field(default_factory=dict)
    body: str | None = None
    content_type: str | None = None
    response_time_ms: float | None = None

    def __post_init__(self) -> None:
        # Accept HAR-style header lists ([{"name": ..., "value": ...}])
        # and normalise them into a plain name -> value mapping.
        if isinstance(self.headers, list):
            normalised = {}
            for item in self.headers:
                normalised[item.get("name", "")] = item.get("value", "")
            self.headers = normalised
|
||||
|
||||
|
||||
@dataclass
class HTTPEntry:
    """Represents a complete HTTP request/response pair."""

    id: str
    request: Request
    response: Response
    timestamp: datetime | None = None
    server_ip: str | None = None
    connection: str | None = None
    raw_size: int | None = None
    source_file: str | None = None

    @property
    def duration_ms(self) -> float | None:
        """Response time in milliseconds, when known."""
        return self.response.response_time_ms

    @property
    def content_type(self) -> str | None:
        """Content type, preferring the explicit field over header lookup."""
        explicit = self.response.content_type
        if explicit:
            return explicit
        return next(
            (
                value
                for name, value in self.response.headers.items()
                if name.lower() == "content-type"
            ),
            None,
        )

    @property
    def endpoint(self) -> str:
        """Path component of the request URL ("/" when absent)."""
        from urllib.parse import urlparse

        return urlparse(self.request.url).path or "/"

    @property
    def host(self) -> str:
        """Network-location component of the request URL."""
        from urllib.parse import urlparse

        return urlparse(self.request.url).netloc

    def to_dict(self) -> dict[str, Any]:
        """Convert to a plain-dict representation.

        NOTE(review): ``source_file`` is not serialised here — confirm
        that this omission is intentional.
        """
        req, resp = self.request, self.response
        return {
            "id": self.id,
            "request": {
                "method": req.method,
                "url": req.url,
                "http_version": req.http_version,
                "headers": req.headers,
                "body": req.body,
                "query_params": req.query_params,
            },
            "response": {
                "status": resp.status,
                "status_text": resp.status_text,
                "http_version": resp.http_version,
                "headers": resp.headers,
                "body": resp.body,
                "content_type": resp.content_type,
                "response_time_ms": resp.response_time_ms,
            },
            "timestamp": self.timestamp.isoformat() if self.timestamp else None,
            "server_ip": self.server_ip,
            "connection": self.connection,
            "raw_size": self.raw_size,
        }
|
||||
|
||||
|
||||
@dataclass
class FilterCriteria:
    """Criteria for filtering HTTP entries.

    Every field is optional; ``None`` means "do not filter on this
    dimension". The matching semantics are enforced by the filtering
    code, not visible here.
    """

    methods: list[str] | None = None  # HTTP methods to keep
    status_codes: list[int] | None = None  # response status codes to keep
    url_pattern: str | None = None  # URL pattern (semantics defined by the filter code)
    content_types: list[str] | None = None  # response content types to keep
    start_time: datetime | None = None  # lower time bound
    end_time: datetime | None = None  # upper time bound
    min_response_time_ms: float | None = None  # lower bound on response duration
    max_response_time_ms: float | None = None  # upper bound on response duration
    request_body_contains: str | None = None  # text required in the request body
    response_body_contains: str | None = None  # text required in the response body
|
||||
|
||||
|
||||
@dataclass
class DiffResult:
    """Represents the result of comparing two HTTP entries."""

    entry1_id: str  # id of the first compared entry
    entry2_id: str  # id of the second compared entry
    request_headers_diff: list[str] = field(default_factory=list)  # differences in request headers
    request_body_diff: list[str] = field(default_factory=list)  # differences in request bodies
    response_headers_diff: list[str] = field(default_factory=list)  # differences in response headers
    response_body_diff: list[str] = field(default_factory=list)  # differences in response bodies
    status_changed: bool = False  # True when the two status codes differ
    status1: int = 0  # status code of entry 1
    status2: int = 0  # status code of entry 2
    url_changed: bool = False  # True when the two request URLs differ
||||
76
http_log_explorer/parsers/__init__.py
Normal file
76
http_log_explorer/parsers/__init__.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""Parser interface for HTTP log formats."""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
|
||||
|
||||
class ParserInterface(ABC):
    """Abstract base class for HTTP log parsers.

    Concrete parsers implement :meth:`can_parse` for cheap format
    sniffing and :meth:`parse` for full conversion to ``HTTPEntry``
    objects.
    """

    @abstractmethod
    def parse(self, content: str | bytes, source_file: str | None = None) -> list[HTTPEntry]:
        """Parse content and return list of HTTP entries.

        Args:
            content: The content to parse (string or bytes)
            source_file: Optional source file name for reference

        Returns:
            List of HTTPEntry objects

        Raises:
            ValueError: If content cannot be parsed
        """
        pass

    @abstractmethod
    def can_parse(self, content: str | bytes) -> bool:
        """Check if this parser can handle the given content.

        Args:
            content: The content to check

        Returns:
            True if this parser can handle the content
        """
        pass

    @staticmethod
    def get_parser_name() -> str:
        """Return the name of this parser.

        Static rather than abstract: subclasses override it, and the
        base default keeps the interface non-breaking.
        """
        return "unknown"
|
||||
|
||||
|
||||
def get_parser(content: str | bytes) -> ParserInterface:
    """Return the first registered parser able to handle *content*.

    Probes HAR, then curl, then DevTools parsers in that order.

    Args:
        content: The content to parse.

    Returns:
        An appropriate parser instance.

    Raises:
        ValueError: If no suitable parser is found.
    """
    # Imported lazily: the parser modules themselves import
    # ParserInterface from this package, so top-level imports here
    # would be circular.
    from http_log_explorer.parsers.curl_parser import CurlParser
    from http_log_explorer.parsers.devtools_parser import DevToolsParser
    from http_log_explorer.parsers.har_parser import HARParser

    candidates: list[ParserInterface] = [
        HARParser(),
        CurlParser(),
        DevToolsParser(),
    ]
    for candidate in candidates:
        if candidate.can_parse(content):
            return candidate

    raise ValueError(
        "Unsupported format. Supported formats are: HAR files, curl -v output, and Chrome DevTools network exports."
    )
|
||||
|
||||
|
||||
__all__ = ["ParserInterface", "get_parser"]
|
||||
140
http_log_explorer/parsers/curl_parser.py
Normal file
140
http_log_explorer/parsers/curl_parser.py
Normal file
@@ -0,0 +1,140 @@
|
||||
"""Parser for curl -v output."""
|
||||
|
||||
import re
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
from http_log_explorer.models import HTTPEntry, Request, Response
|
||||
from http_log_explorer.parsers import ParserInterface
|
||||
|
||||
|
||||
class CurlParser(ParserInterface):
    """Parser for ``curl -v`` verbose output.

    Request lines are prefixed with ``> ``, response lines with ``< ``,
    and curl's own commentary with ``* ``. Anything after the response
    status line that is neither a header nor commentary is treated as
    response body text.
    """

    # "> GET /path HTTP/1.1"
    REQUEST_LINE_RE = re.compile(r"^> (\w+) (\S+) (HTTP/[\d.]+)$", re.MULTILINE)
    # "< HTTP/1.1 200 OK"
    RESPONSE_LINE_RE = re.compile(r"^< (HTTP/[\d.]+) (\d+) (.+)$", re.MULTILINE)
    # "> Name: value" / "< Name: value".  BUG FIX: the old pattern
    # "(> |<)" captured "> " (with the trailing space) so the later
    # comparison against ">" never matched and request headers were
    # silently dropped; capture the bare marker instead.
    HEADER_RE = re.compile(r"^(>|<) ([^:]+): (.+)$")
    TIMING_RE = re.compile(r"^\* time_conditional check:.*$")  # kept for compatibility; currently unused

    @staticmethod
    def get_parser_name() -> str:
        """Return the short name of this parser."""
        return "curl"

    def can_parse(self, content: str | bytes) -> bool:
        """Check if content appears to be curl -v output.

        Requires at least one request line and one response status line.
        """
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="ignore")
        has_request = bool(self.REQUEST_LINE_RE.search(content))
        has_response = bool(self.RESPONSE_LINE_RE.search(content))
        return has_request and has_response

    def parse(self, content: str | bytes, source_file: str | None = None) -> list[HTTPEntry]:
        """Parse curl -v output into HTTPEntry objects.

        Malformed blocks are skipped rather than aborting the parse.
        """
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="replace")

        entries: list[HTTPEntry] = []
        for idx, block in enumerate(self._split_blocks(content)):
            try:
                entry = self._parse_block(block, idx, source_file)
                if entry:
                    entries.append(entry)
            except Exception:
                # Best-effort: one bad block must not abort the rest.
                continue

        return entries

    def _split_blocks(self, content: str) -> list[dict[str, Any]]:
        """Split curl output into request/response blocks."""
        blocks: list[dict[str, Any]] = []
        current_block: dict[str, Any] = {}

        for line in content.split("\n"):
            request_match = self.REQUEST_LINE_RE.match(line)
            if request_match:
                # A new request line starts a new block.
                if current_block.get("request"):
                    blocks.append(current_block)
                current_block = {
                    "request": {
                        "method": request_match.group(1),
                        "url": request_match.group(2),
                        "http_version": request_match.group(3),
                    },
                    "headers": [],
                    "response_headers": [],
                    "body": None,
                    "response": None,
                }
                continue

            response_match = self.RESPONSE_LINE_RE.match(line)
            if response_match:
                if current_block.get("request"):
                    current_block["response"] = {
                        "http_version": response_match.group(1),
                        "status": int(response_match.group(2)),
                        "status_text": response_match.group(3),
                    }
                continue

            header_match = self.HEADER_RE.match(line)
            if header_match:
                direction, name, value = header_match.groups()
                if direction == ">" and "headers" in current_block:
                    current_block["headers"].append((name, value))
                elif direction == "<" and "response_headers" in current_block:
                    # BUG FIX: response headers used to be discarded.
                    current_block["response_headers"].append((name, value))
                continue

            # Skip curl's own commentary ("* Connected to ..."); it is
            # not part of the HTTP payload.
            if line.startswith("*"):
                continue

            # Remaining non-empty lines after the status line are body text.
            if current_block and current_block.get("response") and line.strip():
                if current_block["response"].get("body") is None:
                    current_block["response"]["body"] = ""
                current_block["response"]["body"] += line + "\n"

        if current_block.get("request"):
            blocks.append(current_block)

        return blocks

    def _parse_block(
        self, block: dict[str, Any], idx: int, source_file: str | None
    ) -> HTTPEntry | None:
        """Convert a single request/response block into an HTTPEntry."""
        if not block.get("request") or not block.get("response"):
            return None

        req_data = block["request"]
        resp_data = block["response"]

        request_headers = dict(block.get("headers", []))
        response_headers = dict(block.get("response_headers", []))

        request = Request(
            method=req_data.get("method", "GET"),
            url=req_data.get("url", "/"),
            http_version=req_data.get("http_version", "HTTP/1.1"),
            headers=request_headers,
            body=block.get("body"),
        )

        response_body = resp_data.get("body", "")
        if response_body:
            response_body = response_body.strip()

        response = Response(
            status=resp_data.get("status", 0),
            status_text=resp_data.get("status_text", ""),
            http_version=resp_data.get("http_version", "HTTP/1.1"),
            headers=response_headers,
            body=response_body if response_body else None,
            # BUG FIX: the response Content-Type lives in the "<" headers;
            # the old code looked (fruitlessly) at the request headers.
            content_type=response_headers.get("Content-Type")
            or response_headers.get("content-type"),
        )

        return HTTPEntry(
            id=f"curl-{idx}",
            request=request,
            response=response,
            timestamp=datetime.now(),
            source_file=source_file,
        )
|
||||
133
http_log_explorer/parsers/devtools_parser.py
Normal file
133
http_log_explorer/parsers/devtools_parser.py
Normal file
@@ -0,0 +1,133 @@
|
||||
"""Parser for Chrome DevTools network export format."""
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
from http_log_explorer.models import HTTPEntry, Request, Response
|
||||
from http_log_explorer.parsers import ParserInterface
|
||||
|
||||
|
||||
class DevToolsParser(ParserInterface):
    """Parser for Chrome DevTools network export JSON.

    Accepts either a bare list of request/response entry objects or a
    HAR-like ``{"log": {"entries": [...]}}`` wrapper that lacks the
    ``creator`` field (a real HAR file carries one and is handled by
    the HAR parser instead).
    """

    @staticmethod
    def get_parser_name() -> str:
        """Return the short name of this parser."""
        return "DevTools"

    def can_parse(self, content: str | bytes) -> bool:
        """Check if content appears to be DevTools network export."""
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="ignore")
        try:
            data = json.loads(content)
            if isinstance(data, list):
                # Bare-list export: sample only the first few dict items.
                return all(
                    "request" in item and "response" in item for item in data[:3] if isinstance(item, dict)
                )
            if isinstance(data, dict):
                # log + entries WITHOUT creator distinguishes this format
                # from a genuine HAR file.
                has_log = "log" in data
                has_entries = "entries" in data.get("log", {})
                has_creator = "creator" in data.get("log", {})
                return has_log and has_entries and not has_creator
        except json.JSONDecodeError:
            return False
        return False

    def parse(self, content: str | bytes, source_file: str | None = None) -> list[HTTPEntry]:
        """Parse DevTools network export into HTTPEntry objects.

        Raises:
            ValueError: If the JSON is invalid or the top-level shape is
                neither a list nor a ``log``-wrapped dict.
        """
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="replace")

        try:
            data = json.loads(content)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid JSON format: {e}") from e

        if isinstance(data, dict) and "log" in data:
            entries_data = data.get("log", {}).get("entries", [])
        elif isinstance(data, list):
            entries_data = data
        else:
            raise ValueError("Unrecognized DevTools format")

        entries: list[HTTPEntry] = []
        for idx, entry_data in enumerate(entries_data):
            try:
                entry = self._convert_entry(entry_data, idx, source_file)
                if entry:
                    entries.append(entry)
            except Exception:
                # Best-effort: skip malformed entries rather than abort.
                continue

        return entries

    def _convert_entry(
        self, entry_data: dict[str, Any], idx: int, source_file: str | None
    ) -> HTTPEntry | None:
        """Convert a DevTools entry to our HTTPEntry model.

        Returns None when either side of the pair is missing.
        """
        request_data = entry_data.get("request", {})
        response_data = entry_data.get("response", {})

        if not request_data or not response_data:
            return None

        request = Request(
            method=request_data.get("method", "GET"),
            url=request_data.get("url", ""),
            http_version=request_data.get("httpVersion", "HTTP/1.1"),
            headers=self._parse_headers(request_data.get("headers", {})),
            body=request_data.get("postData", {}).get("text") if request_data.get("postData") else None,
            query_params=self._parse_query_params(request_data.get("queryString", [])),
        )

        response = Response(
            status=response_data.get("status", 0),
            status_text=response_data.get("statusText", ""),
            http_version=response_data.get("httpVersion", "HTTP/1.1"),
            headers=self._parse_headers(response_data.get("headers", {})),
            body=response_data.get("content", {}).get("text") if isinstance(response_data.get("content"), dict) else None,
            content_type=response_data.get("content", {}).get("mimeType") if isinstance(response_data.get("content"), dict) else None,
            response_time_ms=self._parse_time(entry_data),
        )

        timestamp = self._parse_timestamp(entry_data)

        return HTTPEntry(
            id=f"devtools-{idx}",
            request=request,
            response=response,
            timestamp=timestamp,
            server_ip=entry_data.get("serverIPAddress"),
            connection=entry_data.get("connection"),
            source_file=source_file,
        )

    def _parse_headers(self, headers: dict[str, Any] | list) -> dict[str, str]:
        """Normalise headers (mapping or HAR-style list) to a plain dict."""
        if isinstance(headers, dict):
            return dict(headers)
        if isinstance(headers, list):
            return {h.get("name", ""): h.get("value", "") for h in headers}
        return {}

    def _parse_query_params(self, query_string: list[dict[str, Any]]) -> dict[str, str]:
        """Parse a HAR-style query-string list into a plain dict."""
        if isinstance(query_string, list):
            return {p.get("name", ""): p.get("value", "") for p in query_string}
        return {}

    def _parse_time(self, entry_data: dict[str, Any]) -> float | None:
        """Extract the entry's total time in milliseconds, if present."""
        if "time" in entry_data:
            return float(entry_data["time"])
        return None

    def _parse_timestamp(self, entry_data: dict[str, Any]) -> datetime | None:
        """Parse the ISO start timestamp, tolerating a trailing 'Z'."""
        if "startedDateTime" in entry_data:
            try:
                return datetime.fromisoformat(entry_data["startedDateTime"].replace("Z", "+00:00"))
            except (ValueError, AttributeError):
                pass
        return None
|
||||
47
http_log_explorer/parsers/factory.py
Normal file
47
http_log_explorer/parsers/factory.py
Normal file
@@ -0,0 +1,47 @@
|
||||
"""Parser factory for creating appropriate parsers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from http_log_explorer.parsers.curl_parser import CurlParser
|
||||
from http_log_explorer.parsers.devtools_parser import DevToolsParser
|
||||
from http_log_explorer.parsers.har_parser import HARParser
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from http_log_explorer.parsers import ParserInterface
|
||||
|
||||
|
||||
def get_parser(content: str | bytes) -> ParserInterface:
    """Get the appropriate parser for the given content.

    Probes HAR, then curl, then DevTools parsers and returns the first
    one whose ``can_parse`` accepts the content.

    Args:
        content: The content to parse.

    Returns:
        An appropriate parser instance.

    Raises:
        ValueError: If no suitable parser is found.
    """
    for candidate in (HARParser(), CurlParser(), DevToolsParser()):
        if candidate.can_parse(content):
            return candidate

    raise ValueError(
        "Unsupported format. Supported formats are: HAR files, curl -v output, and Chrome DevTools network exports."
    )
|
||||
|
||||
|
||||
def get_all_parsers() -> list[ParserInterface]:
    """Return one fresh instance of every available parser.

    Order matches the probe order used by :func:`get_parser`.
    """
    parser_classes = (HARParser, CurlParser, DevToolsParser)
    return [cls() for cls in parser_classes]
|
||||
|
||||
|
||||
__all__ = ["get_parser", "get_all_parsers"]
|
||||
146
http_log_explorer/parsers/har_parser.py
Normal file
146
http_log_explorer/parsers/har_parser.py
Normal file
@@ -0,0 +1,146 @@
|
||||
"""HAR file parser using haralyzer."""
|
||||
|
||||
import json
|
||||
from datetime import datetime
|
||||
from typing import Any
|
||||
|
||||
from haralyzer import HarParser
|
||||
|
||||
from http_log_explorer.models import HTTPEntry, Request, Response
|
||||
from http_log_explorer.parsers import ParserInterface
|
||||
|
||||
|
||||
class HARParser(ParserInterface):
    """Parser for HAR (HTTP Archive) files."""

    @staticmethod
    def get_parser_name() -> str:
        """Return the short name of this parser."""
        return "HAR"

    def can_parse(self, content: str | bytes) -> bool:
        """Check if content appears to be a HAR file.

        A genuine HAR document has ``log.entries`` AND ``log.creator``;
        the creator check separates it from a DevTools export.
        """
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="ignore")
        try:
            data = json.loads(content)
            has_log = "log" in data
            has_entries = "entries" in data.get("log", {})
            has_creator = "creator" in data.get("log", {})
            return has_log and has_entries and has_creator
        except (json.JSONDecodeError, AttributeError):
            return False

    def parse(self, content: str | bytes, source_file: str | None = None) -> list[HTTPEntry]:
        """Parse HAR content into HTTPEntry objects.

        Individual malformed entries are skipped; ValueError is raised
        only when the document itself cannot be loaded.
        """
        if isinstance(content, bytes):
            content = content.decode("utf-8", errors="replace")

        try:
            data = json.loads(content)
            har_parser = HarParser(data)
        except json.JSONDecodeError as e:
            raise ValueError(f"Invalid HAR format: {e}") from e
        except Exception as e:
            raise ValueError(f"Invalid HAR format: {e}") from e

        entries: list[HTTPEntry] = []
        har_entries = har_parser.har_data.get("entries", [])
        for idx, har_entry in enumerate(har_entries):
            try:
                entry = self._convert_har_entry(har_entry, idx, source_file)
                if entry:
                    entries.append(entry)
            except Exception:
                # Best-effort: one bad entry must not abort the rest.
                continue

        return entries

    def _convert_har_entry(
        self, har_entry: Any, idx: int, source_file: str | None
    ) -> HTTPEntry | None:
        """Convert a raw HAR entry dict to our HTTPEntry model."""
        request_data = har_entry.get("request")
        response_data = har_entry.get("response")

        if not request_data or not response_data:
            return None

        request = Request(
            method=request_data.get("method", "GET"),
            url=self._build_url(request_data),
            http_version=request_data.get("httpVersion", "HTTP/1.1"),
            headers=self._parse_headers(request_data.get("headers", [])),
            body=self._get_request_body(request_data),
            query_params=self._parse_query_params(request_data.get("queryString", [])),
        )

        response = Response(
            status=response_data.get("status", 0),
            status_text=response_data.get("statusText", ""),
            http_version=response_data.get("httpVersion", "HTTP/1.1"),
            headers=self._parse_headers(response_data.get("headers", [])),
            body=self._get_response_body(response_data),
            content_type=self._get_content_type(response_data.get("content", {})),
            response_time_ms=har_entry.get("time", None),
        )

        timestamp = self._parse_timestamp(har_entry)

        return HTTPEntry(
            id=f"har-{idx}",
            request=request,
            response=response,
            timestamp=timestamp,
            server_ip=har_entry.get("serverIPAddress", None),
            connection=har_entry.get("connection", None),
            source_file=source_file,
        )

    def _build_url(self, request_data: dict[str, Any]) -> str:
        """Build a full URL, falling back to the Host header when absent."""
        url = request_data.get("url", "")
        if not url:
            host = ""
            for header in request_data.get("headers", []):
                if header.get("name", "").lower() == "host":
                    host = header.get("value", "")
                    break
            url = f"http://{host}/"
        return url

    def _parse_headers(self, headers: list[dict[str, Any]]) -> dict[str, str]:
        """Parse a HAR headers list into a plain name -> value dict."""
        return {h.get("name", ""): h.get("value", "") for h in headers}

    def _parse_query_params(self, query_string: list[dict[str, Any]]) -> dict[str, str]:
        """Parse a HAR query-string list into a plain dict."""
        return {p.get("name", ""): p.get("value", "") for p in query_string}

    def _get_request_body(self, request_data: dict[str, Any]) -> str | None:
        """Extract the request body text, if any."""
        post_data = request_data.get("postData", {})
        if post_data:
            if isinstance(post_data, dict):
                return post_data.get("text", None)
            return str(post_data)
        return None

    def _get_response_body(self, response_data: dict[str, Any]) -> str | None:
        """Extract the response body text, if any."""
        content = response_data.get("content", {})
        if isinstance(content, dict):
            return content.get("text", None)
        return None

    def _get_content_type(self, content: dict[str, Any]) -> str | None:
        """Extract the MIME type from a HAR content dict."""
        if isinstance(content, dict):
            return content.get("mimeType", None)
        return None

    def _parse_timestamp(self, har_entry: Any) -> datetime | None:
        """Parse the start timestamp of a HAR entry.

        BUG FIX: entries obtained from ``har_data`` are plain dicts, so
        the previous ``getattr(har_entry, "started_datetime")`` lookup
        always returned None; the raw ``startedDateTime`` ISO string is
        now consulted first, keeping the attribute lookup as a fallback
        for haralyzer entry objects.
        """
        if isinstance(har_entry, dict):
            raw = har_entry.get("startedDateTime")
            if isinstance(raw, str) and raw:
                try:
                    return datetime.fromisoformat(raw.replace("Z", "+00:00"))
                except ValueError:
                    pass
        started_datetime = getattr(har_entry, "started_datetime", None)
        if started_datetime:
            return started_datetime
        return None
|
||||
928
main.py
Normal file
928
main.py
Normal file
@@ -0,0 +1,928 @@
|
||||
"""
|
||||
7000%AUTO - AI Automation System
|
||||
Main Entry Point
|
||||
|
||||
This module initializes the FastAPI application, database, and orchestrator.
|
||||
It handles graceful startup and shutdown of all system components.
|
||||
|
||||
Features:
|
||||
- Database initialization on startup
|
||||
- FastAPI web server with uvicorn
|
||||
- Orchestrator workflow running in background task
|
||||
- Graceful shutdown handling
|
||||
- Structured logging with configurable log level
|
||||
"""
|
||||
|
||||
# Set SDK log level BEFORE importing any SDK packages
|
||||
# This must be done at module load time, before opencode_ai is imported
|
||||
# Using setdefault allows users to override via environment variable for debugging
|
||||
import os
|
||||
os.environ.setdefault("OPENCODE_LOG", "warn")
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
import signal
|
||||
import subprocess
|
||||
import sys
|
||||
from contextlib import asynccontextmanager
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
import shutil
|
||||
import structlog
|
||||
import uvicorn
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.middleware.cors import CORSMiddleware
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
# Import project modules
|
||||
from config import settings
|
||||
from database import init_db, close_db
|
||||
from orchestrator import WorkflowOrchestrator
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Logging Configuration
|
||||
# =============================================================================
|
||||
|
||||
def configure_logging(log_level: str = "INFO") -> None:
    """
    Configure structured logging with the specified log level.

    Sets up the stdlib root logger, silences known-chatty third-party
    loggers, and routes everything through structlog (JSON in production,
    pretty console output when settings.DEBUG is true).

    Args:
        log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL).
            Unknown values silently fall back to INFO.
    """
    # Set root logger level; structlog renders the final message, so the
    # stdlib format is just the bare message.
    logging.basicConfig(
        format="%(message)s",
        level=getattr(logging, log_level.upper(), logging.INFO),
        stream=sys.stdout,
    )

    # Silence noisy third-party loggers — keep them at WARNING or above
    # regardless of the application's own level.
    noisy_loggers = [
        # SQLAlchemy
        "sqlalchemy",
        "sqlalchemy.engine",
        "sqlalchemy.pool",
        "sqlalchemy.dialects",
        "sqlalchemy.orm",
        "aiosqlite",
        # HTTP clients
        "httpx",
        "httpx._client",
        "httpcore",
        "httpcore.http11",
        "httpcore.http2",
        "httpcore.connection",
        "urllib3",
        "hpack",
        "h11",
        "h2",
        # OpenCode SDK (uses stainless framework)
        "opencode",
        "opencode_ai",
        "opencode_ai._base_client",
        "opencode_ai._client",
        # Stainless SDK framework (base for OpenAI/OpenCode SDKs)
        "stainless",
        "stainless._base_client",
        # Uvicorn
        "uvicorn.access",
    ]
    for logger_name in noisy_loggers:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Suppress httpx debug logging via environment variable
    # (setdefault keeps any value the operator already exported).
    os.environ.setdefault("HTTPX_LOG_LEVEL", "WARNING")

    # Configure structlog: standard processor chain, with the final renderer
    # chosen by DEBUG mode (machine-readable JSON vs. human-readable console).
    structlog.configure(
        processors=[
            structlog.stdlib.filter_by_level,
            structlog.stdlib.add_logger_name,
            structlog.stdlib.add_log_level,
            structlog.stdlib.PositionalArgumentsFormatter(),
            structlog.processors.TimeStamper(fmt="iso"),
            structlog.processors.StackInfoRenderer(),
            structlog.processors.format_exc_info,
            structlog.processors.UnicodeDecoder(),
            structlog.processors.JSONRenderer() if not settings.DEBUG else structlog.dev.ConsoleRenderer(),
        ],
        wrapper_class=structlog.stdlib.BoundLogger,
        context_class=dict,
        logger_factory=structlog.stdlib.LoggerFactory(),
        cache_logger_on_first_use=True,
    )
|
||||
|
||||
|
||||
# Initialize logging as a module-level side effect so every later import of
# this module logs consistently.
configure_logging(settings.LOG_LEVEL)
logger = structlog.get_logger(__name__)


# =============================================================================
# Global State
# =============================================================================

# Global orchestrator instance (created by run_orchestrator_loop)
orchestrator: Optional[WorkflowOrchestrator] = None

# Background task reference for the orchestrator loop
orchestrator_task: Optional[asyncio.Task] = None

# OpenCode server subprocess handle and its base URL once reachable
opencode_process: Optional[subprocess.Popen] = None
opencode_server_url: Optional[str] = None

# Default OpenCode server port (a port embedded in
# settings.OPENCODE_SERVER_URL takes precedence at startup)
OPENCODE_SERVER_PORT = 18080

# Shutdown event for graceful termination: set by signal handlers and
# stop_orchestrator(), observed by the orchestrator loop.
shutdown_event = asyncio.Event()
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# OpenCode Configuration Generation
|
||||
# =============================================================================
|
||||
|
||||
def generate_opencode_config() -> None:
    """
    Generate opencode.json dynamically from environment variables.

    This ensures all configuration values are properly set from environment
    variables. If required variables are missing, exits with a clear error.

    Required environment variables:
        - OPENCODE_API_KEY: API key for the AI provider
        - OPENCODE_API_BASE: API base URL
        - OPENCODE_SDK: npm package (e.g. @ai-sdk/anthropic, @ai-sdk/openai)
        - OPENCODE_MODEL: Model name to use
        - OPENCODE_MAX_TOKENS: Maximum output tokens

    Side effects:
        Writes ``opencode.json`` into the current working directory and
        calls ``sys.exit(1)`` when required settings are missing.
    """
    # Check for required environment variables
    missing = settings.get_missing_opencode_settings()
    if missing:
        logger.error(
            "Missing required OpenCode environment variables",
            missing=missing,
            hint="Set these environment variables before starting the application",
        )
        logger.error(
            "Example configuration:",
            example={
                "OPENCODE_API_KEY": "your-api-key",
                "OPENCODE_API_BASE": "https://api.minimax.io/anthropic/v1",
                "OPENCODE_SDK": "@ai-sdk/anthropic",
                "OPENCODE_MODEL": "MiniMax-M2.1",
                "OPENCODE_MAX_TOKENS": "196608",
            }
        )
        sys.exit(1)

    # Extract provider name from SDK package (e.g. @ai-sdk/anthropic -> anthropic).
    # This is used as the provider key in the config.
    # NOTE: str.split() never returns an empty list, so the "custom" fallback
    # below can never trigger; it is kept as harmless defensive code.
    sdk_parts = settings.OPENCODE_SDK.split("/")
    provider_name = sdk_parts[-1] if sdk_parts else "custom"

    config = {
        "$schema": "https://opencode.ai/config.json",
        "provider": {
            provider_name: {
                "npm": settings.OPENCODE_SDK,
                "name": provider_name.title(),
                "options": {
                    "baseURL": settings.OPENCODE_API_BASE,
                    # The literal {env:...} placeholder is resolved by the
                    # OpenCode CLI at runtime, so the key is not written to disk.
                    "apiKey": "{env:OPENCODE_API_KEY}"
                },
                "models": {
                    settings.OPENCODE_MODEL: {
                        "name": settings.OPENCODE_MODEL,
                        "options": {
                            "max_tokens": settings.OPENCODE_MAX_TOKENS
                        }
                    }
                }
            }
        },
        "model": f"{provider_name}/{settings.OPENCODE_MODEL}",
        # One entry per pipeline agent; prompts live under .opencode/agent/.
        "agent": {
            "ideator": {
                "description": "Finds innovative project ideas from various sources",
                "mode": "primary",
                "prompt": "{file:.opencode/agent/ideator.md}",
                "tools": {
                    "read": True,
                    "grep": True,
                    "glob": True,
                    "bash": True
                }
            },
            "planner": {
                "description": "Creates detailed implementation plans",
                "mode": "primary",
                "prompt": "{file:.opencode/agent/planner.md}",
                "tools": {
                    "read": True,
                    "grep": True,
                    "glob": True,
                    "bash": True
                }
            },
            "developer": {
                "description": "Implements code based on plans",
                "mode": "primary",
                "prompt": "{file:.opencode/agent/developer.md}",
                "tools": {
                    "read": True,
                    "write": True,
                    "edit": True,
                    "bash": True,
                    "grep": True,
                    "glob": True
                }
            },
            "tester": {
                "description": "Tests and validates implementations",
                "mode": "primary",
                "prompt": "{file:.opencode/agent/tester.md}",
                "tools": {
                    "read": True,
                    "bash": True,
                    "grep": True,
                    "glob": True
                }
            },
            "uploader": {
                "description": "Uploads projects to Gitea",
                "mode": "primary",
                "prompt": "{file:.opencode/agent/uploader.md}",
                "tools": {
                    "read": True,
                    "write": True,
                    "bash": True,
                    "grep": True
                }
            },
            "evangelist": {
                "description": "Promotes projects on X/Twitter",
                "mode": "primary",
                "prompt": "{file:.opencode/agent/evangelist.md}",
                "tools": {
                    "read": True,
                    "bash": True
                }
            }
        },
        # Local MCP servers launched by the OpenCode CLI as subprocesses.
        "mcp": {
            "search": {
                "type": "local",
                "command": ["python", "-m", "mcp_servers.search_mcp"],
                "enabled": True
            },
            "gitea": {
                "type": "local",
                "command": ["python", "-m", "mcp_servers.gitea_mcp"],
                "enabled": True
            },
            "x_api": {
                "type": "local",
                "command": ["python", "-m", "mcp_servers.x_mcp"],
                "enabled": True
            },
            "database": {
                "type": "local",
                "command": ["python", "-m", "mcp_servers.database_mcp"],
                "enabled": True
            },
            "devtest": {
                "type": "local",
                "command": ["python", "-m", "mcp_servers.devtest_mcp"],
                "enabled": True
            }
        }
    }

    # Write the config file (relative path: current working directory)
    config_path = Path("opencode.json")
    config_path.write_text(json.dumps(config, indent=2), encoding="utf-8")

    logger.info(
        "Generated opencode.json from environment variables",
        sdk=settings.OPENCODE_SDK,
        model=settings.OPENCODE_MODEL,
        max_tokens=settings.OPENCODE_MAX_TOKENS,
        base_url=settings.OPENCODE_API_BASE,
    )
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# OpenCode Server Management
|
||||
# =============================================================================
|
||||
|
||||
async def start_opencode_server() -> Optional[str]:
    """
    Start the OpenCode server as a subprocess.

    Locates the OpenCode CLI, launches it in serve mode on a local port,
    and waits until it answers HTTP before reporting success. Updates the
    module globals ``opencode_process`` and ``opencode_server_url``.

    Returns:
        The server URL if successful, None otherwise
    """
    global opencode_process, opencode_server_url

    # Check if OpenCode CLI is available.
    # Check multiple locations: npm global, user home, and PATH.
    possible_paths = [
        "/usr/local/bin/opencode",  # npm global bin (Docker)
        "/usr/bin/opencode",  # System bin
        os.path.expanduser("~/.opencode/bin/opencode"),  # User home (curl install)
    ]

    opencode_path = None
    for path in possible_paths:
        if os.path.exists(path):
            opencode_path = path
            break

    if not opencode_path:
        # Fall back to a PATH lookup
        opencode_path = shutil.which("opencode")
        if not opencode_path:
            logger.warning(
                "OpenCode CLI not found",
                checked_paths=["~/.opencode/bin/opencode", "PATH"]
            )
            return None

    # Determine port to use: a port embedded in the configured server URL
    # overrides the default.
    port = OPENCODE_SERVER_PORT
    if settings.OPENCODE_SERVER_URL:
        try:
            from urllib.parse import urlparse
            parsed = urlparse(settings.OPENCODE_SERVER_URL)
            if parsed.port:
                port = parsed.port
        except Exception:
            # Malformed URL: keep the default port
            pass

    server_url = f"http://127.0.0.1:{port}"

    logger.info(
        "Starting OpenCode server",
        opencode_path=opencode_path,
        port=port,
    )

    try:
        # Start OpenCode server in serve mode.
        # NOTE(review): stdout/stderr are PIPEd but never drained while the
        # server runs; a sufficiently chatty server could fill the OS pipe
        # buffer and block — confirm output volume or consider DEVNULL/file.
        opencode_process = subprocess.Popen(
            [
                opencode_path,
                "serve",
                "--port", str(port),
                "--hostname", "127.0.0.1",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            cwd=os.getcwd(),  # Run in project directory so opencode.json is found
        )

        # Wait for server to be ready
        ready = await wait_for_opencode_server(server_url, timeout=30)

        if ready:
            opencode_server_url = server_url
            logger.info(
                "OpenCode server started successfully",
                url=server_url,
                pid=opencode_process.pid,
            )
            return server_url
        else:
            # Server never answered: clean up the subprocess before giving up
            logger.error("OpenCode server failed to start within timeout")
            await stop_opencode_server()
            return None

    except Exception as e:
        logger.error(
            "Failed to start OpenCode server",
            error=str(e),
            error_type=type(e).__name__,
        )
        return None
|
||||
|
||||
|
||||
async def wait_for_opencode_server(url: str, timeout: int = 30) -> bool:
    """
    Wait for the OpenCode server to be ready.

    Polls the server root URL every 0.5s until any HTTP response arrives,
    the subprocess dies, or the deadline passes.

    Args:
        url: Server URL to check
        timeout: Maximum seconds to wait

    Returns:
        True if server is ready, False otherwise
    """
    # Use the running loop's monotonic clock for the deadline.
    # asyncio.get_event_loop() is deprecated inside coroutines; a coroutine
    # always has a running loop, so get_running_loop() is the correct,
    # warning-free call and cannot accidentally create a new loop.
    loop = asyncio.get_running_loop()
    start_time = loop.time()

    async with httpx.AsyncClient(timeout=5.0) as client:
        while (loop.time() - start_time) < timeout:
            try:
                # Any response (even an error status) proves the server is
                # up; it may not expose a /health endpoint, so just hit "/".
                response = await client.get(f"{url}/")
                logger.debug("OpenCode server responded", status=response.status_code)
                return True
            except httpx.ConnectError:
                # Server not yet accepting connections
                await asyncio.sleep(0.5)
            except httpx.TimeoutException:
                # Connection timed out, try again
                await asyncio.sleep(0.5)
            except Exception as e:
                logger.debug(f"Waiting for OpenCode server: {e}")
                await asyncio.sleep(0.5)

            # Bail out early if the subprocess has already died — no point
            # waiting out the full timeout.
            if opencode_process and opencode_process.poll() is not None:
                returncode = opencode_process.returncode
                stderr = opencode_process.stderr.read().decode() if opencode_process.stderr else ""
                logger.error(
                    "OpenCode server process died",
                    returncode=returncode,
                    stderr=stderr[:500],
                )
                return False

    # Deadline passed without a response
    return False
|
||||
|
||||
|
||||
async def stop_opencode_server() -> None:
    """
    Stop the OpenCode server subprocess, escalating from terminate to kill.

    No-op when no server is running. Always clears the module globals
    afterwards, even if shutdown raised.
    """
    global opencode_process, opencode_server_url

    proc = opencode_process
    if proc is None:
        return

    logger.info("Stopping OpenCode server", pid=proc.pid)

    try:
        # Polite shutdown first (SIGTERM / TerminateProcess)
        proc.terminate()
        try:
            proc.wait(timeout=5)
        except subprocess.TimeoutExpired:
            # Escalate: the process ignored termination
            logger.warning("OpenCode server did not terminate gracefully, killing...")
            proc.kill()
            proc.wait(timeout=5)
        logger.info("OpenCode server stopped")
    except Exception as e:
        logger.error(f"Error stopping OpenCode server: {e}")
    finally:
        # Forget the dead process regardless of how shutdown went
        opencode_process = None
        opencode_server_url = None
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Orchestrator Management
|
||||
# =============================================================================
|
||||
|
||||
async def run_orchestrator_loop() -> None:
    """
    Run the orchestrator pipeline in a continuous loop.

    The orchestrator will run the full pipeline and then wait for a configured
    interval before starting the next run. This loop continues until shutdown
    is requested via the module-level ``shutdown_event``.
    """
    global orchestrator

    orchestrator = WorkflowOrchestrator()

    logger.info(
        "Orchestrator loop started",
        auto_start=settings.AUTO_START,
        max_concurrent_projects=settings.MAX_CONCURRENT_PROJECTS,
    )

    while not shutdown_event.is_set():
        try:
            logger.info("Starting orchestrator pipeline run")

            # Run the full pipeline
            result = await orchestrator.run_full_pipeline()

            if result.get("success"):
                logger.info(
                    "Pipeline completed successfully",
                    project_id=result.get("project_id"),
                    github_url=result.get("github_url"),
                    x_post_url=result.get("x_post_url"),
                    iterations=result.get("dev_test_iterations"),
                )
            else:
                logger.warning(
                    "Pipeline completed with errors",
                    project_id=result.get("project_id"),
                    error=result.get("error"),
                )

            # Wait before next run (or until shutdown).
            # wait_for(shutdown_event.wait(), ...) doubles as an interruptible
            # sleep: a TimeoutError means "interval elapsed, keep looping",
            # while a normal return means shutdown was requested mid-wait.
            pipeline_interval = 60  # seconds between pipeline runs
            try:
                await asyncio.wait_for(
                    shutdown_event.wait(),
                    timeout=pipeline_interval
                )
                # If we get here, shutdown was requested
                break
            except asyncio.TimeoutError:
                # Timeout means we should continue the loop
                continue

        except asyncio.CancelledError:
            # Task was cancelled (e.g. by stop_orchestrator); exit cleanly
            logger.info("Orchestrator loop cancelled")
            break
        except Exception as e:
            logger.error(
                "Orchestrator pipeline error",
                error=str(e),
                error_type=type(e).__name__,
            )
            # Back off 30s before retrying; exit early if shutdown arrives
            # during the back-off (same interruptible-sleep pattern as above).
            try:
                await asyncio.wait_for(shutdown_event.wait(), timeout=30)
                break
            except asyncio.TimeoutError:
                continue

    logger.info("Orchestrator loop stopped")
|
||||
|
||||
|
||||
async def stop_orchestrator() -> None:
    """
    Gracefully shut down the orchestrator and its background task.

    Signals the shutdown event, asks the orchestrator itself to stop, then
    cancels the background loop task and waits up to 10s for it to finish.
    """
    global orchestrator, orchestrator_task

    logger.info("Stopping orchestrator...")

    # Let the loop's interruptible waits observe the shutdown request
    shutdown_event.set()

    if orchestrator is not None:
        await orchestrator.stop()

    task = orchestrator_task
    if task is not None and not task.done():
        task.cancel()
        try:
            await asyncio.wait_for(task, timeout=10.0)
        except (asyncio.CancelledError, asyncio.TimeoutError):
            # Either outcome means the task is no longer ours to wait on
            pass

    logger.info("Orchestrator stopped")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Database Initialization
|
||||
# =============================================================================
|
||||
|
||||
async def initialize_database() -> None:
    """
    Prepare persistent storage: ensure directories exist, then create tables.
    """
    logger.info("Initializing database...")

    # Required directories must exist before the database can be created
    settings.ensure_directories()

    await init_db()

    # Only log the part after '@' for remote URLs, so credentials embedded
    # in the connection string never reach the logs.
    db_url = settings.DATABASE_URL
    shown = db_url.split("@")[-1] if "@" in db_url else "local"
    logger.info(
        "Database initialized successfully",
        database_url=shown,
    )
|
||||
|
||||
|
||||
async def shutdown_database() -> None:
    """Close all database connections as part of application shutdown."""
    logger.info("Closing database connections...")
    await close_db()
    logger.info("Database connections closed")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# FastAPI Application Lifespan
|
||||
# =============================================================================
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """
    Application lifespan context manager.

    Handles startup and shutdown events for the FastAPI application:
    - Startup: Initialize database, start OpenCode server, start orchestrator (if AUTO_START)
    - Shutdown: Stop orchestrator, stop OpenCode server, close database connections

    The shutdown steps run in a ``finally`` block, so they also execute when
    startup itself raises part-way through.
    """
    global orchestrator_task, opencode_server_url

    # === STARTUP ===
    logger.info(
        "Starting 7000%AUTO application",
        app_name=settings.APP_NAME,
        debug=settings.DEBUG,
        host=settings.HOST,
        port=settings.PORT,
    )

    try:
        # Initialize database
        await initialize_database()

        # Mount web dashboard AFTER database is initialized; a missing or
        # broken dashboard is non-fatal.
        try:
            from web.app import app as dashboard_app
            app.mount("/dashboard", dashboard_app)
            logger.info("Web dashboard mounted at /dashboard")
        except ImportError:
            logger.warning("Web dashboard not available, skipping mount")
        except Exception as e:
            logger.warning(f"Failed to mount web dashboard: {e}")

        # Generate opencode.json from environment variables.
        # This ensures all config values are properly set without {env:...}
        # syntax issues. May sys.exit(1) when required settings are missing.
        generate_opencode_config()

        # Start OpenCode server
        opencode_url = await start_opencode_server()
        if opencode_url:
            # Publish the live server URL so the orchestrator picks it up
            settings.OPENCODE_SERVER_URL = opencode_url
            logger.info(
                "OpenCode server ready",
                url=opencode_url,
            )
        else:
            # Degraded mode: the app still serves HTTP without the server
            logger.warning(
                "OpenCode server not available, agent operations may fail",
                fallback="Will attempt to use OPENCODE_API directly if configured",
            )

        # Start orchestrator in background if AUTO_START is enabled
        if settings.AUTO_START:
            logger.info("AUTO_START enabled, starting orchestrator background task")
            orchestrator_task = asyncio.create_task(
                run_orchestrator_loop(),
                name="orchestrator-loop"
            )
        else:
            logger.info("AUTO_START disabled, orchestrator will not start automatically")

        logger.info(
            "Application startup complete",
            auto_start=settings.AUTO_START,
            gitea_configured=settings.is_gitea_configured,
            x_configured=settings.is_x_configured,
            opencode_configured=settings.is_opencode_configured,
            opencode_available=opencode_url is not None,
        )

        # Hand control to the running application
        yield

    finally:
        # === SHUTDOWN === (reverse order of startup)
        logger.info("Shutting down application...")

        # Stop orchestrator
        await stop_orchestrator()

        # Stop OpenCode server
        await stop_opencode_server()

        # Close database connections
        await shutdown_database()

        logger.info("Application shutdown complete")
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# FastAPI Application
|
||||
# =============================================================================
|
||||
|
||||
# FastAPI application instance; interactive API docs are only exposed in
# debug mode.
app = FastAPI(
    title=settings.APP_NAME,
    description="Autonomous AI System with 6 Orchestrated Agents: Ideator -> Planner -> Developer <-> Tester -> Uploader -> Evangelist",
    version="1.0.0",
    lifespan=lifespan,
    docs_url="/docs" if settings.DEBUG else None,
    redoc_url="/redoc" if settings.DEBUG else None,
    openapi_url="/openapi.json" if settings.DEBUG else None,
)

# Add CORS middleware.
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers per the CORS spec — confirm whether credentialed
# cross-origin requests are actually needed here.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Core API Endpoints
|
||||
# =============================================================================
|
||||
|
||||
@app.get("/")
|
||||
async def root():
|
||||
"""Redirect to dashboard."""
|
||||
from fastapi.responses import RedirectResponse
|
||||
return RedirectResponse(url="/dashboard")
|
||||
|
||||
|
||||
@app.get("/health")
|
||||
async def health_check():
|
||||
"""
|
||||
Health check endpoint for monitoring and load balancers.
|
||||
"""
|
||||
orchestrator_status = "running" if (orchestrator and orchestrator.is_running) else "idle"
|
||||
if not settings.AUTO_START and orchestrator is None:
|
||||
orchestrator_status = "disabled"
|
||||
|
||||
opencode_status = "running" if opencode_server_url else "unavailable"
|
||||
|
||||
return {
|
||||
"status": "healthy",
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
"components": {
|
||||
"database": "healthy",
|
||||
"orchestrator": orchestrator_status,
|
||||
"opencode_server": opencode_status,
|
||||
},
|
||||
"config": {
|
||||
"auto_start": settings.AUTO_START,
|
||||
"debug": settings.DEBUG,
|
||||
"gitea_configured": settings.is_gitea_configured,
|
||||
"x_configured": settings.is_x_configured,
|
||||
"opencode_configured": settings.is_opencode_configured,
|
||||
"opencode_url": opencode_server_url,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@app.get("/status")
|
||||
async def get_status():
|
||||
"""
|
||||
Get detailed system status.
|
||||
"""
|
||||
return {
|
||||
"app_name": settings.APP_NAME,
|
||||
"orchestrator": {
|
||||
"running": orchestrator.is_running if orchestrator else False,
|
||||
"auto_start": settings.AUTO_START,
|
||||
},
|
||||
"opencode": {
|
||||
"available": opencode_server_url is not None,
|
||||
"url": opencode_server_url,
|
||||
"pid": opencode_process.pid if opencode_process else None,
|
||||
},
|
||||
"configuration": {
|
||||
"host": settings.HOST,
|
||||
"port": settings.PORT,
|
||||
"debug": settings.DEBUG,
|
||||
"log_level": settings.LOG_LEVEL,
|
||||
"workspace_dir": str(settings.WORKSPACE_DIR),
|
||||
"max_concurrent_projects": settings.MAX_CONCURRENT_PROJECTS,
|
||||
},
|
||||
"integrations": {
|
||||
"gitea": settings.is_gitea_configured,
|
||||
"x_twitter": settings.is_x_configured,
|
||||
"minimax": settings.is_opencode_configured,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Error Handlers
|
||||
# =============================================================================
|
||||
|
||||
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """
    Catch-all handler for unhandled errors: log and return a generic 500.

    The raw error message is only exposed to clients in DEBUG mode.
    """
    logger.error(
        "Unhandled exception",
        path=request.url.path,
        method=request.method,
        error=str(exc),
        error_type=type(exc).__name__,
    )

    body = {
        "detail": "Internal server error",
        "error": str(exc) if settings.DEBUG else None,
    }
    return JSONResponse(status_code=500, content=body)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Signal Handlers
|
||||
# =============================================================================
|
||||
|
||||
def create_signal_handler():
    """
    Build a signal handler that triggers graceful shutdown.

    Returns:
        A callable suitable for signal.signal(); invoking it logs the
        signal name and sets the module-level shutdown_event.
    """
    def _on_signal(signum, frame):
        """Log the received signal and request shutdown."""
        signal_name = signal.Signals(signum).name
        logger.info(f"Received {signal_name}, initiating graceful shutdown...")
        shutdown_event.set()

    return _on_signal
|
||||
|
||||
|
||||
def setup_signal_handlers():
    """
    Register shutdown handlers: SIGTERM and SIGINT on Unix, SIGINT only
    on Windows (the only signal Windows reliably delivers via Ctrl+C).
    """
    handler = create_signal_handler()

    # SIGINT works on every platform
    signal.signal(signal.SIGINT, handler)

    # SIGTERM is only meaningful outside Windows
    if sys.platform != "win32":
        signal.signal(signal.SIGTERM, handler)
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Mount Web Dashboard (mounted lazily in lifespan to avoid import issues)
|
||||
# =============================================================================
|
||||
|
||||
# Dashboard is mounted inside lifespan() after database initialization
|
||||
|
||||
|
||||
# =============================================================================
|
||||
# Main Entry Point
|
||||
# =============================================================================
|
||||
|
||||
def main():
    """
    Main entry point for running the application.

    Configures and starts the uvicorn server with the FastAPI application.
    """
    # Set up signal handlers.
    # NOTE(review): uvicorn installs its own SIGINT/SIGTERM handlers when it
    # starts and may override these — confirm which handler actually fires.
    setup_signal_handlers()

    logger.info(
        "Starting uvicorn server",
        host=settings.HOST,
        port=settings.PORT,
        log_level=settings.LOG_LEVEL.lower(),
        reload=settings.DEBUG,
    )

    # Run uvicorn.
    # NOTE(review): reload=True (DEBUG mode) and an explicit worker count
    # are mutually exclusive in uvicorn — confirm the intended combination.
    uvicorn.run(
        "main:app",
        host=settings.HOST,
        port=settings.PORT,
        reload=settings.DEBUG,
        log_level=settings.LOG_LEVEL.lower(),
        access_log=True,
        # Production settings
        workers=1,  # Use 1 worker for orchestrator state consistency
        loop="auto",
        http="auto",
        # Timeouts
        timeout_keep_alive=30,
    )


if __name__ == "__main__":
    main()
|
||||
12
mcp_servers/__init__.py
Normal file
12
mcp_servers/__init__.py
Normal file
@@ -0,0 +1,12 @@
|
||||
"""
|
||||
MCP Servers for 7000%AUTO
|
||||
Provides external API access to AI agents via Model Context Protocol
|
||||
"""
|
||||
|
||||
from .search_mcp import mcp as search_mcp
|
||||
from .x_mcp import mcp as x_mcp
|
||||
from .database_mcp import mcp as database_mcp
|
||||
from .gitea_mcp import mcp as gitea_mcp
|
||||
from .devtest_mcp import mcp as devtest_mcp
|
||||
|
||||
__all__ = ['search_mcp', 'x_mcp', 'database_mcp', 'gitea_mcp', 'devtest_mcp']
|
||||
342
mcp_servers/database_mcp.py
Normal file
342
mcp_servers/database_mcp.py
Normal file
@@ -0,0 +1,342 @@
|
||||
"""Database MCP Server for 7000%AUTO
|
||||
Provides database operations for idea management
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Optional
|
||||
|
||||
from mcp.server.fastmcp import FastMCP
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mcp = FastMCP("Database Server")
|
||||
|
||||
# Database initialization flag for MCP server process
|
||||
_db_ready = False
|
||||
|
||||
|
||||
async def _init_db_if_needed():
    """
    Lazily initialize the database on first use.

    MCP servers run in separate processes, so each process must create its
    own engine/tables; the module flag makes this a one-time operation.
    """
    global _db_ready

    if _db_ready:
        return

    try:
        from database.db import init_db
        await init_db()
        _db_ready = True
        logger.info("Database initialized in MCP server")
    except Exception as e:
        logger.error(f"Failed to initialize database in MCP server: {e}")
        raise
|
||||
|
||||
|
||||
@mcp.tool()
async def get_previous_ideas(limit: int = 50) -> dict:
    """
    Get list of previously generated ideas.

    Args:
        limit: Maximum number of ideas to return (default 50)

    Returns:
        Dictionary with list of ideas (descriptions truncated to 200 chars)
    """
    try:
        await _init_db_if_needed()
        from database import get_db, Idea
        from sqlalchemy import select

        stmt = select(Idea).order_by(Idea.created_at.desc()).limit(limit)

        async with get_db() as session:
            rows = (await session.execute(stmt)).scalars().all()

            # Build the payload while the session is open so ORM attribute
            # access cannot hit a detached instance.
            payload = [
                {
                    "id": row.id,
                    "title": row.title,
                    "description": row.description[:200],
                    "source": row.source,
                    "used": row.used,
                }
                for row in rows
            ]

        return {
            "success": True,
            "ideas": payload,
            "count": len(rows),
        }

    except Exception as e:
        logger.error(f"Error getting previous ideas: {e}")
        return {"success": False, "error": str(e), "ideas": []}
|
||||
|
||||
|
||||
@mcp.tool()
async def check_idea_exists(title: str) -> dict:
    """
    Check if a similar idea already exists.

    Compares case-insensitively: exact match, substring containment
    ("partial"), and word-set overlap above 50% ("similar").

    Args:
        title: Title to check for similarity

    Returns:
        Dictionary with exists flag and similar ideas if found
    """
    try:
        await _init_db_if_needed()
        from database import get_db, Idea
        from sqlalchemy import select, func

        probe = title.lower()
        probe_words = set(probe.split())

        def _classify(candidate_title: str):
            """Return the match type for one stored title, or None."""
            other = candidate_title.lower()
            if probe == other:
                return "exact"
            if probe in other or other in probe:
                return "partial"
            other_words = set(other.split())
            union = probe_words | other_words
            if union and len(probe_words & other_words) / len(union) > 0.5:
                return "similar"
            return None

        similar = []
        async with get_db() as session:
            result = await session.execute(select(Idea))
            # Classify inside the session so ORM attributes stay loadable
            for idea in result.scalars().all():
                match_type = _classify(idea.title)
                if match_type:
                    similar.append({
                        "id": idea.id,
                        "title": idea.title,
                        "match_type": match_type,
                    })

        return {
            "success": True,
            "exists": len(similar) > 0,
            "similar_ideas": similar[:5],
            "count": len(similar),
        }

    except Exception as e:
        logger.error(f"Error checking idea existence: {e}")
        return {"success": False, "error": str(e), "exists": False}
|
||||
|
||||
|
||||
@mcp.tool()
async def save_idea(title: str, description: str, source: str) -> dict:
    """
    Persist a new idea.

    Args:
        title: Idea title
        description: Idea description
        source: Source of the idea (arxiv, reddit, x, hn, ph)

    Returns:
        Dictionary with the saved idea's fields on success, or an error.
    """
    try:
        await _init_db_if_needed()
        from database import create_idea

        record = await create_idea(
            title=title,
            description=description,
            source=source
        )

        # created_at may be unset until the row is refreshed from the DB.
        created = record.created_at.isoformat() if record.created_at else None

        return {
            "success": True,
            "idea": {
                "id": record.id,
                "title": record.title,
                "description": record.description,
                "source": record.source,
                "created_at": created
            }
        }

    except Exception as e:
        logger.error(f"Error saving idea: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_database_stats() -> dict:
    """
    Fetch aggregate database statistics.

    Returns:
        Dictionary with ``success`` and a ``stats`` payload from the
        database layer, or an error description on failure.
    """
    try:
        await _init_db_if_needed()
        from database import get_stats

        return {"success": True, "stats": await get_stats()}

    except Exception as e:
        logger.error(f"Error getting database stats: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def submit_idea(
    project_id: int,
    title: str,
    description: str,
    source: str,
    tech_stack: list[str] = None,
    target_audience: str = None,
    key_features: list[str] = None,
    complexity: str = None,
    estimated_time: str = None,
    inspiration: str = None
) -> dict:
    """
    Submit a generated project idea. Use this tool to finalize and save your idea.
    The idea is written straight to the database record of the given project.

    Args:
        project_id: The project ID to associate this idea with (required)
        title: Short project name (required)
        description: Detailed description of the project (required)
        source: Source of inspiration - arxiv, reddit, x, hn, or ph (required)
        tech_stack: List of technologies to use (e.g., ["python", "fastapi"])
        target_audience: Who would use this project
        key_features: List of key features
        complexity: low, medium, or high
        estimated_time: Estimated implementation time (e.g., "2-4 hours")
        inspiration: Brief note on what inspired this idea

    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_idea_json

        # Required fields go in as-is; optional ones fall back to an
        # empty/neutral value (note: `or` also replaces falsy inputs).
        idea_data = {"title": title, "description": description, "source": source}
        optional_fields = (
            ("tech_stack", tech_stack, []),
            ("target_audience", target_audience, ""),
            ("key_features", key_features, []),
            ("complexity", complexity, "medium"),
            ("estimated_time", estimated_time, ""),
            ("inspiration", inspiration, ""),
        )
        for key, value, fallback in optional_fields:
            idea_data[key] = value or fallback

        saved = await set_project_idea_json(project_id, idea_data)

        if not saved:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}

        logger.info(f"Idea submitted for project {project_id}: {title}")
        return {"success": True, "message": f"Idea '{title}' saved successfully"}

    except Exception as e:
        logger.error(f"Error submitting idea: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def submit_plan(
    project_id: int,
    project_name: str,
    overview: str,
    display_name: str = None,
    tech_stack: dict = None,
    file_structure: dict = None,
    features: list[dict] = None,
    implementation_steps: list[dict] = None,
    testing_strategy: dict = None,
    configuration: dict = None,
    error_handling: dict = None,
    readme_sections: list[str] = None
) -> dict:
    """
    Submit an implementation plan. Use this tool to finalize your project plan.
    The plan is written straight to the database record of the given project.

    Args:
        project_id: The project ID to associate this plan with (required)
        project_name: kebab-case project name (required)
        overview: 2-3 sentence summary of what will be built (required)
        display_name: Human readable project name
        tech_stack: Technology stack details with language, runtime, framework, key_dependencies
        file_structure: File structure with root_files and directories
        features: List of features with name, priority, description, implementation_notes
        implementation_steps: Ordered list of implementation steps
        testing_strategy: Testing approach with unit_tests, integration_tests, test_files, test_commands
        configuration: Config details with env_variables and config_files
        error_handling: Error handling strategies
        readme_sections: List of README section titles

    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_plan_json

        plan_data = {
            "project_name": project_name,
            # Derive a human-readable name from the kebab-case one if absent.
            "display_name": display_name or project_name.replace("-", " ").title(),
            "overview": overview,
        }
        # Optional sections fall back to empty containers.
        optional_sections = (
            ("tech_stack", tech_stack, {}),
            ("file_structure", file_structure, {}),
            ("features", features, []),
            ("implementation_steps", implementation_steps, []),
            ("testing_strategy", testing_strategy, {}),
            ("configuration", configuration, {}),
            ("error_handling", error_handling, {}),
            ("readme_sections", readme_sections, []),
        )
        for key, value, fallback in optional_sections:
            plan_data[key] = value or fallback

        saved = await set_project_plan_json(project_id, plan_data)

        if not saved:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}

        logger.info(f"Plan submitted for project {project_id}: {project_name}")
        return {"success": True, "message": f"Plan '{project_name}' saved successfully"}

    except Exception as e:
        logger.error(f"Error submitting plan: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
# Run the MCP server (stdio transport) when executed as a script.
if __name__ == "__main__":
    mcp.run()
|
||||
636
mcp_servers/devtest_mcp.py
Normal file
636
mcp_servers/devtest_mcp.py
Normal file
@@ -0,0 +1,636 @@
|
||||
"""Developer-Tester Communication MCP Server for 7000%AUTO
|
||||
Provides structured communication between Developer and Tester agents via MCP tools.
|
||||
|
||||
This enables Developer and Tester to share:
|
||||
- Test results (PASS/FAIL with detailed bug reports)
|
||||
- Implementation status (completed/fixing with file changes)
|
||||
- Project context (plan, current iteration)
|
||||
"""
|
||||
|
||||
import logging
from datetime import datetime, timezone
from typing import Optional

from mcp.server.fastmcp import FastMCP
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mcp = FastMCP("DevTest Communication Server")
|
||||
|
||||
# Database initialization flag for MCP server process
|
||||
_db_ready = False
|
||||
|
||||
|
||||
async def _init_db_if_needed():
    """Lazily initialize the database once per process.

    MCP servers run in separate processes, so the main application's
    initialization does not apply here; the first tool call triggers it.
    Raises whatever ``init_db`` raised if initialization fails.
    """
    global _db_ready
    if _db_ready:
        return
    try:
        from database.db import init_db
        await init_db()
        _db_ready = True
        logger.info("Database initialized in DevTest MCP server")
    except Exception as e:
        logger.error(f"Failed to initialize database in DevTest MCP server: {e}")
        raise
|
||||
|
||||
|
||||
@mcp.tool()
async def submit_test_result(
    project_id: int,
    status: str,
    summary: str,
    checks_performed: Optional[list[dict]] = None,
    bugs: Optional[list[dict]] = None,
    code_quality: Optional[dict] = None,
    ready_for_upload: bool = False
) -> dict:
    """
    Submit test results after testing the implementation. Use this tool to report test outcomes.
    The Developer will read these results to fix any bugs.

    Args:
        project_id: The project ID being tested (required)
        status: Test status - "PASS" or "FAIL" (required)
        summary: Brief summary of test results (required)
        checks_performed: List of checks with {check, result, details} format
        bugs: List of bugs found with {id, severity, type, file, line, issue, error_message, suggestion} format
        code_quality: Quality assessment with {error_handling, documentation, test_coverage} ratings
        ready_for_upload: Whether the project is ready for upload (true only if PASS)

    Returns:
        Dictionary with success status

    Example for PASS:
        submit_test_result(
            project_id=1,
            status="PASS",
            summary="All tests passed successfully",
            checks_performed=[
                {"check": "linting", "result": "pass", "details": "No issues found"},
                {"check": "unit_tests", "result": "pass", "details": "15/15 tests passed"}
            ],
            ready_for_upload=True
        )

    Example for FAIL:
        submit_test_result(
            project_id=1,
            status="FAIL",
            summary="Found 2 critical issues",
            bugs=[
                {"id": 1, "severity": "critical", "type": "type_error",
                 "file": "src/main.py", "line": 42,
                 "issue": "Missing return type annotation",
                 "error_message": "error: Function is missing return type annotation",
                 "suggestion": "Add -> str return type"}
            ],
            ready_for_upload=False
        )
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_test_result_json

        # Reject invalid statuses before touching the database.
        if status not in ("PASS", "FAIL"):
            return {"success": False, "error": "status must be 'PASS' or 'FAIL'"}

        test_result_data = {
            "status": status,
            "summary": summary,
            "checks_performed": checks_performed or [],
            "bugs": bugs or [],
            "code_quality": code_quality or {},
            # Never mark a failing run as uploadable, even if the caller asks.
            "ready_for_upload": ready_for_upload and status == "PASS",
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # (Python 3.12+) and produces ambiguous naive datetimes.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }

        success = await set_project_test_result_json(project_id, test_result_data)

        if success:
            logger.info(f"Test result submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"Test result '{status}' submitted successfully",
                "bugs_count": len(bugs) if bugs else 0
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}

    except Exception as e:
        logger.error(f"Error submitting test result: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_test_result(project_id: int) -> dict:
    """
    Get the latest test result for a project. Use this tool to see what the Tester found.

    Args:
        project_id: The project ID to get test results for (required)

    Returns:
        Dictionary with test result data including status, bugs, and suggestions
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_test_result_json

        test_result = await get_project_test_result_json(project_id)

        # Guard clause: nothing stored yet for this project.
        if not test_result:
            return {
                "success": True,
                "test_result": None,
                "message": "No test result found for this project"
            }

        return {"success": True, "test_result": test_result}

    except Exception as e:
        logger.error(f"Error getting test result: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def submit_implementation_status(
    project_id: int,
    status: str,
    files_created: Optional[list[dict]] = None,
    files_modified: Optional[list[dict]] = None,
    dependencies_installed: Optional[list[str]] = None,
    commands_run: Optional[list[str]] = None,
    bugs_addressed: Optional[list[dict]] = None,
    notes: Optional[str] = None,
    ready_for_testing: bool = True
) -> dict:
    """
    Submit implementation status after coding or fixing bugs. Use this tool to inform the Tester.

    Args:
        project_id: The project ID being worked on (required)
        status: Status - "completed", "fixed", "in_progress", or "blocked" (required)
        files_created: List of files created with {path, lines, purpose} format
        files_modified: List of files modified with {path, changes} format
        dependencies_installed: List of installed dependencies
        commands_run: List of commands executed
        bugs_addressed: List of bugs fixed with {original_issue, fix_applied, file, line} format
        notes: Any important notes about the implementation
        ready_for_testing: Whether the code is ready for testing (default: True)

    Returns:
        Dictionary with success status

    Example for new implementation:
        submit_implementation_status(
            project_id=1,
            status="completed",
            files_created=[
                {"path": "src/main.py", "lines": 150, "purpose": "Main entry point"}
            ],
            dependencies_installed=["fastapi", "uvicorn"],
            ready_for_testing=True
        )

    Example for bug fix:
        submit_implementation_status(
            project_id=1,
            status="fixed",
            bugs_addressed=[
                {"original_issue": "TypeError in parse_input()",
                 "fix_applied": "Added null check before processing",
                 "file": "src/parser.py", "line": 42}
            ],
            ready_for_testing=True
        )
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_implementation_status_json

        # Reject unknown statuses before touching the database.
        valid_statuses = ("completed", "fixed", "in_progress", "blocked")
        if status not in valid_statuses:
            return {"success": False, "error": f"status must be one of: {valid_statuses}"}

        implementation_data = {
            "status": status,
            "files_created": files_created or [],
            "files_modified": files_modified or [],
            "dependencies_installed": dependencies_installed or [],
            "commands_run": commands_run or [],
            "bugs_addressed": bugs_addressed or [],
            "notes": notes or "",
            "ready_for_testing": ready_for_testing,
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # (Python 3.12+) and produces ambiguous naive datetimes.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }

        success = await set_project_implementation_status_json(project_id, implementation_data)

        if success:
            logger.info(f"Implementation status submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"Implementation status '{status}' submitted successfully"
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}

    except Exception as e:
        logger.error(f"Error submitting implementation status: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_implementation_status(project_id: int) -> dict:
    """
    Get the latest implementation status for a project. Use this tool to see what the Developer did.

    Args:
        project_id: The project ID to get implementation status for (required)

    Returns:
        Dictionary with implementation status data
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_implementation_status_json

        impl_status = await get_project_implementation_status_json(project_id)

        # Guard clause: nothing stored yet for this project.
        if not impl_status:
            return {
                "success": True,
                "implementation_status": None,
                "message": "No implementation status found for this project"
            }

        return {"success": True, "implementation_status": impl_status}

    except Exception as e:
        logger.error(f"Error getting implementation status: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_project_context(project_id: int) -> dict:
    """
    Get full project context including idea, plan, and current dev-test state.
    Use this tool to understand the complete project situation.

    Args:
        project_id: The project ID to get context for (required)

    Returns:
        Dictionary with complete project context
    """
    try:
        await _init_db_if_needed()
        from database.db import (
            get_project_by_id,
            get_project_idea_json,
            get_project_plan_json,
            get_project_test_result_json,
            get_project_implementation_status_json
        )

        project = await get_project_by_id(project_id)
        if not project:
            return {"success": False, "error": f"Project {project_id} not found"}

        # Collect every piece of stored state for this project.
        return {
            "success": True,
            "project": {
                "id": project.id,
                "name": project.name,
                "status": project.status,
                "dev_test_iterations": project.dev_test_iterations,
                "current_agent": project.current_agent,
            },
            "idea": await get_project_idea_json(project_id),
            "plan": await get_project_plan_json(project_id),
            "test_result": await get_project_test_result_json(project_id),
            "implementation_status": await get_project_implementation_status_json(project_id)
        }

    except Exception as e:
        logger.error(f"Error getting project context: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def submit_ci_result(
    project_id: int,
    status: str,
    repo_name: str,
    gitea_url: str,
    run_id: Optional[int] = None,
    run_url: Optional[str] = None,
    summary: Optional[str] = None,
    failed_jobs: Optional[list[dict]] = None,
    error_logs: Optional[str] = None
) -> dict:
    """
    Submit CI/CD (Gitea Actions) result after checking workflow status.
    Use this tool to report CI/CD status to Developer for fixes if needed.

    Args:
        project_id: The project ID (required)
        status: CI status - "PASS", "FAIL", or "PENDING" (required)
        repo_name: Repository name (required)
        gitea_url: Repository URL on Gitea (required)
        run_id: Workflow run ID (if available)
        run_url: URL to the workflow run (if available)
        summary: Brief summary of CI result
        failed_jobs: List of failed jobs with {name, conclusion, steps} format
        error_logs: Relevant error logs or messages

    Returns:
        Dictionary with success status

    Example for PASS:
        submit_ci_result(
            project_id=1,
            status="PASS",
            repo_name="my-project",
            gitea_url="https://gitea.example.com/user/my-project",
            summary="All CI checks passed successfully"
        )

    Example for FAIL:
        submit_ci_result(
            project_id=1,
            status="FAIL",
            repo_name="my-project",
            gitea_url="https://gitea.example.com/user/my-project",
            run_id=123,
            run_url="https://gitea.example.com/user/my-project/actions/runs/123",
            summary="CI failed: test job failed",
            failed_jobs=[{"name": "test", "conclusion": "failure", "steps": [...]}],
            error_logs="Error: pytest failed with exit code 1"
        )
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_ci_result_json

        # Reject invalid statuses before touching the database.
        if status not in ("PASS", "FAIL", "PENDING"):
            return {"success": False, "error": "status must be 'PASS', 'FAIL', or 'PENDING'"}

        ci_result_data = {
            "status": status,
            "repo_name": repo_name,
            "gitea_url": gitea_url,
            "run_id": run_id,
            "run_url": run_url,
            "summary": summary or "",
            "failed_jobs": failed_jobs or [],
            "error_logs": error_logs or "",
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # (Python 3.12+) and produces ambiguous naive datetimes.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }

        success = await set_project_ci_result_json(project_id, ci_result_data)

        if success:
            logger.info(f"CI result submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"CI result '{status}' submitted successfully",
                # Tell the caller immediately whether Developer action is needed.
                "needs_fix": status == "FAIL"
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}

    except Exception as e:
        logger.error(f"Error submitting CI result: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_ci_result(project_id: int) -> dict:
    """
    Get the latest CI/CD result for a project. Use this to see if CI passed or failed.

    Args:
        project_id: The project ID to get CI result for (required)

    Returns:
        Dictionary with CI result data including status, failed jobs, and error logs
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_ci_result_json

        ci_result = await get_project_ci_result_json(project_id)

        # Guard clause: nothing stored yet for this project.
        if not ci_result:
            return {
                "success": True,
                "ci_result": None,
                "message": "No CI result found for this project"
            }

        return {"success": True, "ci_result": ci_result}

    except Exception as e:
        logger.error(f"Error getting CI result: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def submit_upload_status(
    project_id: int,
    status: str,
    repo_name: str,
    gitea_url: str,
    files_pushed: Optional[list[str]] = None,
    commit_sha: Optional[str] = None,
    message: Optional[str] = None
) -> dict:
    """
    Submit upload status after pushing code to Gitea.
    Use this to inform Tester that code has been uploaded and needs CI check.

    Args:
        project_id: The project ID (required)
        status: Upload status - "completed", "failed", or "in_progress" (required)
        repo_name: Repository name (required)
        gitea_url: Repository URL on Gitea (required)
        files_pushed: List of files that were pushed
        commit_sha: Commit SHA of the push
        message: Any additional message

    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_upload_status_json

        # Reject unknown statuses before touching the database.
        valid_statuses = ("completed", "failed", "in_progress")
        if status not in valid_statuses:
            return {"success": False, "error": f"status must be one of: {valid_statuses}"}

        upload_status_data = {
            "status": status,
            "repo_name": repo_name,
            "gitea_url": gitea_url,
            "files_pushed": files_pushed or [],
            "commit_sha": commit_sha or "",
            "message": message or "",
            # Timezone-aware UTC timestamp; datetime.utcnow() is deprecated
            # (Python 3.12+) and produces ambiguous naive datetimes.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }

        success = await set_project_upload_status_json(project_id, upload_status_data)

        if success:
            logger.info(f"Upload status submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"Upload status '{status}' submitted successfully"
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}

    except Exception as e:
        logger.error(f"Error submitting upload status: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_upload_status(project_id: int) -> dict:
    """
    Get the latest upload status for a project.
    Use this to see what the Uploader did and get the Gitea repository URL.

    Args:
        project_id: The project ID to get upload status for (required)

    Returns:
        Dictionary with upload status data including repo URL
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_upload_status_json

        upload_status = await get_project_upload_status_json(project_id)

        # Guard clause: nothing stored yet for this project.
        if not upload_status:
            return {
                "success": True,
                "upload_status": None,
                "message": "No upload status found for this project"
            }

        return {"success": True, "upload_status": upload_status}

    except Exception as e:
        logger.error(f"Error getting upload status: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def clear_devtest_state(project_id: int) -> dict:
    """
    Clear test result and implementation status for a new dev-test iteration.
    Use this at the start of each iteration to reset state.

    Args:
        project_id: The project ID to clear state for (required)

    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import clear_project_devtest_state

        cleared = await clear_project_devtest_state(project_id)
        if not cleared:
            return {"success": False, "error": f"Project {project_id} not found"}

        logger.info(f"DevTest state cleared for project {project_id}")
        return {"success": True, "message": "DevTest state cleared for new iteration"}

    except Exception as e:
        logger.error(f"Error clearing devtest state: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def clear_ci_state(project_id: int) -> dict:
    """
    Clear CI result and upload status for a new CI iteration.
    Use this at the start of each Uploader-Tester-Developer CI loop iteration.

    Args:
        project_id: The project ID to clear CI state for (required)

    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import clear_project_ci_state

        cleared = await clear_project_ci_state(project_id)
        if not cleared:
            return {"success": False, "error": f"Project {project_id} not found"}

        logger.info(f"CI state cleared for project {project_id}")
        return {"success": True, "message": "CI state cleared for new iteration"}

    except Exception as e:
        logger.error(f"Error clearing CI state: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
# Run the MCP server (stdio transport) when executed as a script.
if __name__ == "__main__":
    mcp.run()
|
||||
619
mcp_servers/gitea_mcp.py
Normal file
619
mcp_servers/gitea_mcp.py
Normal file
@@ -0,0 +1,619 @@
|
||||
"""
|
||||
Gitea MCP Server for 7000%AUTO
|
||||
Provides Gitea repository management functionality
|
||||
"""
|
||||
|
||||
import base64
|
||||
import logging
|
||||
import os
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
import httpx
|
||||
from mcp.server.fastmcp import FastMCP
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mcp = FastMCP("Gitea Server")
|
||||
|
||||
GITEA_TOKEN = os.getenv("GITEA_TOKEN", "")
|
||||
GITEA_URL = os.getenv("GITEA_URL", "https://7000pct.gitea.bloupla.net")
|
||||
GITEA_USERNAME = os.getenv("GITEA_USERNAME", "")
|
||||
|
||||
|
||||
def get_api_base_url() -> str:
    """Return the Gitea REST API root derived from GITEA_URL."""
    base = GITEA_URL.rstrip("/")
    return base + "/api/v1"
|
||||
|
||||
|
||||
def get_auth_headers() -> Dict[str, str]:
    """Build the HTTP headers used to authenticate against the Gitea API."""
    # Gitea expects token auth in the Authorization header.
    return {
        "Authorization": "token " + GITEA_TOKEN,
        "Content-Type": "application/json",
        "Accept": "application/json",
    }
|
||||
|
||||
|
||||
async def get_gitea_username() -> str:
    """Resolve the Gitea username.

    Prefers the GITEA_USERNAME environment setting; otherwise asks the API
    for the authenticated user. Returns "" when no token is configured or
    the lookup fails.
    """
    if GITEA_USERNAME:
        return GITEA_USERNAME

    if not GITEA_TOKEN:
        return ""

    try:
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as http:
            resp = await http.get("/user")
            if resp.status_code == 200:
                return resp.json().get("login", "")
    except Exception as e:
        logger.error(f"Failed to get Gitea username: {e}")

    return ""
|
||||
|
||||
|
||||
@mcp.tool()
async def create_repo(name: str, description: str, private: bool = False) -> dict:
    """
    Create a new Gitea repository.

    Args:
        name: Repository name (kebab-case recommended)
        description: Repository description
        private: Whether the repo should be private (default False)

    Returns:
        Dictionary with repository URL and details
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    # auto_init gives the repo an initial commit on "main" so pushes work.
    payload = {
        "name": name,
        "description": description,
        "private": private,
        "auto_init": True,
        "default_branch": "main",
    }

    try:
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as http:
            response = await http.post("/user/repos", json=payload)

        if response.status_code not in (200, 201):
            error_msg = response.json().get("message", response.text)
            logger.error(f"Gitea API error: {error_msg}")
            return {"success": False, "error": error_msg}

        repo_data = response.json()
        logger.info(f"Created repository: {repo_data.get('html_url')}")

        return {
            "success": True,
            "repo": {
                "name": repo_data.get("name"),
                "full_name": repo_data.get("full_name"),
                "url": repo_data.get("html_url"),
                "clone_url": repo_data.get("clone_url"),
                "description": repo_data.get("description"),
            }
        }

    except Exception as e:
        logger.error(f"Error creating repo: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def push_files(repo: str, files: dict, message: str, branch: str = "main") -> dict:
    """
    Push multiple files to a Gitea repository via the contents API.

    Each file becomes its own commit (the contents API has no multi-file
    commit), all sharing the same commit message.

    Args:
        repo: Repository name (username/repo or just repo name)
        files: Dictionary of {path: content} for files to push
        message: Commit message
        branch: Target branch (default "main")

    Returns:
        Dictionary with commit details, "files_pushed", and "files_failed"
        (path -> error message; new backward-compatible key).
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo

        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}

        pushed_files = []
        failed_files = {}
        last_commit = None

        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            for file_path, content in files.items():
                # Check if file exists to determine if we need to update or create
                check_response = await client.get(f"/repos/{owner}/{repo_name}/contents/{file_path}")

                file_data = {
                    "content": base64.b64encode(content.encode()).decode(),
                    "message": message,
                    "branch": branch,
                }

                if check_response.status_code == 200:
                    # File exists: updates must carry the current blob SHA.
                    file_data["sha"] = check_response.json().get("sha")
                    response = await client.put(
                        f"/repos/{owner}/{repo_name}/contents/{file_path}",
                        json=file_data
                    )
                else:
                    # File doesn't exist, create it
                    response = await client.post(
                        f"/repos/{owner}/{repo_name}/contents/{file_path}",
                        json=file_data
                    )

                if response.status_code in (200, 201):
                    result = response.json()
                    last_commit = result.get("commit", {})
                    pushed_files.append(file_path)
                else:
                    # FIX: failures were only logged before, so callers could
                    # not tell which files were skipped. Record them, and
                    # tolerate non-JSON error bodies.
                    try:
                        error_msg = response.json().get("message", response.text)
                    except ValueError:
                        error_msg = response.text
                    failed_files[file_path] = error_msg
                    logger.error(f"Failed to push {file_path}: {error_msg}")

        if pushed_files:
            logger.info(f"Pushed {len(pushed_files)} files to {owner}/{repo_name}")
            return {
                "success": True,
                "commit": {
                    "sha": last_commit.get("sha", "") if last_commit else "",
                    "message": message,
                    "url": f"{GITEA_URL}/{owner}/{repo_name}/commit/{last_commit.get('sha', '')}" if last_commit else ""
                },
                "files_pushed": pushed_files,
                "files_failed": failed_files
            }
        else:
            return {"success": False, "error": "No files were pushed", "files_failed": failed_files}

    except Exception as e:
        logger.error(f"Error pushing files: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def create_release(
    repo: str,
    tag: str,
    name: str,
    body: str,
    draft: bool = False,
    prerelease: bool = False
) -> dict:
    """
    Create a release on Gitea.

    Args:
        repo: Repository name
        tag: Tag name (e.g., "v1.0.0")
        name: Release name
        body: Release notes/body
        draft: Whether this is a draft release
        prerelease: Whether this is a prerelease

    Returns:
        Dictionary with release URL and details, or an error message.
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    try:
        # Resolve "<owner>/<repo>"; a bare name belongs to the current user.
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo

        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}

        payload = {
            "tag_name": tag,
            "name": name,
            "body": body,
            "draft": draft,
            "prerelease": prerelease,
            "target_commitish": "main",
        }

        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            resp = await client.post(
                f"/repos/{owner}/{repo_name}/releases",
                json=payload
            )

            if resp.status_code not in (200, 201):
                error_msg = resp.json().get("message", resp.text)
                logger.error(f"Gitea API error: {error_msg}")
                return {"success": False, "error": error_msg}

            release_data = resp.json()
            logger.info(f"Created release {tag} for {owner}/{repo_name}")

            return {
                "success": True,
                "release": {
                    "tag": tag,
                    "name": name,
                    "url": release_data.get("html_url", f"{GITEA_URL}/{owner}/{repo_name}/releases/tag/{tag}"),
                    "id": release_data.get("id"),
                }
            }

    except Exception as e:
        logger.error(f"Error creating release: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def setup_actions(repo: str, workflow_content: str, workflow_name: str = "ci.yml") -> dict:
    """
    Install a Gitea Actions workflow file into a repository.

    Args:
        repo: Repository name
        workflow_content: YAML content for the workflow
        workflow_name: Workflow file name (default "ci.yml")

    Returns:
        Dictionary with the workflow path on success; otherwise the
        error result from push_files.
    """
    try:
        # Gitea discovers workflows under .gitea/workflows/.
        target_path = f".gitea/workflows/{workflow_name}"

        push_result = await push_files(
            repo=repo,
            files={target_path: workflow_content},
            message=f"Add Gitea Actions workflow: {workflow_name}"
        )

        if not push_result.get("success"):
            return push_result

        return {
            "success": True,
            "workflow": {
                "path": target_path,
                "name": workflow_name
            }
        }

    except Exception as e:
        logger.error(f"Error setting up actions: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_workflow_runs(repo: str, status: str = None, branch: str = None, limit: int = 10) -> dict:
    """
    Get workflow runs (Gitea Actions) for a repository.

    Args:
        repo: Repository name (username/repo or just repo name)
        status: Filter by status (queued, in_progress, success, failure, cancelled, skipped, timedout)
        branch: Filter by branch name
        limit: Maximum number of runs to return (default 10, max 100)

    Returns:
        Dictionary with workflow runs list
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo

        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}

        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            params = {"per_page": min(limit, 100)}
            if status:
                params["status"] = status
            if branch:
                params["branch"] = branch

            response = await client.get(
                f"/repos/{owner}/{repo_name}/actions/runs",
                params=params
            )

            if response.status_code == 200:
                data = response.json()
                # The API may return either {"workflow_runs": [...]} or a bare list.
                runs = data.get("workflow_runs", data) if isinstance(data, dict) else data

                # Simplify the runs data
                simplified_runs = []
                for run in (runs if isinstance(runs, list) else []):
                    simplified_runs.append({
                        "id": run.get("id"),
                        "name": run.get("display_title") or run.get("name"),
                        "status": run.get("status"),
                        "conclusion": run.get("conclusion"),
                        "branch": run.get("head_branch"),
                        # FIX: head_sha can be present but null; .get's default
                        # only covers a missing key, and None[:7] raises.
                        "commit_sha": (run.get("head_sha") or "")[:7],
                        "started_at": run.get("run_started_at"),
                        "url": f"{GITEA_URL}/{owner}/{repo_name}/actions/runs/{run.get('id')}"
                    })

                return {
                    "success": True,
                    "repo": f"{owner}/{repo_name}",
                    "runs": simplified_runs,
                    "total": len(simplified_runs)
                }
            else:
                error_msg = response.json().get("message", response.text)
                return {"success": False, "error": error_msg}

    except Exception as e:
        logger.error(f"Error getting workflow runs: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_latest_workflow_status(repo: str, branch: str = "main") -> dict:
    """
    Get the status of the latest workflow run for a repository.
    Use this to check if CI/CD passed or failed after uploading code.

    Args:
        repo: Repository name (username/repo or just repo name)
        branch: Branch to check (default "main")

    Returns:
        Dictionary with latest run status (passed/failed/pending/none)
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo

        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}

        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.get(
                f"/repos/{owner}/{repo_name}/actions/runs",
                params={"branch": branch, "per_page": 1}
            )

            if response.status_code == 200:
                data = response.json()
                # The API may return either {"workflow_runs": [...]} or a bare list.
                runs = data.get("workflow_runs", data) if isinstance(data, dict) else data

                if not runs or (isinstance(runs, list) and len(runs) == 0):
                    return {
                        "success": True,
                        "status": "none",
                        "message": "No workflow runs found",
                        "repo": f"{owner}/{repo_name}"
                    }

                latest_run = runs[0] if isinstance(runs, list) else runs
                run_status = latest_run.get("status", "unknown")
                conclusion = latest_run.get("conclusion")

                # Determine overall status: conclusion is only meaningful
                # once the run has finished.
                if run_status in ("queued", "in_progress", "waiting"):
                    overall_status = "pending"
                elif conclusion == "success":
                    overall_status = "passed"
                elif conclusion in ("failure", "timedout", "action_required"):
                    overall_status = "failed"
                elif conclusion in ("cancelled", "skipped"):
                    overall_status = "cancelled"
                else:
                    overall_status = "unknown"

                return {
                    "success": True,
                    "status": overall_status,
                    "run_status": run_status,
                    "conclusion": conclusion,
                    "run_id": latest_run.get("id"),
                    "run_name": latest_run.get("display_title") or latest_run.get("name"),
                    "branch": latest_run.get("head_branch"),
                    # FIX: head_sha can be present but null; None[:7] raises.
                    "commit_sha": (latest_run.get("head_sha") or "")[:7],
                    "url": f"{GITEA_URL}/{owner}/{repo_name}/actions/runs/{latest_run.get('id')}",
                    "repo": f"{owner}/{repo_name}"
                }
            elif response.status_code == 404:
                return {
                    "success": True,
                    "status": "none",
                    "message": "Actions not enabled or no runs found",
                    "repo": f"{owner}/{repo_name}"
                }
            else:
                error_msg = response.json().get("message", response.text)
                return {"success": False, "error": error_msg}

    except Exception as e:
        logger.error(f"Error getting latest workflow status: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_workflow_run_jobs(repo: str, run_id: int) -> dict:
    """
    Get jobs and their status for a specific workflow run.
    Use this to see which specific jobs failed in a CI/CD run.

    Args:
        repo: Repository name (username/repo or just repo name)
        run_id: Workflow run ID

    Returns:
        Dictionary with job details including status and log URLs
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    try:
        # Resolve "<owner>/<repo>"; a bare name belongs to the current user.
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo

        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}

        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            resp = await client.get(
                f"/repos/{owner}/{repo_name}/actions/runs/{run_id}/jobs"
            )

            if resp.status_code != 200:
                error_msg = resp.json().get("message", resp.text)
                return {"success": False, "error": error_msg}

            payload = resp.json()
            # The API may return either {"jobs": [...]} or a bare list.
            raw_jobs = payload.get("jobs", payload) if isinstance(payload, dict) else payload
            if not isinstance(raw_jobs, list):
                raw_jobs = []

            job_summaries = [
                {
                    "id": job.get("id"),
                    "name": job.get("name"),
                    "status": job.get("status"),
                    "conclusion": job.get("conclusion"),
                    "started_at": job.get("started_at"),
                    "completed_at": job.get("completed_at"),
                    "steps": [
                        {
                            "name": step.get("name"),
                            "status": step.get("status"),
                            "conclusion": step.get("conclusion")
                        }
                        for step in job.get("steps", [])
                    ]
                }
                for job in raw_jobs
            ]

            return {
                "success": True,
                "run_id": run_id,
                "repo": f"{owner}/{repo_name}",
                "jobs": job_summaries
            }

    except Exception as e:
        logger.error(f"Error getting workflow run jobs: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_repo_info(repo: str) -> dict:
    """
    Get repository information.

    Args:
        repo: Repository name

    Returns:
        Dictionary with repository details (name, URL, stars, forks, etc.)
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}

    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo

        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}

        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.get(f"/repos/{owner}/{repo_name}")

            if response.status_code == 200:
                repo_data = response.json()
                return {
                    "success": True,
                    "repo": {
                        "name": repo_data.get("name"),
                        "full_name": repo_data.get("full_name"),
                        "url": repo_data.get("html_url"),
                        "description": repo_data.get("description"),
                        "stars": repo_data.get("stars_count", 0),
                        "forks": repo_data.get("forks_count", 0),
                        "default_branch": repo_data.get("default_branch"),
                        "language": repo_data.get("language"),
                    }
                }
            else:
                error_msg = response.json().get("message", response.text)
                return {"success": False, "error": error_msg}

    except Exception as e:
        # FIX: every sibling tool logs its failure; this one silently
        # swallowed the exception, hiding errors from the server logs.
        logger.error(f"Error getting repo info: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
# Entry point: start the MCP server when this file is executed directly.
if __name__ == "__main__":
    mcp.run()
|
||||
208
mcp_servers/search_mcp.py
Normal file
208
mcp_servers/search_mcp.py
Normal file
@@ -0,0 +1,208 @@
|
||||
"""
|
||||
Search MCP Server for 7000%AUTO
|
||||
Provides search functionality across arXiv, Reddit, Hacker News, Product Hunt
|
||||
"""
|
||||
|
||||
import logging
|
||||
import xml.etree.ElementTree as ET
|
||||
from typing import Optional
|
||||
|
||||
import httpx
|
||||
from mcp.server.fastmcp import FastMCP
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mcp = FastMCP("Search Server")
|
||||
|
||||
|
||||
@mcp.tool()
async def search_arxiv(query: str, max_results: int = 5) -> dict:
    """
    Search arXiv papers for the given query.

    Args:
        query: Search query string
        max_results: Maximum number of results to return (default 5)

    Returns:
        Dictionary with papers list containing title, summary, authors, link, published date
    """
    try:
        # FIX: use HTTPS; the export API supports it and plain HTTP either
        # redirects or exposes queries in transit.
        url = "https://export.arxiv.org/api/query"
        params = {
            "search_query": f"all:{query}",
            "start": 0,
            "max_results": max_results,
            "sortBy": "submittedDate",
            "sortOrder": "descending"
        }

        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(url, params=params)
            response.raise_for_status()

            # Parse the Atom XML response.
            root = ET.fromstring(response.text)
            ns = {"atom": "http://www.w3.org/2005/Atom"}

            papers = []
            for entry in root.findall("atom:entry", ns):
                title = entry.find("atom:title", ns)
                summary = entry.find("atom:summary", ns)
                published = entry.find("atom:published", ns)
                link = entry.find("atom:id", ns)

                authors = []
                for author in entry.findall("atom:author", ns):
                    name = author.find("atom:name", ns)
                    if name is not None:
                        authors.append(name.text)

                # FIX: an element can exist with empty text (.text is None);
                # calling .strip() on None raised AttributeError.
                papers.append({
                    "title": (title.text or "").strip() if title is not None else "",
                    "summary": (summary.text or "").strip()[:500] if summary is not None else "",
                    "authors": authors[:3],
                    "link": link.text if link is not None else "",
                    "published": published.text if published is not None else ""
                })

            return {"success": True, "papers": papers, "count": len(papers)}

    except Exception as e:
        logger.error(f"arXiv search failed: {e}")
        return {"success": False, "error": str(e), "papers": []}
|
||||
|
||||
|
||||
@mcp.tool()
async def search_reddit(subreddit: str, query: str, limit: int = 10) -> dict:
    """
    Search Reddit posts in a specific subreddit.

    Args:
        subreddit: Subreddit name (e.g., "programming")
        query: Search query string
        limit: Maximum number of results (default 10)

    Returns:
        Dictionary with posts list containing title, score, url, comments count
    """
    try:
        search_url = f"https://www.reddit.com/r/{subreddit}/search.json"
        query_params = {
            "q": query,
            "restrict_sr": "on",
            "sort": "relevance",
            "t": "month",
            "limit": limit
        }
        # Reddit rejects requests without a descriptive User-Agent.
        headers = {"User-Agent": "7000AUTO/1.0"}

        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.get(search_url, params=query_params, headers=headers)
            resp.raise_for_status()
            payload = resp.json()

        posts = []
        for child in payload.get("data", {}).get("children", []):
            item = child.get("data", {})
            posts.append({
                "title": item.get("title", ""),
                "score": item.get("score", 0),
                "url": f"https://reddit.com{item.get('permalink', '')}",
                "comments": item.get("num_comments", 0),
                "created_utc": item.get("created_utc", 0)
            })

        return {"success": True, "posts": posts, "count": len(posts)}

    except Exception as e:
        logger.error(f"Reddit search failed: {e}")
        return {"success": False, "error": str(e), "posts": []}
|
||||
|
||||
|
||||
@mcp.tool()
async def search_hackernews(query: str, limit: int = 10) -> dict:
    """
    Search Hacker News via Algolia API.

    Args:
        query: Search query string
        limit: Maximum number of results (default 10)

    Returns:
        Dictionary with stories list containing title, points, url, comments count
    """
    try:
        endpoint = "https://hn.algolia.com/api/v1/search"
        search_params = {
            "query": query,
            "tags": "story",
            "hitsPerPage": limit
        }

        async with httpx.AsyncClient(timeout=30) as client:
            resp = await client.get(endpoint, params=search_params)
            resp.raise_for_status()
            payload = resp.json()

        stories = [
            {
                "title": hit.get("title", ""),
                "points": hit.get("points", 0),
                # Ask HN / text posts have no external URL; link to the item page.
                "url": hit.get("url", f"https://news.ycombinator.com/item?id={hit.get('objectID', '')}"),
                "comments": hit.get("num_comments", 0),
                "author": hit.get("author", ""),
                "created_at": hit.get("created_at", "")
            }
            for hit in payload.get("hits", [])
        ]

        return {"success": True, "stories": stories, "count": len(stories)}

    except Exception as e:
        logger.error(f"Hacker News search failed: {e}")
        return {"success": False, "error": str(e), "stories": []}
|
||||
|
||||
|
||||
@mcp.tool()
async def search_producthunt(days: int = 7) -> dict:
    """
    Get recent Product Hunt posts via RSS feed.

    Args:
        days: Number of days to look back (default 7)

    Returns:
        Dictionary with products list containing title, tagline, url
    """
    # Local imports: only this tool needs date parsing for the RSS feed.
    from datetime import datetime, timedelta, timezone
    from email.utils import parsedate_to_datetime

    try:
        # Product Hunt doesn't have a free API, use RSS feed
        url = "https://www.producthunt.com/feed"

        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(url)
            response.raise_for_status()

            # Parse RSS XML
            root = ET.fromstring(response.text)

            # FIX: the `days` parameter was documented but never used; filter
            # items by their RSS pubDate. Items without a parseable date are
            # kept (best-effort).
            cutoff = datetime.now(timezone.utc) - timedelta(days=days)

            products = []
            for item in root.findall(".//item")[:20]:
                title = item.find("title")
                link = item.find("link")
                description = item.find("description")
                pub_date = item.find("pubDate")

                if pub_date is not None and pub_date.text:
                    try:
                        published = parsedate_to_datetime(pub_date.text)
                        if published.tzinfo is None:
                            published = published.replace(tzinfo=timezone.utc)
                        if published < cutoff:
                            continue
                    except (TypeError, ValueError):
                        pass  # unparseable date: keep the item

                products.append({
                    "title": title.text if title is not None else "",
                    "tagline": description.text[:200] if description is not None and description.text else "",
                    "url": link.text if link is not None else ""
                })

            return {"success": True, "products": products, "count": len(products)}

    except Exception as e:
        logger.error(f"Product Hunt search failed: {e}")
        return {"success": False, "error": str(e), "products": []}
|
||||
|
||||
|
||||
# Entry point: start the MCP server when this file is executed directly.
if __name__ == "__main__":
    mcp.run()
|
||||
176
mcp_servers/x_mcp.py
Normal file
176
mcp_servers/x_mcp.py
Normal file
@@ -0,0 +1,176 @@
|
||||
"""
|
||||
X/Twitter MCP Server for 7000%AUTO
|
||||
Provides Twitter posting and search functionality
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
from typing import Optional
|
||||
|
||||
import tweepy
|
||||
from mcp.server.fastmcp import FastMCP
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
mcp = FastMCP("X API Server")
|
||||
|
||||
# Twitter API credentials from environment
# OAuth 1.0a user-context keys: all four are required for posting
# (see get_client). Empty string when the variable is unset.
API_KEY = os.getenv("X_API_KEY", "")
API_SECRET = os.getenv("X_API_SECRET", "")
ACCESS_TOKEN = os.getenv("X_ACCESS_TOKEN", "")
ACCESS_TOKEN_SECRET = os.getenv("X_ACCESS_TOKEN_SECRET", "")
# Optional bearer token; passed through to tweepy only when non-empty.
BEARER_TOKEN = os.getenv("X_BEARER_TOKEN", "")
|
||||
|
||||
|
||||
def get_client() -> Optional[tweepy.Client]:
    """Build an authenticated tweepy client, or None if any required credential is missing."""
    required = (API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    if not all(required):
        return None

    return tweepy.Client(
        consumer_key=API_KEY,
        consumer_secret=API_SECRET,
        access_token=ACCESS_TOKEN,
        access_token_secret=ACCESS_TOKEN_SECRET,
        bearer_token=BEARER_TOKEN or None,
        wait_on_rate_limit=True
    )
|
||||
|
||||
|
||||
@mcp.tool()
async def post_tweet(text: str) -> dict:
    """
    Post a tweet to X/Twitter.

    Args:
        text: Tweet text (max 280 characters)

    Returns:
        Dictionary with tweet URL and status
    """
    try:
        # Guard: reject over-length tweets before touching the API.
        if len(text) > 280:
            return {
                "success": False,
                "error": f"Tweet exceeds 280 characters (got {len(text)})"
            }

        api = get_client()
        if not api:
            return {
                "success": False,
                "error": "Twitter API credentials not configured"
            }

        created = api.create_tweet(text=text)
        tweet_id = created.data["id"]

        # Look up our own handle so we can build a canonical tweet URL.
        profile = api.get_me()
        handle = profile.data.username if profile.data else "user"
        tweet_url = f"https://twitter.com/{handle}/status/{tweet_id}"

        logger.info(f"Posted tweet: {tweet_url}")

        return {
            "success": True,
            "tweet_id": tweet_id,
            "url": tweet_url,
            "text": text,
            "character_count": len(text)
        }

    except tweepy.TweepyException as e:
        logger.error(f"Twitter API error: {e}")
        return {"success": False, "error": str(e)}
    except Exception as e:
        logger.error(f"Error posting tweet: {e}")
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
@mcp.tool()
async def search_tweets(query: str, max_results: int = 10) -> dict:
    """
    Search recent tweets on X/Twitter.

    Args:
        query: Search query string
        max_results: Maximum number of results (default 10, max 100)

    Returns:
        Dictionary with list of tweets
    """
    try:
        client = get_client()
        if not client:
            return {
                "success": False,
                "error": "Twitter API credentials not configured"
            }

        # FIX: the v2 recent-search endpoint requires max_results in 10..100;
        # only the upper bound was clamped, so values below 10 were rejected
        # by the API instead of returning results.
        max_results = max(10, min(max_results, 100))

        response = client.search_recent_tweets(
            query=query,
            max_results=max_results,
            tweet_fields=["created_at", "public_metrics", "author_id"]
        )

        tweets = []
        if response.data:
            for tweet in response.data:
                tweets.append({
                    "id": tweet.id,
                    "text": tweet.text,
                    "created_at": tweet.created_at.isoformat() if tweet.created_at else "",
                    "metrics": tweet.public_metrics if hasattr(tweet, "public_metrics") else {}
                })

        return {
            "success": True,
            "tweets": tweets,
            "count": len(tweets)
        }

    except tweepy.TweepyException as e:
        logger.error(f"Twitter search error: {e}")
        return {"success": False, "error": str(e), "tweets": []}
    except Exception as e:
        logger.error(f"Error searching tweets: {e}")
        return {"success": False, "error": str(e), "tweets": []}
|
||||
|
||||
|
||||
@mcp.tool()
async def get_rate_limit_status() -> dict:
    """
    Get current rate limit status for the Twitter API.

    Returns:
        Dictionary with rate limit information
    """
    try:
        api = get_client()
        if not api:
            return {
                "success": False,
                "error": "Twitter API credentials not configured"
            }

        # A lightweight authenticated call doubles as a credentials check.
        profile = api.get_me()

        return {
            "success": True,
            "authenticated": True,
            "username": profile.data.username if profile.data else None
        }

    except tweepy.TweepyException as e:
        return {"success": False, "error": str(e)}
    except Exception as e:
        return {"success": False, "error": str(e)}
|
||||
|
||||
|
||||
# Entry point: start the MCP server when this file is executed directly.
if __name__ == "__main__":
    mcp.run()
|
||||
187
old_tests/test_analyzers.py
Normal file
187
old_tests/test_analyzers.py
Normal file
@@ -0,0 +1,187 @@
|
||||
"""Tests for traffic analyzer and stats generator."""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
|
||||
from http_log_explorer.analyzers import StatsGenerator, TrafficAnalyzer
|
||||
from http_log_explorer.models import FilterCriteria, HTTPEntry, Request, Response
|
||||
|
||||
|
||||
def make_entry(
    method: str = "GET",
    url: str = "https://api.example.com/test",
    status: int = 200,
    body: str | None = None,
    duration_ms: float = 100.0,
) -> HTTPEntry:
    """Build an HTTPEntry fixture with JSON defaults for analyzer tests."""
    request = Request(
        method=method,
        url=url,
        headers={"Content-Type": "application/json"},
        body=body,
    )
    response = Response(
        status=status,
        status_text="OK",
        body=body,
        content_type="application/json",
        response_time_ms=duration_ms,
    )
    return HTTPEntry(
        id=f"entry-{method}-{status}",
        request=request,
        response=response,
        timestamp=datetime.now(),
    )
|
||||
|
||||
|
||||
@pytest.fixture
def sample_entries():
    """Sample traffic: a mix of methods with 2xx, 4xx and 5xx responses."""
    specs = [
        ("GET", "https://api.example.com/users", 200, 50),
        ("POST", "https://api.example.com/users", 201, 100),
        ("GET", "https://api.example.com/users/1", 200, 75),
        ("PUT", "https://api.example.com/users/1", 200, 80),
        ("DELETE", "https://api.example.com/users/1", 204, 30),
        ("GET", "https://api.example.com/posts", 404, 25),
        ("POST", "https://api.example.com/posts", 500, 200),
    ]
    return [
        make_entry(method, url, status, duration_ms=ms)
        for method, url, status, ms in specs
    ]
|
||||
|
||||
|
||||
class TestTrafficAnalyzer:
|
||||
"""Tests for TrafficAnalyzer."""
|
||||
|
||||
def test_filter_by_method(self, sample_entries):
|
||||
"""Test filtering by HTTP method."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
get_entries = analyzer.by_method(["GET"])
|
||||
|
||||
assert len(get_entries) == 3
|
||||
assert all(e.request.method == "GET" for e in get_entries)
|
||||
|
||||
def test_filter_by_status(self, sample_entries):
|
||||
"""Test filtering by status code."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
success_entries = analyzer.by_status([200, 201, 204])
|
||||
|
||||
assert len(success_entries) == 5
|
||||
assert all(e.response.status < 300 for e in success_entries)
|
||||
|
||||
def test_filter_by_url_pattern(self, sample_entries):
|
||||
"""Test filtering by URL pattern."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
user_entries = analyzer.by_url(r"/users")
|
||||
|
||||
assert len(user_entries) == 5
|
||||
assert all("/users" in e.request.url for e in user_entries)
|
||||
|
||||
def test_filter_by_content_type(self, sample_entries):
|
||||
"""Test filtering by content type."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
json_entries = analyzer.by_content_type(["application/json"])
|
||||
|
||||
assert len(json_entries) == 7
|
||||
|
||||
def test_successful_requests(self, sample_entries):
|
||||
"""Test getting successful requests (2xx)."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
success = analyzer.successful_requests()
|
||||
|
||||
assert len(success) == 5
|
||||
assert all(200 <= e.response.status < 300 for e in success)
|
||||
|
||||
def test_client_errors(self, sample_entries):
|
||||
"""Test getting client errors (4xx)."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
errors = analyzer.client_errors()
|
||||
|
||||
assert len(errors) == 1
|
||||
assert errors[0].response.status == 404
|
||||
|
||||
def test_server_errors(self, sample_entries):
|
||||
"""Test getting server errors (5xx)."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
errors = analyzer.server_errors()
|
||||
|
||||
assert len(errors) == 1
|
||||
assert errors[0].response.status == 500
|
||||
|
||||
def test_search(self, sample_entries):
|
||||
"""Test search across URL and bodies."""
|
||||
analyzer = TrafficAnalyzer(sample_entries)
|
||||
results = analyzer.search("users")
|
||||
|
||||
assert len(results) == 5
|
||||
|
||||
def test_get_entry_by_id(self, sample_entries):
    """Lookup by id returns the matching entry."""
    found = TrafficAnalyzer(sample_entries).get_entry_by_id("entry-GET-200")
    assert found is not None
    assert found.request.method == "GET"
|
||||
|
||||
def test_get_entry_by_id_not_found(self, sample_entries):
    """Lookup with an unknown id returns None rather than raising."""
    missing = TrafficAnalyzer(sample_entries).get_entry_by_id("non-existent")
    assert missing is None
|
||||
|
||||
|
||||
class TestStatsGenerator:
    """Tests for StatsGenerator."""

    def test_total_requests(self, sample_entries):
        """The generated stats count every entry in the fixture."""
        stats = StatsGenerator(sample_entries).generate()
        assert stats.total_requests == 7

    def test_method_distribution(self, sample_entries):
        """HTTP verbs are tallied individually."""
        stats = StatsGenerator(sample_entries).generate()
        expected = {"GET": 3, "POST": 2, "PUT": 1, "DELETE": 1}
        for verb, count in expected.items():
            assert stats.method_distribution.get(verb) == count

    def test_status_breakdown(self, sample_entries):
        """Status codes are tallied individually."""
        stats = StatsGenerator(sample_entries).generate()
        expected = {200: 3, 201: 1, 204: 1, 404: 1, 500: 1}
        for code, count in expected.items():
            assert stats.status_breakdown.get(code) == count

    def test_response_time_stats(self, sample_entries):
        """Min/max/avg response times are reported."""
        stats = StatsGenerator(sample_entries).generate()
        timings = stats.response_time_stats
        assert timings["min"] == 25.0
        assert timings["max"] == 200.0
        # NOTE(review): rel=1 allows a 100% relative error, which makes this
        # assertion nearly vacuous — confirm whether a tighter tolerance
        # (e.g. rel=0.1 or an abs= bound) was intended.
        assert timings["avg"] == pytest.approx(80.0, rel=1)

    def test_endpoint_count(self, sample_entries):
        """Endpoints seen in the traffic appear in the endpoint counter."""
        stats = StatsGenerator(sample_entries).generate()
        for endpoint in ("/users", "/posts"):
            assert endpoint in stats.endpoint_count

    def test_to_dict(self, sample_entries):
        """Stats serialize to a plain dictionary with the expected keys."""
        as_dict = StatsGenerator(sample_entries).to_dict()
        assert as_dict["total_requests"] == 7
        assert "method_distribution" in as_dict
        assert "status_breakdown" in as_dict
|
||||
129
old_tests/test_diff_engine.py
Normal file
129
old_tests/test_diff_engine.py
Normal file
@@ -0,0 +1,129 @@
|
||||
"""Tests for diff engine."""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
|
||||
from http_log_explorer.analyzers import DiffEngine
|
||||
from http_log_explorer.models import HTTPEntry, Request, Response
|
||||
|
||||
|
||||
def make_entry(
    method: str = "GET",
    url: str = "https://api.example.com/test",
    status: int = 200,
    req_headers: dict | None = None,
    resp_headers: dict | None = None,
    req_body: str | None = None,
    resp_body: str | None = None,
) -> HTTPEntry:
    """Build an HTTPEntry fixture; JSON Content-Type headers by default."""
    request = Request(
        method=method,
        url=url,
        headers=req_headers or {"Content-Type": "application/json"},
        body=req_body,
    )
    response = Response(
        status=status,
        status_text="OK",
        headers=resp_headers or {"Content-Type": "application/json"},
        body=resp_body,
    )
    # Id encodes method and status so tests can look entries up predictably.
    return HTTPEntry(
        id=f"entry-{method}-{status}",
        request=request,
        response=response,
        timestamp=datetime.now(),
    )
|
||||
|
||||
|
||||
class TestDiffEngine:
    """Tests for DiffEngine."""

    def test_diff_same_entries(self):
        """Identical entries show no differences."""
        engine = DiffEngine()
        outcome = engine.diff(make_entry(), make_entry())
        assert not engine.has_differences(outcome)
        assert outcome.status1 == outcome.status2

    def test_diff_status_change(self):
        """A status change is flagged and both codes are reported."""
        outcome = DiffEngine().diff(make_entry(status=200), make_entry(status=404))
        assert outcome.status_changed
        assert outcome.status1 == 200
        assert outcome.status2 == 404

    def test_diff_url_change(self):
        """A URL change flips url_changed."""
        outcome = DiffEngine().diff(
            make_entry(url="https://api.example.com/users"),
            make_entry(url="https://api.example.com/posts"),
        )
        assert outcome.url_changed

    def test_diff_headers_change(self):
        """A changed header value shows up in the request-headers diff."""
        first = make_entry(
            req_headers={"Content-Type": "application/json", "X-Request-ID": "123"}
        )
        second = make_entry(
            req_headers={"Content-Type": "application/json", "X-Request-ID": "456"}
        )
        outcome = DiffEngine().diff(first, second)
        assert len(outcome.request_headers_diff) > 0

    def test_diff_headers_added_removed(self):
        """Added headers are marked '+' and removed ones '-'."""
        outcome = DiffEngine().diff(
            make_entry(req_headers={"Content-Type": "application/json"}),
            make_entry(req_headers={"Authorization": "Bearer token"}),
        )
        assert any("+ Authorization:" in line for line in outcome.request_headers_diff)
        assert any("- Content-Type:" in line for line in outcome.request_headers_diff)

    def test_diff_body_change(self):
        """Different response bodies produce a non-empty body diff."""
        outcome = DiffEngine().diff(
            make_entry(resp_body='{"id": 1, "name": "Alice"}'),
            make_entry(resp_body='{"id": 1, "name": "Bob"}'),
        )
        assert len(outcome.response_body_diff) > 0

    def test_unified_diff_output(self):
        """The unified output summarizes the status transition."""
        engine = DiffEngine()
        outcome = engine.diff(make_entry(status=200), make_entry(status=404))
        assert "Status: 200 -> 404" in engine.unified_diff_output(outcome)

    def test_no_differences(self):
        """The unified output states when entries are identical."""
        engine = DiffEngine()
        outcome = engine.diff(make_entry(), make_entry())
        assert "No differences found" in engine.unified_diff_output(outcome)
|
||||
191
old_tests/test_exporters.py
Normal file
191
old_tests/test_exporters.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""Tests for exporters."""
|
||||
|
||||
import json
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
|
||||
from http_log_explorer.exporters import CodeExporter, CurlExporter, JSONExporter
|
||||
from http_log_explorer.models import HTTPEntry, Request, Response
|
||||
|
||||
|
||||
def make_entry(
    method: str = "GET",
    url: str = "https://api.example.com/test",
    status: int = 200,
    req_headers: dict | None = None,
    resp_body: str | None = None,
) -> HTTPEntry:
    """Build an HTTPEntry fixture with fixed JSON request/response bodies."""
    request = Request(
        method=method,
        url=url,
        headers=req_headers or {"Content-Type": "application/json"},
        body='{"key": "value"}',
    )
    response = Response(
        status=status,
        status_text="OK",
        headers={"Content-Type": "application/json"},
        body=resp_body or '{"result": "success"}',
        content_type="application/json",
    )
    # Id encodes method and status for predictable lookups in tests.
    return HTTPEntry(
        id=f"entry-{method}-{status}",
        request=request,
        response=response,
        timestamp=datetime.now(),
    )
|
||||
|
||||
|
||||
@pytest.fixture
def sample_entries():
    """Three representative entries: a list, a create, and a 404."""
    specs = [
        ("GET", "https://api.example.com/users", 200),
        ("POST", "https://api.example.com/users", 201),
        ("GET", "https://api.example.com/posts", 404),
    ]
    return [make_entry(method, url, status) for method, url, status in specs]
|
||||
|
||||
|
||||
class TestJSONExporter:
    """Tests for JSONExporter."""

    def test_export(self, sample_entries):
        """Full export round-trips through json.loads."""
        payload = json.loads(JSONExporter().export(sample_entries))
        assert len(payload) == 3
        assert payload[0]["request"]["method"] == "GET"

    def test_export_compact(self, sample_entries):
        """Compact export is a single line that keeps entry ids."""
        compact = JSONExporter().export_compact(sample_entries)
        assert "\n" not in compact
        assert '"id"' in compact

    def test_export_summary(self, sample_entries):
        """Summary export keeps one condensed record per entry."""
        summary = json.loads(JSONExporter().export_summary(sample_entries))
        assert len(summary) == 3
        for key in ("id", "method", "status"):
            assert key in summary[0]

    def test_export_single_entry(self):
        """A one-element list exports to a one-element JSON array."""
        payload = json.loads(JSONExporter().export([make_entry()]))
        assert len(payload) == 1
|
||||
|
||||
|
||||
class TestCurlExporter:
    """Tests for CurlExporter."""

    def test_export_single(self):
        """A GET entry renders as a curl command with method and URL."""
        command = CurlExporter().export(
            make_entry("GET", "https://api.example.com/users")
        )
        for fragment in ("curl", "-X GET", "https://api.example.com/users"):
            assert fragment in command

    def test_export_with_headers(self):
        """Request headers become -H flags."""
        command = CurlExporter().export(
            make_entry(req_headers={"Authorization": "Bearer token"})
        )
        assert "-H" in command
        assert "Authorization" in command

    def test_export_post_with_body(self):
        """A POST body is forwarded via -d."""
        entry = make_entry("POST", "https://api.example.com/users")
        entry.request.body = '{"name": "Test"}'
        command = CurlExporter().export(entry)
        assert "-X POST" in command
        assert "-d" in command

    def test_export_batch(self, sample_entries):
        """Batch export yields one curl command per entry."""
        commands = CurlExporter().export_batch(sample_entries)
        assert len(commands) == 3
        for command in commands:
            assert "curl" in command
|
||||
|
||||
|
||||
class TestCodeExporter:
    """Tests for CodeExporter."""

    def test_export_python(self):
        """Python export uses the requests library."""
        snippet = CodeExporter().export_python(make_entry())
        assert "import requests" in snippet
        assert "requests.get" in snippet or "requests.post" in snippet

    def test_export_python_post(self):
        """POST entries map to requests.post."""
        snippet = CodeExporter().export_python(
            make_entry("POST", "https://api.example.com/users")
        )
        assert "requests.post" in snippet

    def test_export_python_with_body(self):
        """A request body is forwarded as data= or json=."""
        entry = make_entry("POST", "https://api.example.com/users")
        entry.request.body = '{"name": "Test"}'
        snippet = CodeExporter().export_python(entry)
        assert "data=" in snippet or "json=" in snippet

    def test_export_javascript(self):
        """JavaScript export builds an axios config."""
        snippet = CodeExporter().export_javascript(make_entry())
        assert "axios" in snippet
        assert "const config" in snippet

    def test_export_go(self):
        """Go export produces a net/http program."""
        snippet = CodeExporter().export_go(make_entry())
        for fragment in ("package main", "net/http", "http.NewRequest"):
            assert fragment in snippet

    def test_export_batch_python(self, sample_entries):
        """Batch Python export yields one snippet per entry."""
        snippets = CodeExporter().export_batch(sample_entries, "python")
        assert len(snippets) == 3
        for snippet in snippets:
            assert "import requests" in snippet

    def test_export_unsupported_language(self, sample_entries):
        """Unknown target languages are rejected with ValueError."""
        exporter = CodeExporter()
        with pytest.raises(ValueError, match="Unsupported language"):
            exporter.export_batch(sample_entries, "ruby")
|
||||
163
old_tests/test_openapi_generator.py
Normal file
163
old_tests/test_openapi_generator.py
Normal file
@@ -0,0 +1,163 @@
|
||||
"""Tests for OpenAPI generator."""
|
||||
|
||||
import pytest
|
||||
from datetime import datetime
|
||||
|
||||
from http_log_explorer.generators import OpenAPIGenerator
|
||||
from http_log_explorer.models import HTTPEntry, Request, Response
|
||||
|
||||
|
||||
def make_entry(
    method: str = "GET",
    url: str = "https://api.example.com/test",
    status: int = 200,
    req_body: str | None = None,
    resp_body: str | None = None,
    content_type: str = "application/json",
) -> HTTPEntry:
    """Build an HTTPEntry fixture whose headers advertise *content_type*."""
    request = Request(
        method=method,
        url=url,
        headers={"Content-Type": content_type},
        body=req_body,
    )
    response = Response(
        status=status,
        status_text="OK",
        headers={"Content-Type": content_type},
        body=resp_body,
        content_type=content_type,
    )
    # Id encodes method and status for predictable lookups in tests.
    return HTTPEntry(
        id=f"entry-{method}-{status}",
        request=request,
        response=response,
        timestamp=datetime.now(),
    )
|
||||
|
||||
|
||||
@pytest.fixture
def sample_entries():
    """CRUD traffic against a users API, suitable for spec inference."""
    base = "https://api.example.com"
    return [
        make_entry(
            "GET",
            f"{base}/users",
            200,
            resp_body='[{"id": 1, "name": "Alice"}]',
        ),
        make_entry(
            "POST",
            f"{base}/users",
            201,
            req_body='{"name": "Bob"}',
            resp_body='{"id": 2, "name": "Bob"}',
        ),
        make_entry(
            "GET",
            f"{base}/users/1",
            200,
            resp_body='{"id": 1, "name": "Alice", "email": "alice@example.com"}',
        ),
        make_entry(
            "PUT",
            f"{base}/users/1",
            200,
            req_body='{"name": "Alice Smith"}',
            resp_body='{"id": 1, "name": "Alice Smith"}',
        ),
        make_entry(
            "DELETE",
            f"{base}/users/1",
            204,
        ),
    ]
|
||||
|
||||
|
||||
class TestOpenAPIGenerator:
    """Tests for OpenAPIGenerator."""

    def test_generate_spec_structure(self, sample_entries):
        """The spec carries the OpenAPI version, info, paths, and components."""
        document = OpenAPIGenerator(sample_entries).generate(
            title="Test API", version="1.0.0"
        )
        assert document["openapi"] == "3.0.3"
        assert document["info"]["title"] == "Test API"
        assert document["info"]["version"] == "1.0.0"
        for section in ("paths", "components"):
            assert section in document

    def test_paths_inferred(self, sample_entries):
        """Collection and templated item paths are both discovered."""
        document = OpenAPIGenerator(sample_entries).generate()
        assert "/users" in document["paths"]
        assert "/users/{id}" in document["paths"]

    def test_methods_inferred(self, sample_entries):
        """Observed HTTP verbs appear under the collection path."""
        document = OpenAPIGenerator(sample_entries).generate()
        collection = document["paths"]["/users"]
        assert "get" in collection
        assert "post" in collection

    def test_schemas_extracted(self, sample_entries):
        """At least one schema is derived from the JSON bodies."""
        document = OpenAPIGenerator(sample_entries).generate()
        assert "components" in document
        assert "schemas" in document["components"]
        assert len(document["components"]["schemas"]) > 0

    def test_responses_included(self, sample_entries):
        """Observed responses are recorded on each operation."""
        document = OpenAPIGenerator(sample_entries).generate()
        operation = document["paths"]["/users"]["get"]
        assert "responses" in operation
        assert "200" in operation["responses"]

    def test_to_json(self, sample_entries):
        """The spec serializes to JSON text."""
        generator = OpenAPIGenerator(sample_entries)
        serialized = generator.to_json(generator.generate())
        assert '"openapi"' in serialized
        assert '"paths"' in serialized

    def test_generate_empty_entries(self):
        """An empty traffic log yields an empty (but valid) spec."""
        document = OpenAPIGenerator([]).generate()
        assert document["paths"] == {}
        assert document["components"]["schemas"] == {}

    def test_query_parameters_inferred(self):
        """Query-string keys are surfaced as operation parameters."""
        entry = make_entry(
            "GET",
            "https://api.example.com/users",
            200,
            resp_body='[{"id": 1}]',
        )
        entry.request.query_params["page"] = "1"
        entry.request.query_params["limit"] = "10"

        document = OpenAPIGenerator([entry]).generate()
        assert "/users" in document["paths"]
        operation = document["paths"]["/users"]["get"]
        # Parameter inference may be optional in the generator, so only check
        # the names when a parameters section was emitted.
        if "parameters" in operation:
            names = [parameter["name"] for parameter in operation["parameters"]]
            assert "page" in names
            assert "limit" in names
|
||||
222
old_tests/test_parsers.py
Normal file
222
old_tests/test_parsers.py
Normal file
@@ -0,0 +1,222 @@
|
||||
"""Tests for HTTP parsers."""
|
||||
|
||||
import pytest
|
||||
|
||||
from http_log_explorer.models import HTTPEntry
|
||||
from http_log_explorer.parsers import get_parser
|
||||
from http_log_explorer.parsers.curl_parser import CurlParser
|
||||
from http_log_explorer.parsers.devtools_parser import DevToolsParser
|
||||
from http_log_explorer.parsers.har_parser import HARParser
|
||||
|
||||
|
||||
# A minimal HAR 1.2 capture: one GET with request headers, a query string,
# and a JSON response body. Used by TestHARParser and TestParserFactory.
SAMPLE_HAR = """
{
  "log": {
    "version": "1.2",
    "creator": {
      "name": "Test",
      "version": "1.0"
    },
    "entries": [
      {
        "startedDateTime": "2024-01-01T12:00:00Z",
        "time": 150,
        "request": {
          "method": "GET",
          "url": "https://api.example.com/users/123",
          "httpVersion": "HTTP/1.1",
          "headers": [
            {"name": "Content-Type", "value": "application/json"},
            {"name": "Authorization", "value": "Bearer token123"}
          ],
          "queryString": [{"name": "include", "value": "profile"}],
          "cookies": []
        },
        "response": {
          "status": 200,
          "statusText": "OK",
          "httpVersion": "HTTP/1.1",
          "headers": [
            {"name": "Content-Type", "value": "application/json"}
          ],
          "content": {
            "mimeType": "application/json",
            "text": "{\\"id\\": 123, \\"name\\": \\"John\\"}"
          }
        },
        "serverIPAddress": "192.168.1.1",
        "connection": "keep-alive"
      }
    ]
  }
}
"""

# curl -v style transcript of a GET exchange: '>' prefixes request lines,
# '<' prefixes response lines, followed by the response body.
SAMPLE_CURL = """
> GET /api/users HTTP/1.1
> Host: api.example.com
> User-Agent: curl/7.88.1
> Accept: */*
>
< HTTP/1.1 200 OK
< Content-Type: application/json
< Content-Length: 45
<
{"id": 1, "name": "Test User"}
"""

# curl -v style transcript of a POST exchange, including a request body
# between the request headers and the 201 response.
SAMPLE_CURL_POST = """
> POST /api/users HTTP/1.1
> Host: api.example.com
> Content-Type: application/json
> Content-Length: 25
>
{"name": "New User"}
< HTTP/1.1 201 Created
< Content-Type: application/json
< Content-Length: 35
<
{"id": 2, "name": "New User"}
"""

# DevTools-style export: HAR-like, but headers are plain objects rather than
# name/value pair lists — the detail DevToolsParser must handle.
SAMPLE_DEVTOOLS = """
{
  "log": {
    "version": "1.2",
    "entries": [
      {
        "startedDateTime": "2024-01-01T12:00:00Z",
        "time": 100,
        "request": {
          "method": "GET",
          "url": "https://api.example.com/posts",
          "httpVersion": "HTTP/1.1",
          "headers": {
            "Content-Type": "application/json"
          },
          "queryString": []
        },
        "response": {
          "status": 200,
          "statusText": "OK",
          "httpVersion": "HTTP/1.1",
          "headers": {
            "Content-Type": "application/json"
          },
          "content": {
            "mimeType": "application/json",
            "text": "[{\\"id\\": 1}]"
          }
        },
        "serverIPAddress": "10.0.0.1"
      }
    ]
  }
}
"""
|
||||
|
||||
|
||||
class TestHARParser:
    """Tests for HAR parser."""

    def test_can_parse_har(self):
        """HAR content is recognized by can_parse."""
        assert HARParser().can_parse(SAMPLE_HAR)

    def test_parse_har_entry(self):
        """A HAR entry yields method, URL, status, and query params."""
        entries = HARParser().parse(SAMPLE_HAR)
        assert len(entries) == 1
        parsed = entries[0]
        assert parsed.request.method == "GET"
        assert parsed.request.url == "https://api.example.com/users/123"
        assert parsed.response.status == 200
        assert parsed.request.query_params.get("include") == "profile"

    def test_parse_invalid_har(self):
        """Non-HAR input raises ValueError."""
        parser = HARParser()
        with pytest.raises(ValueError):
            parser.parse("not valid har content")
|
||||
|
||||
|
||||
class TestCurlParser:
    """Tests for curl parser."""

    def test_can_parse_curl(self):
        """curl verbose output is recognized by can_parse."""
        assert CurlParser().can_parse(SAMPLE_CURL)

    def test_parse_curl_get(self):
        """A GET exchange parses into one entry with its 200 response."""
        entries = CurlParser().parse(SAMPLE_CURL)
        assert len(entries) == 1
        parsed = entries[0]
        assert parsed.request.method == "GET"
        assert "/api/users" in parsed.request.url
        assert parsed.response.status == 200

    def test_parse_curl_post(self):
        """A POST exchange parses into one entry with its 201 response."""
        entries = CurlParser().parse(SAMPLE_CURL_POST)
        assert len(entries) == 1
        assert entries[0].request.method == "POST"
        assert entries[0].response.status == 201

    def test_cannot_parse_har_as_curl(self):
        """HAR input is not mistaken for curl output."""
        assert not CurlParser().can_parse(SAMPLE_HAR)
|
||||
|
||||
|
||||
class TestDevToolsParser:
    """Tests for DevTools parser."""

    def test_can_parse_devtools(self):
        """DevTools export is recognized by can_parse."""
        assert DevToolsParser().can_parse(SAMPLE_DEVTOOLS)

    def test_parse_devtools_entry(self):
        """A DevTools entry yields method, URL, and status."""
        entries = DevToolsParser().parse(SAMPLE_DEVTOOLS)
        assert len(entries) == 1
        parsed = entries[0]
        assert parsed.request.method == "GET"
        assert "posts" in parsed.request.url
        assert parsed.response.status == 200
|
||||
|
||||
|
||||
class TestParserFactory:
    """Tests for parser factory."""

    def test_get_parser_har(self):
        """HAR content selects HARParser."""
        assert isinstance(get_parser(SAMPLE_HAR), HARParser)

    def test_get_parser_curl(self):
        """curl content selects CurlParser."""
        assert isinstance(get_parser(SAMPLE_CURL), CurlParser)

    def test_get_parser_devtools(self):
        """DevTools content selects DevToolsParser."""
        assert isinstance(get_parser(SAMPLE_DEVTOOLS), DevToolsParser)

    def test_unsupported_format(self):
        """Unrecognized content raises ValueError."""
        with pytest.raises(ValueError, match="Unsupported format"):
            get_parser("random garbage content")
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user