Fix CI/CD: Add Gitea Actions workflow and fix linting issues
Some checks failed
CI / test (push) Failing after 13s

This commit is contained in:
Developer
2026-02-05 09:02:49 +00:00
commit d8325c4be2
111 changed files with 19657 additions and 0 deletions

12
mcp_servers/__init__.py Normal file
View File

@@ -0,0 +1,12 @@
"""
MCP Servers for 7000%AUTO
Provides external API access to AI agents via Model Context Protocol
"""
from .search_mcp import mcp as search_mcp
from .x_mcp import mcp as x_mcp
from .database_mcp import mcp as database_mcp
from .gitea_mcp import mcp as gitea_mcp
from .devtest_mcp import mcp as devtest_mcp
__all__ = ['search_mcp', 'x_mcp', 'database_mcp', 'gitea_mcp', 'devtest_mcp']

340
mcp_servers/database_mcp.py Normal file
View File

@@ -0,0 +1,340 @@
"""Database MCP Server for 7000%AUTO
Provides database operations for idea management
"""
import logging
from mcp.server.fastmcp import FastMCP
# Module-level logger for this MCP server process.
logger = logging.getLogger(__name__)
# FastMCP instance; the functions below register themselves via @mcp.tool().
mcp = FastMCP("Database Server")
# Database initialization flag for MCP server process
# (flipped to True by _init_db_if_needed after the first successful init).
_db_ready = False
async def _init_db_if_needed():
    """Lazily initialize the database on first use.

    MCP servers run in separate processes from the main application, so the
    database connection must be set up here rather than inherited.

    Raises:
        Exception: re-raised from init_db() after logging the failure.
    """
    global _db_ready
    if _db_ready:
        return
    try:
        from database.db import init_db
        await init_db()
        # Only mark ready once init_db() completed without raising.
        _db_ready = True
        logger.info("Database initialized in MCP server")
    except Exception as e:
        logger.error(f"Failed to initialize database in MCP server: {e}")
        raise
@mcp.tool()
async def get_previous_ideas(limit: int = 50) -> dict:
    """
    Get list of previously generated ideas, newest first.
    Args:
        limit: Maximum number of ideas to return (default 50)
    Returns:
        Dictionary with list of ideas; on failure, success=False with an
        error message and an empty ideas list.
    """
    try:
        await _init_db_if_needed()
        from database import get_db, Idea
        from sqlalchemy import select
        async with get_db() as session:
            query = select(Idea).order_by(Idea.created_at.desc()).limit(limit)
            result = await session.execute(query)
            ideas = result.scalars().all()
            return {
                "success": True,
                "ideas": [
                    {
                        "id": idea.id,
                        "title": idea.title,
                        # Guard against NULL descriptions (slicing None raises
                        # TypeError); truncate to keep the payload small.
                        "description": (idea.description or "")[:200],
                        "source": idea.source,
                        "used": idea.used
                    }
                    for idea in ideas
                ],
                "count": len(ideas)
            }
    except Exception as e:
        logger.error(f"Error getting previous ideas: {e}")
        return {"success": False, "error": str(e), "ideas": []}
@mcp.tool()
async def check_idea_exists(title: str) -> dict:
    """
    Check if a similar idea already exists.

    Similarity is evaluated in three tiers, strongest first; each idea is
    tagged with at most one match_type ("exact", "partial", or "similar").

    Args:
        title: Title to check for similarity
    Returns:
        Dictionary with exists flag and similar ideas if found
        (at most the first 5 matches are returned in similar_ideas).
    """
    try:
        await _init_db_if_needed()
        from database import get_db, Idea
        from sqlalchemy import select
        # All comparisons are case-insensitive.
        title_lower = title.lower()
        title_words = set(title_lower.split())
        async with get_db() as session:
            # Get all ideas for comparison
            # NOTE(review): loads the entire table into memory; fine for small
            # tables but may need a narrower query if the table grows large.
            query = select(Idea)
            result = await session.execute(query)
            ideas = result.scalars().all()
            similar = []
            for idea in ideas:
                idea_title_lower = idea.title.lower()
                idea_words = set(idea_title_lower.split())
                # Check exact match
                if title_lower == idea_title_lower:
                    similar.append({
                        "id": idea.id,
                        "title": idea.title,
                        "match_type": "exact"
                    })
                    continue
                # Check partial match (title contains or is contained)
                if title_lower in idea_title_lower or idea_title_lower in title_lower:
                    similar.append({
                        "id": idea.id,
                        "title": idea.title,
                        "match_type": "partial"
                    })
                    continue
                # Check word overlap (>50%)
                # Jaccard index: |intersection| / |union| of the word sets.
                overlap = len(title_words & idea_words)
                total = len(title_words | idea_words)
                if total > 0 and overlap / total > 0.5:
                    similar.append({
                        "id": idea.id,
                        "title": idea.title,
                        "match_type": "similar"
                    })
            return {
                "success": True,
                "exists": len(similar) > 0,
                "similar_ideas": similar[:5],
                "count": len(similar)
            }
    except Exception as e:
        logger.error(f"Error checking idea existence: {e}")
        return {"success": False, "error": str(e), "exists": False}
@mcp.tool()
async def save_idea(title: str, description: str, source: str) -> dict:
    """
    Save a new idea to the database.
    Args:
        title: Idea title
        description: Idea description
        source: Source of the idea (arxiv, reddit, x, hn, ph)
    Returns:
        Dictionary with saved idea details, or success=False with an error.
    """
    try:
        await _init_db_if_needed()
        from database import create_idea
        idea = await create_idea(title=title, description=description, source=source)
        # Serialize the timestamp only when the DB actually populated it.
        created_at = idea.created_at.isoformat() if idea.created_at else None
        saved = {
            "id": idea.id,
            "title": idea.title,
            "description": idea.description,
            "source": idea.source,
            "created_at": created_at,
        }
        return {"success": True, "idea": saved}
    except Exception as e:
        logger.error(f"Error saving idea: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_database_stats() -> dict:
    """
    Get database statistics.
    Returns:
        Dictionary with database stats, or success=False with an error.
    """
    try:
        await _init_db_if_needed()
        from database import get_stats
        return {"success": True, "stats": await get_stats()}
    except Exception as e:
        logger.error(f"Error getting database stats: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def submit_idea(
    project_id: int,
    title: str,
    description: str,
    source: str,
    tech_stack: list[str] | None = None,
    target_audience: str | None = None,
    key_features: list[str] | None = None,
    complexity: str | None = None,
    estimated_time: str | None = None,
    inspiration: str | None = None
) -> dict:
    """
    Submit a generated project idea. Use this tool to finalize and save your idea.
    The idea will be saved directly to the database for the given project.
    Args:
        project_id: The project ID to associate this idea with (required)
        title: Short project name (required)
        description: Detailed description of the project (required)
        source: Source of inspiration - arxiv, reddit, x, hn, or ph (required)
        tech_stack: List of technologies to use (e.g., ["python", "fastapi"])
        target_audience: Who would use this project
        key_features: List of key features
        complexity: low, medium, or high
        estimated_time: Estimated implementation time (e.g., "2-4 hours")
        inspiration: Brief note on what inspired this idea
    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_idea_json
        # Build the complete idea dict; optional fields default to empty
        # values (complexity defaults to "medium").
        idea_data = {
            "title": title,
            "description": description,
            "source": source,
            "tech_stack": tech_stack or [],
            "target_audience": target_audience or "",
            "key_features": key_features or [],
            "complexity": complexity or "medium",
            "estimated_time": estimated_time or "",
            "inspiration": inspiration or "",
        }
        # Save to database
        success = await set_project_idea_json(project_id, idea_data)
        if success:
            logger.info(f"Idea submitted for project {project_id}: {title}")
            return {"success": True, "message": f"Idea '{title}' saved successfully"}
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}
    except Exception as e:
        logger.error(f"Error submitting idea: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def submit_plan(
    project_id: int,
    project_name: str,
    overview: str,
    display_name: str | None = None,
    tech_stack: dict | None = None,
    file_structure: dict | None = None,
    features: list[dict] | None = None,
    implementation_steps: list[dict] | None = None,
    testing_strategy: dict | None = None,
    configuration: dict | None = None,
    error_handling: dict | None = None,
    readme_sections: list[str] | None = None
) -> dict:
    """
    Submit an implementation plan. Use this tool to finalize your project plan.
    The plan will be saved directly to the database for the given project.
    Args:
        project_id: The project ID to associate this plan with (required)
        project_name: kebab-case project name (required)
        overview: 2-3 sentence summary of what will be built (required)
        display_name: Human readable project name
        tech_stack: Technology stack details with language, runtime, framework, key_dependencies
        file_structure: File structure with root_files and directories
        features: List of features with name, priority, description, implementation_notes
        implementation_steps: Ordered list of implementation steps
        testing_strategy: Testing approach with unit_tests, integration_tests, test_files, test_commands
        configuration: Config details with env_variables and config_files
        error_handling: Error handling strategies
        readme_sections: List of README section titles
    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_plan_json
        # Build the complete plan dict; optional fields default to empty
        # containers, and display_name is derived from the kebab-case name.
        plan_data = {
            "project_name": project_name,
            "display_name": display_name or project_name.replace("-", " ").title(),
            "overview": overview,
            "tech_stack": tech_stack or {},
            "file_structure": file_structure or {},
            "features": features or [],
            "implementation_steps": implementation_steps or [],
            "testing_strategy": testing_strategy or {},
            "configuration": configuration or {},
            "error_handling": error_handling or {},
            "readme_sections": readme_sections or []
        }
        # Save to database
        success = await set_project_plan_json(project_id, plan_data)
        if success:
            logger.info(f"Plan submitted for project {project_id}: {project_name}")
            return {"success": True, "message": f"Plan '{project_name}' saved successfully"}
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}
    except Exception as e:
        logger.error(f"Error submitting plan: {e}")
        return {"success": False, "error": str(e)}
if __name__ == "__main__":
    # Allow running this MCP server as a standalone process.
    mcp.run()

635
mcp_servers/devtest_mcp.py Normal file
View File

@@ -0,0 +1,635 @@
"""Developer-Tester Communication MCP Server for 7000%AUTO
Provides structured communication between Developer and Tester agents via MCP tools.
This enables Developer and Tester to share:
- Test results (PASS/FAIL with detailed bug reports)
- Implementation status (completed/fixing with file changes)
- Project context (plan, current iteration)
"""
import logging
from datetime import datetime, timezone

from mcp.server.fastmcp import FastMCP
# Module-level logger for this MCP server process.
logger = logging.getLogger(__name__)
# FastMCP instance; the functions below register themselves via @mcp.tool().
mcp = FastMCP("DevTest Communication Server")
# Database initialization flag for MCP server process
# (flipped to True by _init_db_if_needed after the first successful init).
_db_ready = False
async def _init_db_if_needed():
    """Lazily initialize the database on first use.

    MCP servers run in separate processes, so the database must be
    initialized here rather than relying on the main application.

    Raises:
        Exception: re-raised from init_db() after logging the failure.
    """
    global _db_ready
    if _db_ready:
        return
    try:
        from database.db import init_db
        await init_db()
        # Only mark ready once init_db() completed without raising.
        _db_ready = True
        logger.info("Database initialized in DevTest MCP server")
    except Exception as e:
        logger.error(f"Failed to initialize database in DevTest MCP server: {e}")
        raise
@mcp.tool()
async def submit_test_result(
    project_id: int,
    status: str,
    summary: str,
    checks_performed: list[dict] | None = None,
    bugs: list[dict] | None = None,
    code_quality: dict | None = None,
    ready_for_upload: bool = False
) -> dict:
    """
    Submit test results after testing the implementation. Use this tool to report test outcomes.
    The Developer will read these results to fix any bugs.
    Args:
        project_id: The project ID being tested (required)
        status: Test status - "PASS" or "FAIL" (required)
        summary: Brief summary of test results (required)
        checks_performed: List of checks with {check, result, details} format
        bugs: List of bugs found with {id, severity, type, file, line, issue, error_message, suggestion} format
        code_quality: Quality assessment with {error_handling, documentation, test_coverage} ratings
        ready_for_upload: Whether the project is ready for upload (true only if PASS)
    Returns:
        Dictionary with success status
    Example for PASS:
        submit_test_result(
            project_id=1,
            status="PASS",
            summary="All tests passed successfully",
            checks_performed=[
                {"check": "linting", "result": "pass", "details": "No issues found"},
                {"check": "unit_tests", "result": "pass", "details": "15/15 tests passed"}
            ],
            ready_for_upload=True
        )
    Example for FAIL:
        submit_test_result(
            project_id=1,
            status="FAIL",
            summary="Found 2 critical issues",
            checks_performed=[
                {"check": "linting", "result": "pass", "details": "No issues"},
                {"check": "type_check", "result": "fail", "details": "3 type errors"}
            ],
            bugs=[
                {
                    "id": 1,
                    "severity": "critical",
                    "type": "type_error",
                    "file": "src/main.py",
                    "line": 42,
                    "issue": "Missing return type annotation",
                    "error_message": "error: Function is missing return type annotation",
                    "suggestion": "Add -> str return type"
                }
            ],
            ready_for_upload=False
        )
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_test_result_json
        # Validate status
        if status not in ("PASS", "FAIL"):
            return {"success": False, "error": "status must be 'PASS' or 'FAIL'"}
        # Build test result data
        test_result_data = {
            "status": status,
            "summary": summary,
            "checks_performed": checks_performed or [],
            "bugs": bugs or [],
            "code_quality": code_quality or {},
            # Upload is only ever allowed when the run actually passed.
            "ready_for_upload": ready_for_upload and status == "PASS",
            # Timezone-aware UTC timestamp; datetime.utcnow() is naive and
            # deprecated since Python 3.12.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }
        # Save to database
        success = await set_project_test_result_json(project_id, test_result_data)
        if success:
            logger.info(f"Test result submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"Test result '{status}' submitted successfully",
                "bugs_count": len(bugs) if bugs else 0
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}
    except Exception as e:
        logger.error(f"Error submitting test result: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_test_result(project_id: int) -> dict:
    """
    Get the latest test result for a project. Use this tool to see what the Tester found.
    Args:
        project_id: The project ID to get test results for (required)
    Returns:
        Dictionary with test result data including status, bugs, and suggestions
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_test_result_json
        test_result = await get_project_test_result_json(project_id)
        # An absent (or empty) result is still a successful lookup.
        if not test_result:
            return {
                "success": True,
                "test_result": None,
                "message": "No test result found for this project"
            }
        return {"success": True, "test_result": test_result}
    except Exception as e:
        logger.error(f"Error getting test result: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def submit_implementation_status(
    project_id: int,
    status: str,
    files_created: list[dict] | None = None,
    files_modified: list[dict] | None = None,
    dependencies_installed: list[str] | None = None,
    commands_run: list[str] | None = None,
    bugs_addressed: list[dict] | None = None,
    notes: str | None = None,
    ready_for_testing: bool = True
) -> dict:
    """
    Submit implementation status after coding or fixing bugs. Use this tool to inform the Tester.
    Args:
        project_id: The project ID being worked on (required)
        status: Status - "completed", "fixed", "in_progress", or "blocked" (required)
        files_created: List of files created with {path, lines, purpose} format
        files_modified: List of files modified with {path, changes} format
        dependencies_installed: List of installed dependencies
        commands_run: List of commands executed
        bugs_addressed: List of bugs fixed with {original_issue, fix_applied, file, line} format
        notes: Any important notes about the implementation
        ready_for_testing: Whether the code is ready for testing (default: True)
    Returns:
        Dictionary with success status
    Example for new implementation:
        submit_implementation_status(
            project_id=1,
            status="completed",
            files_created=[
                {"path": "src/main.py", "lines": 150, "purpose": "Main entry point"}
            ],
            dependencies_installed=["fastapi", "uvicorn"],
            ready_for_testing=True
        )
    Example for bug fix:
        submit_implementation_status(
            project_id=1,
            status="fixed",
            bugs_addressed=[
                {
                    "original_issue": "TypeError in parse_input()",
                    "fix_applied": "Added null check before processing",
                    "file": "src/parser.py",
                    "line": 42
                }
            ],
            ready_for_testing=True
        )
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_implementation_status_json
        # Validate status
        valid_statuses = ("completed", "fixed", "in_progress", "blocked")
        if status not in valid_statuses:
            return {"success": False, "error": f"status must be one of: {valid_statuses}"}
        # Build implementation status data
        implementation_data = {
            "status": status,
            "files_created": files_created or [],
            "files_modified": files_modified or [],
            "dependencies_installed": dependencies_installed or [],
            "commands_run": commands_run or [],
            "bugs_addressed": bugs_addressed or [],
            "notes": notes or "",
            "ready_for_testing": ready_for_testing,
            # Timezone-aware UTC timestamp; datetime.utcnow() is naive and
            # deprecated since Python 3.12.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }
        # Save to database
        success = await set_project_implementation_status_json(project_id, implementation_data)
        if success:
            logger.info(f"Implementation status submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"Implementation status '{status}' submitted successfully"
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}
    except Exception as e:
        logger.error(f"Error submitting implementation status: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_implementation_status(project_id: int) -> dict:
    """
    Get the latest implementation status for a project. Use this tool to see what the Developer did.
    Args:
        project_id: The project ID to get implementation status for (required)
    Returns:
        Dictionary with implementation status data
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_implementation_status_json
        impl_status = await get_project_implementation_status_json(project_id)
        # An absent (or empty) status is still a successful lookup.
        if not impl_status:
            return {
                "success": True,
                "implementation_status": None,
                "message": "No implementation status found for this project"
            }
        return {"success": True, "implementation_status": impl_status}
    except Exception as e:
        logger.error(f"Error getting implementation status: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_project_context(project_id: int) -> dict:
    """
    Get full project context including idea, plan, and current dev-test state.
    Use this tool to understand the complete project situation.
    Args:
        project_id: The project ID to get context for (required)
    Returns:
        Dictionary with complete project context; the idea/plan/test_result/
        implementation_status fields may each be None when not yet submitted.
    """
    try:
        await _init_db_if_needed()
        from database.db import (
            get_project_by_id,
            get_project_idea_json,
            get_project_plan_json,
            get_project_test_result_json,
            get_project_implementation_status_json
        )
        # Bail out early if the project itself does not exist.
        project = await get_project_by_id(project_id)
        if not project:
            return {"success": False, "error": f"Project {project_id} not found"}
        # Fetch each piece of state independently; any may be None.
        idea = await get_project_idea_json(project_id)
        plan = await get_project_plan_json(project_id)
        test_result = await get_project_test_result_json(project_id)
        impl_status = await get_project_implementation_status_json(project_id)
        return {
            "success": True,
            "project": {
                "id": project.id,
                "name": project.name,
                "status": project.status,
                "dev_test_iterations": project.dev_test_iterations,
                "current_agent": project.current_agent,
            },
            "idea": idea,
            "plan": plan,
            "test_result": test_result,
            "implementation_status": impl_status
        }
    except Exception as e:
        logger.error(f"Error getting project context: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def submit_ci_result(
    project_id: int,
    status: str,
    repo_name: str,
    gitea_url: str,
    run_id: int | None = None,
    run_url: str | None = None,
    summary: str | None = None,
    failed_jobs: list[dict] | None = None,
    error_logs: str | None = None
) -> dict:
    """
    Submit CI/CD (Gitea Actions) result after checking workflow status.
    Use this tool to report CI/CD status to Developer for fixes if needed.
    Args:
        project_id: The project ID (required)
        status: CI status - "PASS", "FAIL", or "PENDING" (required)
        repo_name: Repository name (required)
        gitea_url: Repository URL on Gitea (required)
        run_id: Workflow run ID (if available)
        run_url: URL to the workflow run (if available)
        summary: Brief summary of CI result
        failed_jobs: List of failed jobs with {name, conclusion, steps} format
        error_logs: Relevant error logs or messages
    Returns:
        Dictionary with success status
    Example for PASS:
        submit_ci_result(
            project_id=1,
            status="PASS",
            repo_name="my-project",
            gitea_url="https://gitea.example.com/user/my-project",
            summary="All CI checks passed successfully"
        )
    Example for FAIL:
        submit_ci_result(
            project_id=1,
            status="FAIL",
            repo_name="my-project",
            gitea_url="https://gitea.example.com/user/my-project",
            run_id=123,
            run_url="https://gitea.example.com/user/my-project/actions/runs/123",
            summary="CI failed: test job failed",
            failed_jobs=[{"name": "test", "conclusion": "failure", "steps": [...]}],
            error_logs="Error: pytest failed with exit code 1"
        )
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_ci_result_json
        # Validate status
        if status not in ("PASS", "FAIL", "PENDING"):
            return {"success": False, "error": "status must be 'PASS', 'FAIL', or 'PENDING'"}
        # Build CI result data
        ci_result_data = {
            "status": status,
            "repo_name": repo_name,
            "gitea_url": gitea_url,
            "run_id": run_id,
            "run_url": run_url,
            "summary": summary or "",
            "failed_jobs": failed_jobs or [],
            "error_logs": error_logs or "",
            # Timezone-aware UTC timestamp; datetime.utcnow() is naive and
            # deprecated since Python 3.12.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }
        # Save to database
        success = await set_project_ci_result_json(project_id, ci_result_data)
        if success:
            logger.info(f"CI result submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"CI result '{status}' submitted successfully",
                # Signal to the caller that a Developer fix pass is needed.
                "needs_fix": status == "FAIL"
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}
    except Exception as e:
        logger.error(f"Error submitting CI result: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_ci_result(project_id: int) -> dict:
    """
    Get the latest CI/CD result for a project. Use this to see if CI passed or failed.
    Args:
        project_id: The project ID to get CI result for (required)
    Returns:
        Dictionary with CI result data including status, failed jobs, and error logs
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_ci_result_json
        ci_result = await get_project_ci_result_json(project_id)
        # An absent (or empty) result is still a successful lookup.
        if not ci_result:
            return {
                "success": True,
                "ci_result": None,
                "message": "No CI result found for this project"
            }
        return {"success": True, "ci_result": ci_result}
    except Exception as e:
        logger.error(f"Error getting CI result: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def submit_upload_status(
    project_id: int,
    status: str,
    repo_name: str,
    gitea_url: str,
    files_pushed: list[str] | None = None,
    commit_sha: str | None = None,
    message: str | None = None
) -> dict:
    """
    Submit upload status after pushing code to Gitea.
    Use this to inform Tester that code has been uploaded and needs CI check.
    Args:
        project_id: The project ID (required)
        status: Upload status - "completed", "failed", or "in_progress" (required)
        repo_name: Repository name (required)
        gitea_url: Repository URL on Gitea (required)
        files_pushed: List of files that were pushed
        commit_sha: Commit SHA of the push
        message: Any additional message
    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import set_project_upload_status_json
        # Validate status
        valid_statuses = ("completed", "failed", "in_progress")
        if status not in valid_statuses:
            return {"success": False, "error": f"status must be one of: {valid_statuses}"}
        # Build upload status data
        upload_status_data = {
            "status": status,
            "repo_name": repo_name,
            "gitea_url": gitea_url,
            "files_pushed": files_pushed or [],
            "commit_sha": commit_sha or "",
            "message": message or "",
            # Timezone-aware UTC timestamp; datetime.utcnow() is naive and
            # deprecated since Python 3.12.
            "submitted_at": datetime.now(timezone.utc).isoformat(),
        }
        # Save to database
        success = await set_project_upload_status_json(project_id, upload_status_data)
        if success:
            logger.info(f"Upload status submitted for project {project_id}: {status}")
            return {
                "success": True,
                "message": f"Upload status '{status}' submitted successfully"
            }
        else:
            logger.error(f"Project {project_id} not found")
            return {"success": False, "error": f"Project {project_id} not found"}
    except Exception as e:
        logger.error(f"Error submitting upload status: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_upload_status(project_id: int) -> dict:
    """
    Get the latest upload status for a project.
    Use this to see what the Uploader did and get the Gitea repository URL.
    Args:
        project_id: The project ID to get upload status for (required)
    Returns:
        Dictionary with upload status data including repo URL
    """
    try:
        await _init_db_if_needed()
        from database.db import get_project_upload_status_json
        upload_status = await get_project_upload_status_json(project_id)
        # An absent (or empty) status is still a successful lookup.
        if not upload_status:
            return {
                "success": True,
                "upload_status": None,
                "message": "No upload status found for this project"
            }
        return {"success": True, "upload_status": upload_status}
    except Exception as e:
        logger.error(f"Error getting upload status: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def clear_devtest_state(project_id: int) -> dict:
    """
    Clear test result and implementation status for a new dev-test iteration.
    Use this at the start of each iteration to reset state.
    Args:
        project_id: The project ID to clear state for (required)
    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import clear_project_devtest_state
        if not await clear_project_devtest_state(project_id):
            return {"success": False, "error": f"Project {project_id} not found"}
        logger.info(f"DevTest state cleared for project {project_id}")
        return {"success": True, "message": "DevTest state cleared for new iteration"}
    except Exception as e:
        logger.error(f"Error clearing devtest state: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def clear_ci_state(project_id: int) -> dict:
    """
    Clear CI result and upload status for a new CI iteration.
    Use this at the start of each Uploader-Tester-Developer CI loop iteration.
    Args:
        project_id: The project ID to clear CI state for (required)
    Returns:
        Dictionary with success status
    """
    try:
        await _init_db_if_needed()
        from database.db import clear_project_ci_state
        if not await clear_project_ci_state(project_id):
            return {"success": False, "error": f"Project {project_id} not found"}
        logger.info(f"CI state cleared for project {project_id}")
        return {"success": True, "message": "CI state cleared for new iteration"}
    except Exception as e:
        logger.error(f"Error clearing CI state: {e}")
        return {"success": False, "error": str(e)}
if __name__ == "__main__":
    # Allow running this MCP server as a standalone process.
    mcp.run()

619
mcp_servers/gitea_mcp.py Normal file
View File

@@ -0,0 +1,619 @@
"""
Gitea MCP Server for 7000%AUTO
Provides Gitea repository management functionality
"""
import base64
import logging
import os
from typing import Dict
import httpx
from mcp.server.fastmcp import FastMCP
# Module-level logger for this MCP server process.
logger = logging.getLogger(__name__)
# FastMCP instance; the functions below register themselves via @mcp.tool().
mcp = FastMCP("Gitea Server")
# Gitea connection settings, read once at import time from the environment.
GITEA_TOKEN = os.getenv("GITEA_TOKEN", "")
GITEA_URL = os.getenv("GITEA_URL", "https://7000pct.gitea.bloupla.net")
# Optional; when empty the username is fetched from the API on demand.
GITEA_USERNAME = os.getenv("GITEA_USERNAME", "")
def get_api_base_url() -> str:
    """Return the Gitea REST API v1 base URL (no trailing slash)."""
    base = GITEA_URL.rstrip("/")
    return f"{base}/api/v1"
def get_auth_headers() -> Dict[str, str]:
    """Build the HTTP headers used to authenticate against the Gitea API."""
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": f"token {GITEA_TOKEN}",
    }
    return headers
async def get_gitea_username() -> str:
    """Resolve the Gitea username.

    Prefers the GITEA_USERNAME environment value; otherwise asks the API
    for the authenticated user. Returns an empty string when no token is
    configured or the lookup fails.
    """
    if GITEA_USERNAME:
        return GITEA_USERNAME
    if not GITEA_TOKEN:
        return ""
    try:
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.get("/user")
            if response.status_code == 200:
                return response.json().get("login", "")
    except Exception as e:
        logger.error(f"Failed to get Gitea username: {e}")
    # Fall through here on non-200 responses as well as on errors.
    return ""
@mcp.tool()
async def create_repo(name: str, description: str, private: bool = False) -> dict:
    """
    Create a new Gitea repository.

    The repository is auto-initialized with a "main" default branch.

    Args:
        name: Repository name (kebab-case recommended)
        description: Repository description
        private: Whether the repo should be private (default False)
    Returns:
        Dictionary with repository URL and details, or success=False with an
        error message (including when no Gitea token is configured).
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.post(
                "/user/repos",
                json={
                    "name": name,
                    "description": description,
                    "private": private,
                    "auto_init": True,
                    "default_branch": "main",
                }
            )
            # Gitea returns 201 on creation; accept 200 defensively.
            if response.status_code in (200, 201):
                repo_data = response.json()
                logger.info(f"Created repository: {repo_data.get('html_url')}")
                return {
                    "success": True,
                    "repo": {
                        "name": repo_data.get("name"),
                        "full_name": repo_data.get("full_name"),
                        "url": repo_data.get("html_url"),
                        "clone_url": repo_data.get("clone_url"),
                        "description": repo_data.get("description"),
                    }
                }
            else:
                # Prefer the API's structured message, fall back to raw body.
                error_msg = response.json().get("message", response.text)
                logger.error(f"Gitea API error: {error_msg}")
                return {"success": False, "error": error_msg}
    except Exception as e:
        logger.error(f"Error creating repo: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def push_files(repo: str, files: dict, message: str, branch: str = "main") -> dict:
    """
    Push multiple files to a Gitea repository.

    Each file is pushed as its own commit via the contents API: existing
    files are updated (PUT with their current SHA), new files are created
    (POST). Files that fail to push are reported in "files_failed" rather
    than aborting the whole batch.

    Args:
        repo: Repository name (username/repo or just repo name)
        files: Dictionary of {path: content} for files to push
        message: Commit message
        branch: Target branch (default "main")
    Returns:
        Dictionary with commit details; success is True if at least one
        file was pushed. "files_failed" lists any per-file failures.
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo
        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}
        pushed_files = []
        failed_files = []
        last_commit = None
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            for file_path, content in files.items():
                # Check if file exists to determine if we need to update or create
                check_response = await client.get(f"/repos/{owner}/{repo_name}/contents/{file_path}")
                file_data = {
                    "content": base64.b64encode(content.encode()).decode(),
                    "message": message,
                    "branch": branch,
                }
                if check_response.status_code == 200:
                    # File exists, need to include SHA for update
                    existing = check_response.json()
                    file_data["sha"] = existing.get("sha")
                    response = await client.put(
                        f"/repos/{owner}/{repo_name}/contents/{file_path}",
                        json=file_data
                    )
                else:
                    # File doesn't exist, create it
                    response = await client.post(
                        f"/repos/{owner}/{repo_name}/contents/{file_path}",
                        json=file_data
                    )
                if response.status_code in (200, 201):
                    result = response.json()
                    last_commit = result.get("commit", {})
                    pushed_files.append(file_path)
                else:
                    # Error bodies are not guaranteed to be JSON; don't let a
                    # bad body abort the remaining files.
                    try:
                        error_msg = response.json().get("message", response.text)
                    except ValueError:
                        error_msg = response.text
                    logger.error(f"Failed to push {file_path}: {error_msg}")
                    failed_files.append({"path": file_path, "error": error_msg})
        if pushed_files:
            logger.info(f"Pushed {len(pushed_files)} files to {owner}/{repo_name}")
            return {
                "success": True,
                "commit": {
                    "sha": last_commit.get("sha", "") if last_commit else "",
                    "message": message,
                    "url": f"{GITEA_URL}/{owner}/{repo_name}/commit/{last_commit.get('sha', '')}" if last_commit else ""
                },
                "files_pushed": pushed_files,
                "files_failed": failed_files
            }
        else:
            return {"success": False, "error": "No files were pushed", "files_failed": failed_files}
    except Exception as e:
        logger.error(f"Error pushing files: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def create_release(
    repo: str,
    tag: str,
    name: str,
    body: str,
    draft: bool = False,
    prerelease: bool = False
) -> dict:
    """
    Create a release on Gitea.

    The release targets the "main" branch.

    Args:
        repo: Repository name
        tag: Tag name (e.g., "v1.0.0")
        name: Release name
        body: Release notes/body
        draft: Whether this is a draft release
        prerelease: Whether this is a prerelease
    Returns:
        Dictionary with release URL, or success=False with an error message
        (including when no Gitea token is configured).
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo
        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.post(
                f"/repos/{owner}/{repo_name}/releases",
                json={
                    "tag_name": tag,
                    "name": name,
                    "body": body,
                    "draft": draft,
                    "prerelease": prerelease,
                    "target_commitish": "main",
                }
            )
            # Gitea returns 201 on creation; accept 200 defensively.
            if response.status_code in (200, 201):
                release_data = response.json()
                logger.info(f"Created release {tag} for {owner}/{repo_name}")
                return {
                    "success": True,
                    "release": {
                        "tag": tag,
                        "name": name,
                        # Fall back to the conventional release URL when the
                        # API response omits html_url.
                        "url": release_data.get("html_url", f"{GITEA_URL}/{owner}/{repo_name}/releases/tag/{tag}"),
                        "id": release_data.get("id"),
                    }
                }
            else:
                # Prefer the API's structured message, fall back to raw body.
                error_msg = response.json().get("message", response.text)
                logger.error(f"Gitea API error: {error_msg}")
                return {"success": False, "error": error_msg}
    except Exception as e:
        logger.error(f"Error creating release: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def setup_actions(repo: str, workflow_content: str, workflow_name: str = "ci.yml") -> dict:
    """
    Set up a Gitea Actions workflow by committing it under .gitea/workflows/.

    Args:
        repo: Repository name
        workflow_content: YAML content for the workflow
        workflow_name: Workflow file name (default "ci.yml")

    Returns:
        Dictionary with the workflow path on success; otherwise the push error.
    """
    try:
        target_path = f".gitea/workflows/{workflow_name}"
        # Delegate the actual commit to push_files so auth/owner resolution
        # stays in one place.
        push_result = await push_files(
            repo=repo,
            files={target_path: workflow_content},
            message=f"Add Gitea Actions workflow: {workflow_name}"
        )
        if not push_result.get("success"):
            return push_result
        return {
            "success": True,
            "workflow": {
                "path": target_path,
                "name": workflow_name
            }
        }
    except Exception as e:
        logger.error(f"Error setting up actions: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_workflow_runs(repo: str, status: str | None = None, branch: str | None = None, limit: int = 10) -> dict:
    """
    Get workflow runs (Gitea Actions) for a repository.

    Args:
        repo: Repository name (username/repo or just repo name)
        status: Filter by status (queued, in_progress, success, failure, cancelled, skipped, timedout)
        branch: Filter by branch name
        limit: Maximum number of runs to return (default 10, max 100)

    Returns:
        Dictionary with workflow runs list
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo
        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            params = {"per_page": min(limit, 100)}
            if status:
                params["status"] = status
            if branch:
                params["branch"] = branch
            response = await client.get(
                f"/repos/{owner}/{repo_name}/actions/runs",
                params=params
            )
            if response.status_code == 200:
                data = response.json()
                # Payload may be {"workflow_runs": [...]} or a bare list
                runs = data.get("workflow_runs", data) if isinstance(data, dict) else data
                # Simplify the runs data
                simplified_runs = []
                for run in (runs if isinstance(runs, list) else []):
                    simplified_runs.append({
                        "id": run.get("id"),
                        "name": run.get("display_title") or run.get("name"),
                        "status": run.get("status"),
                        "conclusion": run.get("conclusion"),
                        "branch": run.get("head_branch"),
                        # head_sha can be present but null; `or ""` avoids
                        # slicing None (the .get default only covers a missing key)
                        "commit_sha": (run.get("head_sha") or "")[:7],
                        "started_at": run.get("run_started_at"),
                        "url": f"{GITEA_URL}/{owner}/{repo_name}/actions/runs/{run.get('id')}"
                    })
                return {
                    "success": True,
                    "repo": f"{owner}/{repo_name}",
                    "runs": simplified_runs,
                    "total": len(simplified_runs)
                }
            # Guard against non-JSON error bodies (proxy HTML, plain text)
            try:
                error_msg = response.json().get("message", response.text)
            except ValueError:
                error_msg = response.text
            return {"success": False, "error": error_msg}
    except Exception as e:
        logger.error(f"Error getting workflow runs: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_latest_workflow_status(repo: str, branch: str = "main") -> dict:
    """
    Get the status of the latest workflow run for a repository.
    Use this to check if CI/CD passed or failed after uploading code.

    Args:
        repo: Repository name (username/repo or just repo name)
        branch: Branch to check (default "main")

    Returns:
        Dictionary with latest run status (passed/failed/pending/cancelled/unknown/none)
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo
        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.get(
                f"/repos/{owner}/{repo_name}/actions/runs",
                params={"branch": branch, "per_page": 1}
            )
            if response.status_code == 200:
                data = response.json()
                # Payload may be {"workflow_runs": [...]} or a bare list
                runs = data.get("workflow_runs", data) if isinstance(data, dict) else data
                if not runs or (isinstance(runs, list) and len(runs) == 0):
                    return {
                        "success": True,
                        "status": "none",
                        "message": "No workflow runs found",
                        "repo": f"{owner}/{repo_name}"
                    }
                latest_run = runs[0] if isinstance(runs, list) else runs
                run_status = latest_run.get("status", "unknown")
                conclusion = latest_run.get("conclusion")
                # Collapse Gitea's (status, conclusion) pair into one verdict:
                # status tells us whether the run finished; conclusion how.
                if run_status in ("queued", "in_progress", "waiting"):
                    overall_status = "pending"
                elif conclusion == "success":
                    overall_status = "passed"
                elif conclusion in ("failure", "timedout", "action_required"):
                    overall_status = "failed"
                elif conclusion in ("cancelled", "skipped"):
                    overall_status = "cancelled"
                else:
                    overall_status = "unknown"
                return {
                    "success": True,
                    "status": overall_status,
                    "run_status": run_status,
                    "conclusion": conclusion,
                    "run_id": latest_run.get("id"),
                    "run_name": latest_run.get("display_title") or latest_run.get("name"),
                    "branch": latest_run.get("head_branch"),
                    # head_sha can be present but null; `or ""` avoids slicing None
                    "commit_sha": (latest_run.get("head_sha") or "")[:7],
                    "url": f"{GITEA_URL}/{owner}/{repo_name}/actions/runs/{latest_run.get('id')}",
                    "repo": f"{owner}/{repo_name}"
                }
            elif response.status_code == 404:
                return {
                    "success": True,
                    "status": "none",
                    "message": "Actions not enabled or no runs found",
                    "repo": f"{owner}/{repo_name}"
                }
            else:
                # Guard against non-JSON error bodies (proxy HTML, plain text)
                try:
                    error_msg = response.json().get("message", response.text)
                except ValueError:
                    error_msg = response.text
                return {"success": False, "error": error_msg}
    except Exception as e:
        logger.error(f"Error getting latest workflow status: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_workflow_run_jobs(repo: str, run_id: int) -> dict:
    """
    Get jobs and their status for a specific workflow run.
    Use this to see which specific jobs failed in a CI/CD run.

    Args:
        repo: Repository name (username/repo or just repo name)
        run_id: Workflow run ID

    Returns:
        Dictionary with job details including per-step status
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo
        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.get(
                f"/repos/{owner}/{repo_name}/actions/runs/{run_id}/jobs"
            )
            if response.status_code == 200:
                data = response.json()
                # Payload may be {"jobs": [...]} or a bare list
                jobs = data.get("jobs", data) if isinstance(data, dict) else data
                simplified_jobs = []
                for job in (jobs if isinstance(jobs, list) else []):
                    simplified_jobs.append({
                        "id": job.get("id"),
                        "name": job.get("name"),
                        "status": job.get("status"),
                        "conclusion": job.get("conclusion"),
                        "started_at": job.get("started_at"),
                        "completed_at": job.get("completed_at"),
                        "steps": [
                            {
                                "name": step.get("name"),
                                "status": step.get("status"),
                                "conclusion": step.get("conclusion")
                            }
                            # "steps" can be present but null; `or []` avoids
                            # iterating None (the .get default only covers a missing key)
                            for step in (job.get("steps") or [])
                        ]
                    })
                return {
                    "success": True,
                    "run_id": run_id,
                    "repo": f"{owner}/{repo_name}",
                    "jobs": simplified_jobs
                }
            # Guard against non-JSON error bodies (proxy HTML, plain text)
            try:
                error_msg = response.json().get("message", response.text)
            except ValueError:
                error_msg = response.text
            return {"success": False, "error": error_msg}
    except Exception as e:
        logger.error(f"Error getting workflow run jobs: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def get_repo_info(repo: str) -> dict:
    """
    Get repository information.

    Args:
        repo: Repository name (username/repo or just repo name)

    Returns:
        Dictionary with repository details (name, url, stars, forks, default branch, language)
    """
    if not GITEA_TOKEN:
        return {"success": False, "error": "Gitea token not configured"}
    try:
        # Determine owner and repo name
        if "/" in repo:
            owner, repo_name = repo.split("/", 1)
        else:
            owner = await get_gitea_username()
            repo_name = repo
        if not owner:
            return {"success": False, "error": "Could not determine repository owner"}
        async with httpx.AsyncClient(
            base_url=get_api_base_url(),
            headers=get_auth_headers(),
            timeout=30.0,
        ) as client:
            response = await client.get(f"/repos/{owner}/{repo_name}")
            if response.status_code == 200:
                repo_data = response.json()
                return {
                    "success": True,
                    "repo": {
                        "name": repo_data.get("name"),
                        "full_name": repo_data.get("full_name"),
                        "url": repo_data.get("html_url"),
                        "description": repo_data.get("description"),
                        "stars": repo_data.get("stars_count", 0),
                        "forks": repo_data.get("forks_count", 0),
                        "default_branch": repo_data.get("default_branch"),
                        "language": repo_data.get("language"),
                    }
                }
            # Guard against non-JSON error bodies (proxy HTML, plain text)
            try:
                error_msg = response.json().get("message", response.text)
            except ValueError:
                error_msg = response.text
            return {"success": False, "error": error_msg}
    except Exception as e:
        # Log before swallowing, consistent with the other tools in this module
        # (the original silently dropped the exception detail from the logs).
        logger.error(f"Error getting repo info: {e}")
        return {"success": False, "error": str(e)}
if __name__ == "__main__":
    # Run this MCP server as a standalone process.
    mcp.run()

207
mcp_servers/search_mcp.py Normal file
View File

@@ -0,0 +1,207 @@
"""
Search MCP Server for 7000%AUTO
Provides search functionality across arXiv, Reddit, Hacker News, Product Hunt
"""
import logging
import xml.etree.ElementTree as ET
import httpx
from mcp.server.fastmcp import FastMCP
# Module-level logger for the search tools below.
logger = logging.getLogger(__name__)
# FastMCP server instance; the @mcp.tool() decorators below register tools on it.
mcp = FastMCP("Search Server")
@mcp.tool()
async def search_arxiv(query: str, max_results: int = 5) -> dict:
    """
    Search arXiv papers for the given query, newest first.

    Args:
        query: Search query string
        max_results: Maximum number of results to return (default 5)

    Returns:
        Dictionary with papers list containing title, summary, authors, link, published date
    """
    try:
        # HTTPS, not HTTP: httpx does not follow redirects by default, so a
        # server-side upgrade redirect would otherwise break the feed parse.
        url = "https://export.arxiv.org/api/query"
        params = {
            "search_query": f"all:{query}",
            "start": 0,
            "max_results": max_results,
            "sortBy": "submittedDate",
            "sortOrder": "descending"
        }
        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(url, params=params)
            response.raise_for_status()
            # Parse the Atom XML feed
            root = ET.fromstring(response.text)
            ns = {"atom": "http://www.w3.org/2005/Atom"}
            papers = []
            for entry in root.findall("atom:entry", ns):
                title = entry.find("atom:title", ns)
                summary = entry.find("atom:summary", ns)
                published = entry.find("atom:published", ns)
                link = entry.find("atom:id", ns)
                authors = []
                for author in entry.findall("atom:author", ns):
                    name = author.find("atom:name", ns)
                    if name is not None and name.text:
                        authors.append(name.text)
                # Element.text is None for empty tags, so guard with `or ""`
                # before calling str methods on it.
                papers.append({
                    "title": (title.text or "").strip() if title is not None else "",
                    "summary": (summary.text or "").strip()[:500] if summary is not None else "",
                    "authors": authors[:3],
                    "link": (link.text or "") if link is not None else "",
                    "published": (published.text or "") if published is not None else ""
                })
            return {"success": True, "papers": papers, "count": len(papers)}
    except Exception as e:
        logger.error(f"arXiv search failed: {e}")
        return {"success": False, "error": str(e), "papers": []}
@mcp.tool()
async def search_reddit(subreddit: str, query: str, limit: int = 10) -> dict:
    """
    Search Reddit posts within a single subreddit, sorted by relevance
    over the last month.

    Args:
        subreddit: Subreddit name (e.g., "programming")
        query: Search query string
        limit: Maximum number of results (default 10)

    Returns:
        Dictionary with posts list containing title, score, url, comments count
    """
    try:
        search_url = f"https://www.reddit.com/r/{subreddit}/search.json"
        query_params = {
            "q": query,
            "restrict_sr": "on",
            "sort": "relevance",
            "t": "month",
            "limit": limit
        }
        # Reddit rejects requests without a descriptive User-Agent
        request_headers = {"User-Agent": "7000AUTO/1.0"}
        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(search_url, params=query_params, headers=request_headers)
            response.raise_for_status()
            payload = response.json()
            children = payload.get("data", {}).get("children", [])
            posts = [
                {
                    "title": entry.get("title", ""),
                    "score": entry.get("score", 0),
                    "url": f"https://reddit.com{entry.get('permalink', '')}",
                    "comments": entry.get("num_comments", 0),
                    "created_utc": entry.get("created_utc", 0)
                }
                for entry in (child.get("data", {}) for child in children)
            ]
            return {"success": True, "posts": posts, "count": len(posts)}
    except Exception as e:
        logger.error(f"Reddit search failed: {e}")
        return {"success": False, "error": str(e), "posts": []}
@mcp.tool()
async def search_hackernews(query: str, limit: int = 10) -> dict:
    """
    Search Hacker News stories via the Algolia API.

    Args:
        query: Search query string
        limit: Maximum number of results (default 10)

    Returns:
        Dictionary with stories list containing title, points, url, comments count
    """
    try:
        url = "https://hn.algolia.com/api/v1/search"
        params = {
            "query": query,
            "tags": "story",
            "hitsPerPage": limit
        }
        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(url, params=params)
            response.raise_for_status()
            data = response.json()
            stories = []
            for hit in data.get("hits", []):
                # Ask HN / text posts have "url": null in the payload, so a
                # .get() default alone would surface None -- use `or` to fall
                # back to the HN discussion link for both missing AND null.
                stories.append({
                    "title": hit.get("title", ""),
                    "points": hit.get("points") or 0,
                    "url": hit.get("url") or f"https://news.ycombinator.com/item?id={hit.get('objectID', '')}",
                    "comments": hit.get("num_comments") or 0,
                    "author": hit.get("author", ""),
                    "created_at": hit.get("created_at", "")
                })
            return {"success": True, "stories": stories, "count": len(stories)}
    except Exception as e:
        logger.error(f"Hacker News search failed: {e}")
        return {"success": False, "error": str(e), "stories": []}
@mcp.tool()
async def search_producthunt(days: int = 7) -> dict:
    """
    Get recent Product Hunt posts via the public feed.

    Args:
        days: Number of days to look back (default 7); items with a
            missing/unparsable publication date are kept as a best effort.

    Returns:
        Dictionary with products list containing title, tagline, url
    """
    # Local imports: only this tool needs date parsing.
    from datetime import datetime, timedelta, timezone
    from email.utils import parsedate_to_datetime
    try:
        # Product Hunt doesn't have a free API, use RSS feed
        url = "https://www.producthunt.com/feed"
        async with httpx.AsyncClient(timeout=30) as client:
            response = await client.get(url)
            response.raise_for_status()
            # Parse RSS XML
            root = ET.fromstring(response.text)
            cutoff = datetime.now(timezone.utc) - timedelta(days=days)
            products = []
            for item in root.findall(".//item")[:20]:
                # Honor the `days` window (previously the parameter was
                # accepted but ignored). pubDate uses RFC 2822 format.
                pub_date = item.find("pubDate")
                if pub_date is not None and pub_date.text:
                    try:
                        published = parsedate_to_datetime(pub_date.text)
                        if published.tzinfo is None:
                            published = published.replace(tzinfo=timezone.utc)
                        if published < cutoff:
                            continue
                    except (TypeError, ValueError):
                        # Unparsable date: keep the item rather than drop it
                        pass
                title = item.find("title")
                link = item.find("link")
                description = item.find("description")
                products.append({
                    "title": title.text if title is not None else "",
                    "tagline": description.text[:200] if description is not None and description.text else "",
                    "url": link.text if link is not None else ""
                })
            return {"success": True, "products": products, "count": len(products)}
    except Exception as e:
        logger.error(f"Product Hunt search failed: {e}")
        return {"success": False, "error": str(e), "products": []}
if __name__ == "__main__":
    # Run this MCP server as a standalone process.
    mcp.run()

176
mcp_servers/x_mcp.py Normal file
View File

@@ -0,0 +1,176 @@
"""
X/Twitter MCP Server for 7000%AUTO
Provides Twitter posting and search functionality
"""
import logging
import os
from typing import Optional
import tweepy
from mcp.server.fastmcp import FastMCP
# Module-level logger for the X/Twitter tools below.
logger = logging.getLogger(__name__)
# FastMCP server instance; the @mcp.tool() decorators below register tools on it.
mcp = FastMCP("X API Server")
# Twitter API credentials from environment
# All four OAuth 1.0a values are required by get_client(); the bearer token
# is optional and passed through only when set.
API_KEY = os.getenv("X_API_KEY", "")
API_SECRET = os.getenv("X_API_SECRET", "")
ACCESS_TOKEN = os.getenv("X_ACCESS_TOKEN", "")
ACCESS_TOKEN_SECRET = os.getenv("X_ACCESS_TOKEN_SECRET", "")
BEARER_TOKEN = os.getenv("X_BEARER_TOKEN", "")
def get_client() -> Optional[tweepy.Client]:
    """Return an authenticated Tweepy client, or None when any of the
    four OAuth 1.0a credentials is missing."""
    credentials = (API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    if not all(credentials):
        return None
    return tweepy.Client(
        consumer_key=API_KEY,
        consumer_secret=API_SECRET,
        access_token=ACCESS_TOKEN,
        access_token_secret=ACCESS_TOKEN_SECRET,
        # Empty string means "not configured" -> pass None to tweepy
        bearer_token=BEARER_TOKEN or None,
        wait_on_rate_limit=True
    )
@mcp.tool()
async def post_tweet(text: str) -> dict:
    """
    Post a tweet to X/Twitter.

    Args:
        text: Tweet text (max 280 characters)

    Returns:
        Dictionary with tweet URL and status
    """
    try:
        if len(text) > 280:
            return {
                "success": False,
                "error": f"Tweet exceeds 280 characters (got {len(text)})"
            }
        client = get_client()
        if not client:
            return {
                "success": False,
                "error": "Twitter API credentials not configured"
            }
        response = client.create_tweet(text=text)
        tweet_id = response.data["id"]
        # The username lookup exists only to build a pretty URL.  If it fails
        # (transient error, rate limit) the tweet was still posted, so fall
        # back to a placeholder instead of reporting the whole call as failed
        # (the original returned success=False even though the tweet went out).
        try:
            me = client.get_me()
            username = me.data.username if me.data else "user"
        except tweepy.TweepyException as lookup_error:
            logger.warning(f"Could not resolve username for tweet URL: {lookup_error}")
            username = "user"
        tweet_url = f"https://twitter.com/{username}/status/{tweet_id}"
        logger.info(f"Posted tweet: {tweet_url}")
        return {
            "success": True,
            "tweet_id": tweet_id,
            "url": tweet_url,
            "text": text,
            "character_count": len(text)
        }
    except tweepy.TweepyException as e:
        logger.error(f"Twitter API error: {e}")
        return {"success": False, "error": str(e)}
    except Exception as e:
        logger.error(f"Error posting tweet: {e}")
        return {"success": False, "error": str(e)}
@mcp.tool()
async def search_tweets(query: str, max_results: int = 10) -> dict:
    """
    Search recent tweets on X/Twitter.

    Args:
        query: Search query string
        max_results: Maximum number of results (default 10, max 100)

    Returns:
        Dictionary with list of tweets
    """
    try:
        client = get_client()
        if not client:
            return {
                "success": False,
                "error": "Twitter API credentials not configured"
            }
        requested = max_results
        # The v2 recent-search endpoint rejects max_results outside [10, 100];
        # the original only clamped the upper bound, so requests for fewer
        # than 10 results failed.  Clamp the API call and trim the response
        # back to what the caller actually asked for.
        max_results = max(10, min(max_results, 100))
        response = client.search_recent_tweets(
            query=query,
            max_results=max_results,
            tweet_fields=["created_at", "public_metrics", "author_id"]
        )
        tweets = []
        if response.data:
            for tweet in response.data[:requested]:
                tweets.append({
                    "id": tweet.id,
                    "text": tweet.text,
                    "created_at": tweet.created_at.isoformat() if tweet.created_at else "",
                    "metrics": tweet.public_metrics if hasattr(tweet, "public_metrics") else {}
                })
        return {
            "success": True,
            "tweets": tweets,
            "count": len(tweets)
        }
    except tweepy.TweepyException as e:
        logger.error(f"Twitter search error: {e}")
        return {"success": False, "error": str(e), "tweets": []}
    except Exception as e:
        logger.error(f"Error searching tweets: {e}")
        return {"success": False, "error": str(e), "tweets": []}
@mcp.tool()
async def get_rate_limit_status() -> dict:
    """
    Get current rate limit status for the Twitter API.

    Returns:
        Dictionary with rate limit information
    """
    try:
        client = get_client()
        if not client:
            return {
                "success": False,
                "error": "Twitter API credentials not configured"
            }
        # Basic check by getting user info
        me = client.get_me()
        username = me.data.username if me.data else None
        return {
            "success": True,
            "authenticated": True,
            "username": username
        }
    except Exception as e:
        # One handler is enough here: tweepy.TweepyException subclasses
        # Exception, and both original branches returned the same dict.
        return {"success": False, "error": str(e)}
if __name__ == "__main__":
    # Run this MCP server as a standalone process.
    mcp.run()