# jira-ai-fixer/app/services/analysis.py
"""Analysis service - AI-powered issue analysis."""
import base64
import json
import logging

import httpx
from typing import Optional, Dict, Any, List
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import select
from app.core.config import settings
from app.models.organization import Organization

logger = logging.getLogger(__name__)


class AnalysisService:
@classmethod
def decrypt_key(cls, encrypted: str) -> str:
"""Simple deobfuscation."""
try:
return base64.b64decode(encrypted.encode()).decode()
except:
return ""
@classmethod
async def get_org_ai_config(cls, db: AsyncSession, org_id: int) -> Dict[str, Any]:
"""Get AI configuration from organization settings."""
result = await db.execute(select(Organization).where(Organization.id == org_id))
org = result.scalar_one_or_none()
if org and org.ai_api_key_encrypted:
return {
"provider": org.ai_provider or "openrouter",
"api_key": cls.decrypt_key(org.ai_api_key_encrypted),
"model": org.ai_model or "meta-llama/llama-3.3-70b-instruct",
"auto_analyze": org.ai_auto_analyze if org.ai_auto_analyze is not None else True,
"auto_create_pr": org.ai_auto_create_pr if org.ai_auto_create_pr is not None else True,
"confidence_threshold": org.ai_confidence_threshold or 70,
}
# Fallback to env config
return {
"provider": "openrouter",
"api_key": settings.OPENROUTER_API_KEY or "",
"model": "meta-llama/llama-3.3-70b-instruct",
"auto_analyze": True,
"auto_create_pr": True,
"confidence_threshold": 70,
}
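    # The returned dict has the same shape from either branch; values below are
    # illustrative, not real credentials:
    #     {"provider": "openrouter", "api_key": "sk-...",
    #      "model": "meta-llama/llama-3.3-70b-instruct",
    #      "auto_analyze": True, "auto_create_pr": True, "confidence_threshold": 70}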
@classmethod
async def fetch_repository_files(cls, repo: str, path: str = "") -> List[Dict[str, str]]:
"""Fetch files from Gitea repository."""
files = []
async with httpx.AsyncClient() as client:
try:
url = f"{settings.GITEA_URL}/api/v1/repos/{repo}/contents/{path}"
headers = {}
if settings.GITEA_TOKEN:
headers["Authorization"] = f"token {settings.GITEA_TOKEN}"
response = await client.get(url, headers=headers, timeout=30)
if response.status_code != 200:
return files
items = response.json()
for item in items:
if item["type"] == "file" and item["name"].endswith((".cbl", ".cob", ".py", ".java", ".js", ".ts", ".tsx", ".jsx")):
content_resp = await client.get(item["download_url"], headers=headers, timeout=30)
if content_resp.status_code == 200:
files.append({
"path": item["path"],
"content": content_resp.text[:10000] # Limit size
})
elif item["type"] == "dir":
sub_files = await cls.fetch_repository_files(repo, item["path"])
files.extend(sub_files)
        except Exception as e:
            logger.warning("Error fetching repo %s: %s", repo, e)
return files[:20] # Limit to 20 files
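    # Usage sketch (hypothetical repo name): walks the tree recursively, keeps
    # only the recognized source extensions, truncates each file to 10,000
    # characters, and returns at most 20 files.
    #     files = await AnalysisService.fetch_repository_files("acme/billing")
    #     [f["path"] for f in files]  # e.g. ["src/app.py", "src/db.py"]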
@classmethod
def build_prompt(cls, issue: Dict[str, Any], files: List[Dict[str, str]]) -> str:
"""Build analysis prompt for LLM."""
files_context = "\n\n".join([
f"### {f['path']}\n```\n{f['content']}\n```"
for f in files
]) if files else "No source code files available."
return f"""You are an expert software engineer analyzing a support issue.
## Issue Details
**Title:** {issue.get('title', 'N/A')}
**Description:** {issue.get('description', 'N/A')}
**Priority:** {issue.get('priority', 'N/A')}
## Source Code Files
{files_context}
## Your Task
Analyze the issue and identify:
1. Root cause of the problem
2. Which files are affected
3. Suggested code fix
## Response Format (JSON)
{{
"root_cause": "Detailed explanation of what's causing the issue",
"affected_files": ["file1.py", "file2.py"],
"suggested_fix": "Code changes needed to fix the issue",
"confidence": 0.85,
"explanation": "Step-by-step explanation of the fix"
}}
Respond ONLY with valid JSON."""
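    # build_prompt consumes the same dicts fetch_repository_files returns; the
    # issue payload below is hypothetical:
    #     prompt = AnalysisService.build_prompt(
    #         {"title": "Checkout 500s", "description": "Stack trace...", "priority": "High"},
    #         [{"path": "app/checkout.py", "content": "def pay(): ..."}],
    #     )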
@classmethod
async def call_llm(cls, prompt: str, ai_config: Dict[str, Any]) -> Dict[str, Any]:
"""Call the configured LLM provider."""
provider = ai_config.get("provider", "openrouter")
api_key = ai_config.get("api_key", "")
model = ai_config.get("model", "meta-llama/llama-3.3-70b-instruct")
if not api_key:
return {
"root_cause": "No API key configured. Go to Settings > AI Configuration.",
"affected_files": [],
"suggested_fix": "",
"confidence": 0,
"explanation": "Please configure an LLM API key in Settings."
}
        # OpenRouter, OpenAI, and Groq expose OpenAI-compatible chat completion
        # endpoints, so they share one request shape and differ only by URL
        # (plus OpenRouter's attribution headers).
        openai_compatible = {
            "openrouter": "https://openrouter.ai/api/v1/chat/completions",
            "openai": "https://api.openai.com/v1/chat/completions",
            "groq": "https://api.groq.com/openai/v1/chat/completions",
        }
        async with httpx.AsyncClient() as client:
            try:
                if provider in openai_compatible:
                    headers = {
                        "Authorization": f"Bearer {api_key}",
                        "Content-Type": "application/json",
                    }
                    if provider == "openrouter":
                        headers["HTTP-Referer"] = "https://jira-fixer.startdata.com.br"
                        headers["X-Title"] = "JIRA AI Fixer"
                    response = await client.post(
                        openai_compatible[provider],
                        headers=headers,
                        json={
                            "model": model,
                            "messages": [{"role": "user", "content": prompt}],
                            "temperature": 0.2,
                            "max_tokens": 2000
                        },
                        timeout=120
                    )
                elif provider == "anthropic":
                    response = await client.post(
                        "https://api.anthropic.com/v1/messages",
                        headers={
                            "x-api-key": api_key,
                            "Content-Type": "application/json",
                            "anthropic-version": "2023-06-01"
                        },
                        json={
                            "model": model,
                            "max_tokens": 2000,
                            "messages": [{"role": "user", "content": prompt}]
                        },
                        timeout=120
                    )
                else:
                    return {
                        "root_cause": f"Unsupported provider: {provider}",
                        "affected_files": [],
                        "suggested_fix": "",
                        "confidence": 0,
                        "explanation": "Please select a supported AI provider."
                    }
if response.status_code == 200:
data = response.json()
# Extract content based on provider
if provider == "anthropic":
content = data["content"][0]["text"]
else:
content = data["choices"][0]["message"]["content"]
# Parse JSON from response
try:
if "```json" in content:
content = content.split("```json")[1].split("```")[0]
elif "```" in content:
content = content.split("```")[1].split("```")[0]
return json.loads(content.strip())
except json.JSONDecodeError:
return {
"root_cause": content[:500],
"affected_files": [],
"suggested_fix": "",
"confidence": 0.3,
"explanation": "Could not parse structured response"
}
                else:
                    error_msg = response.text[:200]
                    try:
                        error_data = response.json()
                        error_msg = error_data.get("error", {}).get("message", error_msg)
                    except Exception:
                        pass
return {
"root_cause": f"API error: {response.status_code}",
"affected_files": [],
"suggested_fix": "",
"confidence": 0,
"explanation": error_msg
}
except httpx.TimeoutException:
return {
"root_cause": "Analysis timeout",
"affected_files": [],
"suggested_fix": "",
"confidence": 0,
"explanation": "The AI request timed out. Try again."
}
except Exception as e:
return {
"root_cause": f"Analysis error: {str(e)}",
"affected_files": [],
"suggested_fix": "",
"confidence": 0,
"explanation": str(e)
}
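    # Note on scales: the prompt asks the model for "confidence" as a fraction
    # (e.g. 0.85), while confidence_threshold from get_org_ai_config defaults
    # to 70 (a percentage). Callers comparing the two should convert, e.g.:
    #     result["confidence"] * 100 >= config["confidence_threshold"]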
@classmethod
async def analyze(cls, issue: Dict[str, Any], repo: Optional[str] = None, ai_config: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
"""Run AI analysis on an issue."""
# Use provided config or default
if ai_config is None:
ai_config = {
"provider": "openrouter",
"api_key": settings.OPENROUTER_API_KEY or "",
"model": "meta-llama/llama-3.3-70b-instruct",
}
# Fetch code context
files = []
if repo:
files = await cls.fetch_repository_files(repo)
# Build prompt
prompt = cls.build_prompt(issue, files)
# Call LLM
return await cls.call_llm(prompt, ai_config)
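    # Usage sketch (issue payload and repo name hypothetical):
    #     result = await AnalysisService.analyze(
    #         {"title": "NPE on checkout", "description": "...", "priority": "High"},
    #         repo="acme/shop",
    #     )
    #     result["root_cause"], result["affected_files"], result["confidence"]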
@classmethod
async def create_pull_request(
cls,
repo: str,
branch: str,
title: str,
description: str,
file_changes: List[Dict[str, str]]
) -> Optional[str]:
"""Create a pull request with suggested fix."""
if not settings.GITEA_TOKEN:
return None
async with httpx.AsyncClient() as client:
headers = {"Authorization": f"token {settings.GITEA_TOKEN}"}
try:
# 1. Get default branch
repo_resp = await client.get(
f"{settings.GITEA_URL}/api/v1/repos/{repo}",
headers=headers,
timeout=30
)
if repo_resp.status_code != 200:
return None
default_branch = repo_resp.json().get("default_branch", "main")
# 2. Get latest commit SHA
ref_resp = await client.get(
f"{settings.GITEA_URL}/api/v1/repos/{repo}/git/refs/heads/{default_branch}",
headers=headers,
timeout=30
)
if ref_resp.status_code != 200:
return None
sha = ref_resp.json()["object"]["sha"]
                # 3. Create the fix branch from that commit
                branch_resp = await client.post(
                    f"{settings.GITEA_URL}/api/v1/repos/{repo}/git/refs",
                    headers=headers,
                    json={"ref": f"refs/heads/{branch}", "sha": sha},
                    timeout=30
                )
                # Treat 409 (branch already exists) as non-fatal so retries work
                if branch_resp.status_code not in (200, 201, 409):
                    return None
                # 5. Open the pull request
pr_resp = await client.post(
f"{settings.GITEA_URL}/api/v1/repos/{repo}/pulls",
headers=headers,
json={
"title": title,
"body": description,
"head": branch,
"base": default_branch
},
timeout=30
)
if pr_resp.status_code in (200, 201):
pr_data = pr_resp.json()
return pr_data.get("html_url")
        except Exception as e:
            logger.warning("PR creation error for %s: %s", repo, e)
return None
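
# End-to-end sketch (all names hypothetical; assumes the caller already has an
# AsyncSession and an issue payload from JIRA):
#     config = await AnalysisService.get_org_ai_config(db, org_id)
#     result = await AnalysisService.analyze(issue, repo="acme/shop", ai_config=config)
#     if config["auto_create_pr"] and result["confidence"] * 100 >= config["confidence_threshold"]:
#         pr_url = await AnalysisService.create_pull_request(
#             repo="acme/shop",
#             branch="ai-fix/issue-123",
#             title=f"AI fix: {issue['title']}",
#             description=result["explanation"],
#             file_changes=[],  # caller derives these from result["suggested_fix"]
#         )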