# jira-ai-fixer/api/services/llm.py
"""
LLM Service - Orchestration for AI models.
"""
import json
import logging
import os
from typing import Any, Dict, List, Optional

import httpx

logger = logging.getLogger(__name__)


class LLMService:
"""
LLM orchestration service supporting multiple providers.
Providers:
- Azure OpenAI (production, compliance)
- OpenRouter (development, free models)
"""
def __init__(
self,
provider: str = "openrouter",
azure_endpoint: Optional[str] = None,
azure_key: Optional[str] = None,
azure_model: str = "gpt-4o",
openrouter_key: Optional[str] = None,
openrouter_model: str = "meta-llama/llama-3.3-70b-instruct:free",
):
self.provider = provider
self.azure_endpoint = azure_endpoint
self.azure_key = azure_key
self.azure_model = azure_model
self.openrouter_key = openrouter_key
self.openrouter_model = openrouter_model
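
    # Construction sketch (illustrative): credentials would typically be read
    # from the environment rather than hard-coded. The variable names below
    # are assumptions of this example, not defined elsewhere in this repo.
    #
    #   service = LLMService(
    #       provider="azure",
    #       azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    #       azure_key=os.environ["AZURE_OPENAI_KEY"],
    #   )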

    async def analyze_issue(
        self,
        issue_description: str,
        code_context: str,
        business_rules: Optional[str] = None,
        similar_fixes: Optional[List[Dict[str, Any]]] = None,
    ) -> Dict[str, Any]:
        """
        Analyze an issue and generate fix suggestions.

        Returns:
            {
                "root_cause": str,
                "affected_files": List[str],
                "proposed_fix": str,
                "confidence": float,
                "explanation": str,
            }
        """
        prompt = self._build_analysis_prompt(
            issue_description,
            code_context,
            business_rules,
            similar_fixes,
        )
        response = await self._call_llm(prompt)
        return self._parse_analysis_response(response)
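
    # Usage sketch: callers await the coroutine and read the keys documented
    # in the docstring, e.g.
    #
    #   result = await service.analyze_issue(description, code_context)
    #   if result["confidence"] >= 0.8:      # threshold is illustrative
    #       review(result["proposed_fix"])   # review() is hypothetical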

    def _build_analysis_prompt(
        self,
        issue_description: str,
        code_context: str,
        business_rules: Optional[str],
        similar_fixes: Optional[List[Dict[str, Any]]],
    ) -> str:
        """Build the analysis prompt."""
        prompt = f"""You are an expert in mainframe payment systems, specifically the JIRA Acquirer (ACQ-MF) and Interchange (ICG-MF) products.

## System Context
{business_rules or "No specific business rules provided."}

## Reported Issue
{issue_description}

## Current Code
{code_context}
"""
        if similar_fixes:
            prompt += "## History of Similar Fixes\n"
            for i, fix in enumerate(similar_fixes[:3], 1):
                prompt += f"""
### Example {i}
Problem: {fix.get('problem', 'N/A')}
Solution: {fix.get('solution', 'N/A')}
"""
        prompt += """
## Task
Analyze the issue and:
1. Identify the likely root cause
2. Locate the affected program(s)
3. Propose a specific fix
4. Explain the impact of the change

## Rules
- Maintain COBOL-85 compatibility
- Preserve the existing copybook structure
- Do not change interfaces with other systems without explicit mention
- Document all proposed changes

## Response Format
Respond in valid JSON:
{
    "root_cause": "Description of the identified root cause",
    "affected_files": ["file1.cbl", "file2.cbl"],
    "proposed_fix": "COBOL code with the proposed fix",
    "confidence": 0.85,
    "explanation": "Detailed explanation of the impact"
}
"""
        return prompt
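
    # Entries in `similar_fixes` are plain dicts; only the "problem" and
    # "solution" keys are read above. Illustrative shape (values invented):
    #
    #   {"problem": "Settlement totals diverge in ICG-MF batch",
    #    "solution": "Corrected COMP-3 field length in the copybook"}
    #
    # Missing keys fall back to "N/A" via dict.get().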

    async def _call_llm(self, prompt: str) -> str:
        """Call the configured LLM provider."""
        if self.provider == "azure":
            return await self._call_azure(prompt)
        else:
            return await self._call_openrouter(prompt)

    async def _call_azure(self, prompt: str) -> str:
        """Call Azure OpenAI."""
        url = (
            f"{self.azure_endpoint}/openai/deployments/{self.azure_model}"
            "/chat/completions?api-version=2024-02-01"
        )
        async with httpx.AsyncClient() as client:
            response = await client.post(
                url,
                headers={
                    "api-key": self.azure_key,
                    "Content-Type": "application/json",
                },
                json={
                    "messages": [{"role": "user", "content": prompt}],
                    "temperature": 0.2,
                    "max_tokens": 4096,
                },
                timeout=120.0,
            )
            response.raise_for_status()
            data = response.json()
            return data["choices"][0]["message"]["content"]

    async def _call_openrouter(self, prompt: str) -> str:
        """Call OpenRouter API."""
        async with httpx.AsyncClient() as client:
            response = await client.post(
                "https://openrouter.ai/api/v1/chat/completions",
                headers={
                    "Authorization": f"Bearer {self.openrouter_key}",
                    "Content-Type": "application/json",
                },
                json={
                    "model": self.openrouter_model,
                    "messages": [{"role": "user", "content": prompt}],
                    "temperature": 0.2,
                    "max_tokens": 4096,
                },
                timeout=120.0,
            )
            response.raise_for_status()
            data = response.json()
            return data["choices"][0]["message"]["content"]

    def _parse_analysis_response(self, response: str) -> Dict[str, Any]:
        """Parse LLM response into structured format."""
        try:
            # Try to extract JSON from response
            start = response.find("{")
            end = response.rfind("}") + 1
            if start >= 0 and end > start:
                json_str = response[start:end]
                return json.loads(json_str)
        except json.JSONDecodeError:
            logger.warning("Failed to parse LLM response as JSON")
        # Fallback: return raw response
        return {
            "root_cause": "Unable to parse structured response",
            "affected_files": [],
            "proposed_fix": response,
            "confidence": 0.3,
            "explanation": "Response could not be parsed automatically",
        }
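

# Minimal smoke test (a sketch, not part of the service API). It assumes an
# OPENROUTER_API_KEY environment variable; that name is an assumption of this
# example and is not defined elsewhere in this repo.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        service = LLMService(
            provider="openrouter",
            openrouter_key=os.environ.get("OPENROUTER_API_KEY"),
        )
        # Placeholder issue and code context, purely illustrative.
        result = await service.analyze_issue(
            issue_description="Settlement totals diverge after batch reprocessing.",
            code_context="(relevant COBOL excerpt goes here)",
        )
        print(json.dumps(result, indent=2, ensure_ascii=False))

    asyncio.run(_demo())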