This commit is contained in:
2026-02-23 04:20:22 -03:00
parent a1141759a5
commit af06309dad
20 changed files with 592 additions and 1 deletions

View File

@@ -53,7 +53,7 @@
"name": "ia", "name": "ia",
"slug": "ia", "slug": "ia",
"title": "IA", "title": "IA",
"status": "planned", "status": "live",
"system": "artery" "system": "artery"
} }
] ]

View File

@@ -0,0 +1,4 @@
AI_API_URL=https://api.openai.com/v1
AI_API_KEY=your_api_key_here
AI_MODEL=gpt-4o
API_PORT=8005

View File

@@ -0,0 +1 @@
# IA Vein - AI-powered practice tutor

View File

@@ -0,0 +1,12 @@
"""Run IA vein: python -m ia"""
import uvicorn
from .core.config import settings
if __name__ == "__main__":
uvicorn.run(
"ia.main:app",
host="0.0.0.0",
port=settings.api_port,
reload=True,
)

View File

@@ -0,0 +1,74 @@
"""
Generic API routes for IA vein.
Provides /health and /chat endpoints.
Use-case-specific routes are mounted separately.
"""
import json
import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, Header
from ..core.client import chat_completion, health_check as client_health_check, AIClientError
from ..core.config import settings
from ..models.chat import ChatRequest, ChatResponse
# Module logger and the generic router; main.py mounts this under /ia.
logger = logging.getLogger(__name__)
router = APIRouter()
def get_api_key(x_ai_token: str | None = None) -> str:
    """Resolve the AI API key shared by all routes.

    A non-blank request header value wins; otherwise fall back to the
    configured server-side key. Raises 401 when neither is available.
    """
    header_key = (x_ai_token or "").strip()
    if header_key:
        return header_key
    configured_key = settings.ai_api_key
    if configured_key:
        return configured_key
    raise HTTPException(401, "No AI API key configured")
_decoder = json.JSONDecoder()
def parse_json_response(content: str) -> dict | None:
"""Extract first valid JSON object from AI response using the JSON parser itself."""
for i, ch in enumerate(content):
if ch == "{":
try:
obj, _ = _decoder.raw_decode(content, i)
if isinstance(obj, dict):
return obj
except json.JSONDecodeError:
continue
return None
@router.get("/health")
async def health(x_ai_token: Optional[str] = Header(None)):
"""Test AI API connection."""
try:
key = get_api_key(x_ai_token)
result = await client_health_check(key)
return result
except AIClientError as e:
raise HTTPException(503, str(e))
@router.post("/chat")
async def chat(
req: ChatRequest,
x_ai_token: Optional[str] = Header(None),
):
"""Generic chat completion endpoint."""
try:
key = get_api_key(x_ai_token)
messages = [{"role": m.role, "content": m.content} for m in req.messages]
content = await chat_completion(
messages,
api_key=key,
temperature=req.temperature,
max_tokens=req.max_tokens,
)
return ChatResponse(content=content, parsed=parse_json_response(content))
except AIClientError as e:
raise HTTPException(503, str(e))

View File

@@ -0,0 +1,73 @@
"""
AI API client - OpenAI-compatible chat completions via httpx.
"""
import httpx
from .config import settings
class AIClientError(Exception):
    """Raised for any failure talking to the AI API: HTTP error status,
    network failure, malformed response, or a missing API key."""
    pass
async def chat_completion(
    messages: list[dict],
    model: str | None = None,
    temperature: float = 0.7,
    max_tokens: int = 1024,
    api_key: str | None = None,
) -> str:
    """Send a chat completion request to an OpenAI-compatible API.

    Args:
        messages: Chat history as ``{"role": ..., "content": ...}`` dicts.
        model: Model name; defaults to ``settings.ai_model``.
        temperature: Sampling temperature forwarded verbatim.
        max_tokens: Completion token limit forwarded verbatim.
        api_key: Per-request key; falls back to ``settings.ai_api_key``.

    Returns:
        The assistant message content of the first choice.

    Raises:
        AIClientError: When no key is configured, the API returns an error
            status, the request fails, or the response shape is unexpected.
    """
    url = f"{settings.ai_api_url}/chat/completions"
    key = api_key or settings.ai_api_key
    if not key:
        raise AIClientError("No API key configured")
    headers = {
        "Authorization": f"Bearer {key}",
        "Content-Type": "application/json",
    }
    payload = {
        "model": model or settings.ai_model,
        "messages": messages,
        "temperature": temperature,
        "max_tokens": max_tokens,
    }
    async with httpx.AsyncClient(timeout=30.0) as client:
        try:
            response = await client.post(url, json=payload, headers=headers)
            response.raise_for_status()
            data = response.json()
            return data["choices"][0]["message"]["content"]
        except httpx.HTTPStatusError as e:
            # Chain the cause so the original httpx error (status, body)
            # is preserved in tracebacks instead of being discarded.
            raise AIClientError(
                f"API error {e.response.status_code}: {e.response.text}"
            ) from e
        except Exception as e:
            # Covers network errors and unexpected response shapes
            # (missing "choices"); chained for the same reason as above.
            raise AIClientError(f"Request failed: {e}") from e
async def health_check(api_key: str | None = None) -> dict:
    """Verify connectivity to the AI API by hitting its /models endpoint.

    Args:
        api_key: Per-request key; falls back to ``settings.ai_api_key``.

    Returns:
        A status dict with the configured provider URL and model name.

    Raises:
        AIClientError: When no key is configured or the request fails.
    """
    url = f"{settings.ai_api_url}/models"
    key = api_key or settings.ai_api_key
    if not key:
        raise AIClientError("No API key configured")
    headers = {"Authorization": f"Bearer {key}"}
    async with httpx.AsyncClient(timeout=10.0) as client:
        try:
            response = await client.get(url, headers=headers)
            response.raise_for_status()
            return {
                "status": "ok",
                "provider": settings.ai_api_url,
                "model": settings.ai_model,
            }
        except Exception as e:
            # Chain the cause so the underlying httpx error survives.
            raise AIClientError(f"Health check failed: {e}") from e

View File

@@ -0,0 +1,24 @@
"""
IA Vein configuration loaded from .env file.
"""
from pathlib import Path
from pydantic_settings import BaseSettings
# The .env file lives at the package root (ia/.env), one level above core/.
ENV_FILE = Path(__file__).parent.parent / ".env"


class IAConfig(BaseSettings):
    """Settings for the IA vein, loaded from the environment and ENV_FILE."""
    # Base URL of the OpenAI-compatible API (no trailing slash).
    ai_api_url: str = "https://api.openai.com/v1"
    # Server-side default key; empty means callers must supply their own.
    ai_api_key: str = ""
    ai_model: str = "gpt-4o"
    # Port uvicorn binds to when running this vein standalone.
    api_port: int = 8005
    model_config = {
        "env_file": ENV_FILE,
        "env_file_encoding": "utf-8",
        # Ignore unrelated variables that may share the .env file.
        "extra": "ignore",
    }


# Shared singleton settings instance imported across the package.
settings = IAConfig()

View File

@@ -0,0 +1,33 @@
"""
IA Vein - FastAPI app.
Generic AI vein with use-case-specific routers mounted as sub-routes.
"""
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from .api.routes import router as generic_router
from .usecases.practice.routes import router as practice_router
from .core.config import settings
app = FastAPI(title="IA Vein", version="0.1.0")
app.add_middleware(
CORSMiddleware,
allow_origins=["https://mcrn.ar", "http://localhost:8000", "http://localhost:8765", "http://127.0.0.1:8000"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
# Generic: /ia/health, /ia/chat
app.include_router(generic_router, prefix="/ia")
# Use case: /ia/practice/*
app.include_router(practice_router, prefix="/ia/practice")
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=settings.api_port)

View File

@@ -0,0 +1,21 @@
"""
Generic request/response models for IA vein.
"""
from pydantic import BaseModel
class ChatMessage(BaseModel):
    """A single chat turn sent to the AI API."""
    role: str  # "system" | "user" | "assistant"
    content: str
class ChatRequest(BaseModel):
    """Request body for the generic chat completion endpoint."""
    messages: list[ChatMessage]
    temperature: float = 0.7
    max_tokens: int = 1024
class ChatResponse(BaseModel):
    """AI reply: raw text plus the first embedded JSON object, if any."""
    content: str
    parsed: dict | None = None

View File

@@ -0,0 +1,5 @@
fastapi>=0.104.0
uvicorn>=0.24.0
pydantic>=2.0.0
pydantic-settings>=2.0.0
httpx>=0.25.0

View File

@@ -0,0 +1,18 @@
#!/usr/bin/env python
"""Launch the IA vein API directly from this directory."""
import sys
from pathlib import Path

# Make absolute imports (core.*, main) resolve when run as a plain script.
sys.path.insert(0, str(Path(__file__).parent))

import uvicorn
from core.config import settings

if __name__ == "__main__":
    # Development server with auto-reload on source changes.
    server_opts = dict(host="0.0.0.0", port=settings.api_port, reload=True)
    uvicorn.run("main:app", **server_opts)

View File

@@ -0,0 +1,43 @@
"""
Format practice item context data as text for AI prompt assembly.
"""
def _fmt_str(label, val):
return f"{label}: {val}"
def _fmt_list(label, val):
return f"{label}:\n" + "\n".join(f" - {v}" for v in val)
def _fmt_join(label, val):
return f"{label}: {', '.join(val)}"
def _fmt_complexity(label, val):
return f"{label}: Time {val.get('time', '?')}, Space {val.get('space', '?')}"
FIELDS = [
("oneLiner", "Summary", _fmt_str),
("howItWorks", "How it works", _fmt_str),
("structure", "Structure", _fmt_str),
("whenToUse", "When to use", _fmt_list),
("participants", "Participants", _fmt_join),
("complexity", "Complexity", _fmt_complexity),
]
def format_item_context(item: dict) -> str:
"""Format algorithm/pattern data as context string for prompts."""
lines = [
f"Name: {item.get('name', 'Unknown')}",
f"Category: {item.get('category', 'Unknown')}",
f"Topic: {item.get('topic', 'unknown')}",
]
for key, label, fmt in FIELDS:
val = item.get(key)
if val:
lines.append(fmt(label, val))
return "\n".join(lines)

View File

@@ -0,0 +1,57 @@
"""
Request models for the practice tutor use case.
"""
from pydantic import BaseModel
class ItemContext(BaseModel):
"""Algorithm or pattern data sent from frontend."""
id: str
name: str
category: str
topic: str = "algorithms"
whenToUse: list[str] = []
howItWorks: str = ""
structure: str = ""
participants: list[str] = []
complexity: dict = {}
oneLiner: str = ""
class IdentifyRequest(BaseModel):
item: ItemContext
lang: str = "en"
class ReviewExplanationRequest(BaseModel):
item: ItemContext
userExplanation: str
lang: str = "en"
class ReviewCodeRequest(BaseModel):
item: ItemContext
userCode: str
language: str = "python"
referenceCode: str = ""
lang: str = "en"
class ReviewStructureRequest(BaseModel):
item: ItemContext
userExplanation: str
lang: str = "en"
class HintRequest(BaseModel):
item: ItemContext
mode: str = "identify"
context: str = ""
lang: str = "en"
class SuggestNextRequest(BaseModel):
progress: dict
items: list[ItemContext]
lang: str = "en"

View File

@@ -0,0 +1,88 @@
"""
System prompts for the practice tutor use case.
Each function returns a system prompt string for a specific drill mode.
"""
def identify(lang: str = "en") -> str:
if lang == "es":
return (
"Eres un tutor de algoritmos y patrones de diseno. "
"Genera una descripcion de un problema que se resuelve con la tecnica especificada. "
"La descripcion debe ser un escenario realista, sin nombrar la tecnica directamente. "
'Responde en JSON: {"problem": "...", "hint": "..."}'
)
return (
"You are an algorithm and design pattern tutor. "
"Generate a problem description that is solved by the specified technique. "
"The description should be a realistic scenario without naming the technique directly. "
'Respond in JSON: {"problem": "...", "hint": "..."}'
)
def review_explanation(lang: str = "en") -> str:
    """System prompt for grading a free-text explanation against reference data."""
    localized = {
        "es": (
            "Eres un tutor evaluando la explicacion de un estudiante sobre un algoritmo o patron. "
            "Compara con los datos de referencia. Se constructivo pero preciso. "
            'Responde en JSON: {"score": "good|partial|weak", "feedback": "...", "missing": ["..."]}'
        ),
    }
    default = (
        "You are a tutor evaluating a student's explanation of an algorithm or pattern. "
        "Compare against the reference data. Be constructive but precise. "
        'Respond in JSON: {"score": "good|partial|weak", "feedback": "...", "missing": ["..."]}'
    )
    return localized.get(lang, default)
def review_code(lang: str = "en") -> str:
    """System prompt for reviewing a student's code implementation."""
    es = (
        "Eres un tutor de codigo evaluando la implementacion de un estudiante. "
        "Evalua: correctitud, eficiencia, estilo, y manejo de edge cases. "
        'Responde en JSON: {"verdict": "correct|partial|incorrect", "feedback": "...", "improvements": ["..."]}'
    )
    en = (
        "You are a code tutor evaluating a student's implementation. "
        "Evaluate: correctness, efficiency, style, and edge case handling. "
        'Respond in JSON: {"verdict": "correct|partial|incorrect", "feedback": "...", "improvements": ["..."]}'
    )
    return es if lang == "es" else en
def review_structure(lang: str = "en") -> str:
    """System prompt for grading a design-pattern structure explanation."""
    localized = {
        "es": (
            "Eres un tutor evaluando la explicacion de un estudiante sobre la estructura de un patron de diseno. "
            "Compara participantes, relaciones y responsabilidades con la referencia. "
            'Responde en JSON: {"score": "good|partial|weak", "feedback": "...", "missing": ["..."]}'
        ),
    }
    default = (
        "You are a tutor evaluating a student's explanation of a design pattern's structure. "
        "Compare participants, relationships, and responsibilities against the reference. "
        'Respond in JSON: {"score": "good|partial|weak", "feedback": "...", "missing": ["..."]}'
    )
    return localized.get(lang, default)
def hint(lang: str = "en") -> str:
    """System prompt for giving a subtle, non-spoiling hint."""
    localized = {
        "es": (
            "Eres un tutor. Da una pista sutil sin revelar la respuesta completa. "
            "Guia al estudiante hacia el enfoque correcto."
        ),
    }
    default = (
        "You are a tutor. Give a subtle hint without revealing the full answer. "
        "Guide the student toward the correct approach."
    )
    return localized.get(lang, default)
def suggest_next(lang: str = "en") -> str:
    """System prompt for recommending what to practice next."""
    es = (
        "Eres un tutor. Basandote en el progreso del estudiante, sugiere que practicar. "
        "Prioriza tecnicas debiles. "
        'Responde en JSON: {"suggestions": [{"id": "...", "reason": "..."}], "encouragement": "..."}'
    )
    en = (
        "You are a tutor. Based on student progress, suggest what to practice next. "
        "Prioritize weak techniques. "
        'Respond in JSON: {"suggestions": [{"id": "...", "reason": "..."}], "encouragement": "..."}'
    )
    return es if lang == "es" else en

View File

@@ -0,0 +1,138 @@
"""
Practice tutor use case routes.
Mounted under /ia/practice/ by main.py.
Uses the generic AI client from core.
"""
import json
import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, Header
from ...core.client import chat_completion, AIClientError
from ...api.routes import get_api_key, parse_json_response
from ...models.chat import ChatResponse
from . import prompts
from .models import (
IdentifyRequest,
ReviewExplanationRequest,
ReviewCodeRequest,
ReviewStructureRequest,
HintRequest,
SuggestNextRequest,
)
from .formatter import format_item_context
logger = logging.getLogger(__name__)
# Router for the practice tutor; main.py mounts it under /ia/practice.
router = APIRouter()
@router.post("/identify")
async def generate_identify_question(
req: IdentifyRequest,
x_ai_token: Optional[str] = Header(None),
):
try:
key = get_api_key(x_ai_token)
ctx = format_item_context(req.item.model_dump())
messages = [
{"role": "system", "content": prompts.identify(req.lang)},
{"role": "user", "content": f"Generate a problem description for:\n\n{ctx}"},
]
content = await chat_completion(messages, api_key=key, temperature=0.8)
return ChatResponse(content=content, parsed=parse_json_response(content))
except AIClientError as e:
raise HTTPException(503, str(e))
@router.post("/review-explanation")
async def review_explanation(
req: ReviewExplanationRequest,
x_ai_token: Optional[str] = Header(None),
):
try:
key = get_api_key(x_ai_token)
ctx = format_item_context(req.item.model_dump())
messages = [
{"role": "system", "content": prompts.review_explanation(req.lang)},
{"role": "user", "content": f"Reference:\n{ctx}\n\nStudent's explanation:\n{req.userExplanation}"},
]
content = await chat_completion(messages, api_key=key, temperature=0.3)
return ChatResponse(content=content, parsed=parse_json_response(content))
except AIClientError as e:
raise HTTPException(503, str(e))
@router.post("/review-code")
async def review_code(
req: ReviewCodeRequest,
x_ai_token: Optional[str] = Header(None),
):
try:
key = get_api_key(x_ai_token)
ctx = format_item_context(req.item.model_dump())
ref = f"\n\nReference ({req.language}):\n```\n{req.referenceCode}\n```" if req.referenceCode else ""
messages = [
{"role": "system", "content": prompts.review_code(req.lang)},
{"role": "user", "content": f"Algorithm:\n{ctx}{ref}\n\nStudent's code ({req.language}):\n```\n{req.userCode}\n```"},
]
content = await chat_completion(messages, api_key=key, temperature=0.3)
return ChatResponse(content=content, parsed=parse_json_response(content))
except AIClientError as e:
raise HTTPException(503, str(e))
@router.post("/review-structure")
async def review_structure(
req: ReviewStructureRequest,
x_ai_token: Optional[str] = Header(None),
):
try:
key = get_api_key(x_ai_token)
ctx = format_item_context(req.item.model_dump())
messages = [
{"role": "system", "content": prompts.review_structure(req.lang)},
{"role": "user", "content": f"Reference:\n{ctx}\n\nStudent's explanation:\n{req.userExplanation}"},
]
content = await chat_completion(messages, api_key=key, temperature=0.3)
return ChatResponse(content=content, parsed=parse_json_response(content))
except AIClientError as e:
raise HTTPException(503, str(e))
@router.post("/hint")
async def get_hint(
req: HintRequest,
x_ai_token: Optional[str] = Header(None),
):
try:
key = get_api_key(x_ai_token)
ctx = format_item_context(req.item.model_dump())
messages = [
{"role": "system", "content": prompts.hint(req.lang)},
{"role": "user", "content": f"Mode: {req.mode}\n{ctx}\n\nStuck on: {req.context or 'general approach'}"},
]
content = await chat_completion(messages, api_key=key, temperature=0.7, max_tokens=256)
return ChatResponse(content=content)
except AIClientError as e:
raise HTTPException(503, str(e))
@router.post("/suggest-next")
async def suggest_next(
req: SuggestNextRequest,
x_ai_token: Optional[str] = Header(None),
):
try:
key = get_api_key(x_ai_token)
progress_str = json.dumps(req.progress, indent=2)
names = [i.name for i in req.items]
messages = [
{"role": "system", "content": prompts.suggest_next(req.lang)},
{"role": "user", "content": f"Available: {', '.join(names)}\n\nProgress:\n{progress_str}"},
]
content = await chat_completion(messages, api_key=key, temperature=0.5)
return ChatResponse(content=content, parsed=parse_json_response(content))
except AIClientError as e:
raise HTTPException(503, str(e))