wire llms, ui tweaks

2026-04-12 11:32:36 -03:00
parent 4de44baf98
commit 0f122fa8f7
22 changed files with 960 additions and 203 deletions

View File

@@ -100,17 +100,36 @@ def get_pending_rebookings(hub: str, limit: int = 20) -> list[dict]:
@mcp.tool()
def generate_narrative(context: dict) -> str:
async def generate_narrative(context: dict) -> str:
"""Synthesizes aggregated operational context into a structured
handover brief for ops managers.
Uses Claude Sonnet via AWS Bedrock Converse API.
Uses the shared LLM client; provider selected via the LLM_PROVIDER env var (Groq by default, or Anthropic, Bedrock, any OpenAI-compatible endpoint).
Output: prioritized, concise, structured by IMMEDIATE / MONITOR / FYI.
NOTE: In v1, this returns a structured template from the context data.
LLM integration will be added when Bedrock is wired up.
Falls back to a structured template if the LLM call fails (e.g. no API key configured).
"""
# V1: structured template — will be replaced with Bedrock call
try:
from mcp_servers.shared_llm import generate, _get_provider
hub = context.get("hub", "ALL")
shift_time = context.get("shift_time", datetime.now(timezone.utc).strftime("%H:%M UTC"))
system_prompt = (
f"You are an airline operations shift handover briefing system. "
f"Generate a concise handover brief for {hub} at {shift_time}. "
f"Structure as: HEADER, then IMMEDIATE ACTION (items needing action within 2h), "
f"MONITOR (items that could escalate), FYI (resolved or low-risk). "
f"Be concise — ops managers scan, they don't read paragraphs. "
f"Use the data provided. Do not invent details."
)
text = await generate(system_prompt, json.dumps(context, indent=2))
return json.dumps({"text": text, "provider": _get_provider()})
except Exception:
return json.dumps({"text": _template_narrative(context), "provider": "template"})
def _template_narrative(context: dict) -> str:
"""Structured template fallback when LLM is unavailable."""
sections = []
immediate = context.get("immediate", [])
monitor = context.get("monitor", [])
@@ -119,8 +138,7 @@ def generate_narrative(context: dict) -> str:
hub = context.get("hub", "ALL")
shift_time = context.get("shift_time", datetime.now(timezone.utc).strftime("%H:%M UTC"))
header = f"SHIFT HANDOVER BRIEF — {hub} / {shift_time}"
sections.append(header)
sections.append(f"SHIFT HANDOVER BRIEF — {hub} / {shift_time}")
sections.append(f"Generated: {datetime.now(timezone.utc).strftime('%H:%M UTC')}")
sections.append("")

View File

@@ -5,7 +5,6 @@ notification prompt template (multi-tone).
"""
import json
from datetime import datetime, timezone
from fastmcp import FastMCP
@@ -25,17 +24,33 @@ mcp = FastMCP(
@mcp.tool()
def generate_notification(context: dict) -> str:
async def generate_notification(context: dict) -> str:
"""Synthesizes flight disruption context into an empathetic,
actionable passenger notification.
Uses Claude Sonnet via AWS Bedrock Converse API.
Uses the shared LLM client; provider selected via the LLM_PROVIDER env var (Groq by default, or Anthropic, Bedrock, any OpenAI-compatible endpoint).
Output: clear, human, no jargon, includes gate/time/status.
NOTE: In v1, this returns a structured template from the context data.
LLM integration will be added when Bedrock is wired up.
Falls back to a structured template if the LLM call fails (e.g. no API key configured).
"""
# V1: structured template — will be replaced with Bedrock call
try:
from mcp_servers.shared_llm import generate, _get_provider
system_prompt = (
"You are a passenger notification system for Stellar Air. "
"Write a clear, empathetic notification about this flight disruption. "
"Explain WHY the delay or cancellation happened using the operational data provided. "
"Tell the passenger what's happening next: new boarding time, gate, status. "
"Be human and reassuring. No aviation jargon. No speculation. "
"If data is missing for a section, omit it — don't make things up."
)
text = await generate(system_prompt, json.dumps(context, indent=2))
return json.dumps({"text": text, "provider": _get_provider()})
except Exception:
return json.dumps({"text": _template_notification(context), "provider": "template"})
def _template_notification(context: dict) -> str:
"""Structured template fallback when LLM is unavailable."""
flight_id = context.get("flight_id", "")
origin = context.get("origin", "")
destination = context.get("destination", "")

mcp_servers/shared_llm.py (new file, 108 additions)
View File

@@ -0,0 +1,108 @@
"""Shared LLM client for MCP server narrative tools.
Multi-provider support — selected via LLM_PROVIDER env var:
groq (default) — Groq API, OpenAI-compatible. Fast, free tier.
Needs GROQ_API_KEY.
anthropic — Direct Anthropic SDK. Needs ANTHROPIC_API_KEY.
bedrock — AWS Bedrock Converse API. Needs AWS credentials.
openai — Any OpenAI-compatible endpoint.
Set OPENAI_API_KEY and optionally OPENAI_BASE_URL.
Usage:
LLM_PROVIDER=groq GROQ_API_KEY=gsk_... python -m mcp_servers.shared
"""
import os
from typing import Literal
Provider = Literal["groq", "anthropic", "bedrock", "openai"]
def _get_provider() -> Provider:
p = os.getenv("LLM_PROVIDER", "groq").lower()
if p in ("groq", "anthropic", "bedrock", "openai"):
return p
return "groq"
async def generate(system_prompt: str, user_content: str, max_tokens: int = 1024) -> str:
"""Call an LLM and return the text response."""
provider = _get_provider()
if provider == "anthropic":
return await _generate_anthropic(system_prompt, user_content, max_tokens)
elif provider == "bedrock":
return await _generate_bedrock(system_prompt, user_content, max_tokens)
else:
# groq, openai, or any OpenAI-compatible provider
return await _generate_openai_compat(system_prompt, user_content, max_tokens)
async def _generate_openai_compat(
system_prompt: str, user_content: str, max_tokens: int
) -> str:
"""OpenAI-compatible API (Groq, OpenAI, local, etc)."""
import openai
provider = _get_provider()
if provider == "groq":
api_key = os.getenv("GROQ_API_KEY")
base_url = "https://api.groq.com/openai/v1"
model = os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile")
else:
api_key = os.getenv("OPENAI_API_KEY")
base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
model = os.getenv("OPENAI_MODEL", "gpt-4o")
client = openai.AsyncOpenAI(api_key=api_key, base_url=base_url)
response = await client.chat.completions.create(
model=model,
max_tokens=max_tokens,
temperature=0.7,
messages=[
{"role": "system", "content": system_prompt},
{"role": "user", "content": user_content},
],
)
return response.choices[0].message.content
async def _generate_anthropic(
system_prompt: str, user_content: str, max_tokens: int
) -> str:
import anthropic
client = anthropic.AsyncAnthropic()
response = await client.messages.create(
model=os.getenv("ANTHROPIC_MODEL", "claude-sonnet-4-20250514"),
max_tokens=max_tokens,
temperature=0.7,
system=system_prompt,
messages=[{"role": "user", "content": user_content}],
)
return response.content[0].text
async def _generate_bedrock(
system_prompt: str, user_content: str, max_tokens: int
) -> str:
import json
import boto3
# NOTE: boto3 is synchronous; this call blocks the event loop for the duration of the request
bedrock = boto3.client(
"bedrock-runtime",
region_name=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
)
response = bedrock.converse(
modelId=os.getenv("BEDROCK_MODEL_ID", "anthropic.claude-sonnet-4-20250514-v1:0"),
system=[{"text": system_prompt}],
messages=[{"role": "user", "content": [{"text": user_content}]}],
inferenceConfig={"maxTokens": max_tokens, "temperature": 0.7},
)
return response["output"]["message"]["content"][0]["text"]