wire llms, ui tweaks

This commit is contained in:
2026-04-12 11:32:36 -03:00
parent 4de44baf98
commit 0f122fa8f7
22 changed files with 960 additions and 203 deletions

View File

@@ -1,11 +1,24 @@
# AWS Bedrock (production pattern)
# LLM Provider (groq | anthropic | bedrock | openai)
LLM_PROVIDER=groq
# Groq (default — fast, free tier)
GROQ_API_KEY=gsk_...
GROQ_MODEL=llama-3.3-70b-versatile
# Anthropic (direct SDK)
ANTHROPIC_API_KEY=sk-ant-...
ANTHROPIC_MODEL=claude-sonnet-4-20250514
# AWS Bedrock
AWS_ACCESS_KEY_ID=AKIA...
AWS_SECRET_ACCESS_KEY=...
AWS_DEFAULT_REGION=us-east-1
USE_BEDROCK=false
BEDROCK_MODEL_ID=anthropic.claude-sonnet-4-20250514-v1:0
# Anthropic (fallback for local dev)
ANTHROPIC_API_KEY=sk-ant-...
# OpenAI-compatible (any endpoint)
OPENAI_API_KEY=sk-...
OPENAI_BASE_URL=https://api.openai.com/v1
OPENAI_MODEL=gpt-4o
# Kong Konnect (optional)
KONG_PROXY_URL=

View File

@@ -148,10 +148,25 @@ async def run_fce(
}
t0 = time.time()
notification_text = await mcp.call_tool("passenger", "generate_notification", {"context": context})
raw_result = await mcp.call_tool("passenger", "generate_notification", {"context": context})
latency = int((time.time() - t0) * 1000)
await emit("tool_call_end", tool="generate_notification", latency_ms=latency, is_live=False)
# Parse structured response (text + provider)
llm_provider = "template"
if isinstance(raw_result, dict) and "text" in raw_result:
notification_text = raw_result["text"]
llm_provider = raw_result.get("provider", "unknown")
elif isinstance(raw_result, str):
try:
parsed = json.loads(raw_result)
notification_text = parsed.get("text", raw_result)
llm_provider = parsed.get("provider", "unknown")
except (json.JSONDecodeError, TypeError):
notification_text = raw_result
else:
notification_text = str(raw_result)
await emit("node_exit", node="synthesize")
# ── Node 4: Format Output ──
@@ -164,14 +179,15 @@ async def run_fce(
if airport_status:
data_sources.append("faa_status_live")
if crew_notes:
data_sources.append("get_crew_notes")
data_sources.append("crew_notes")
notification = {
"flight_id": flight_id,
"type": "DELAY_NOTIFICATION" if status == "DELAYED" else f"{status}_NOTIFICATION",
"status": status,
"delay_minutes": delay_minutes,
"notification_text": notification_text if isinstance(notification_text, str) else str(notification_text),
"notification_text": notification_text,
"llm_provider": llm_provider,
"generated_at": datetime.now(timezone.utc).isoformat(),
"data_sources": data_sources,
"human_approved": True, # auto-approve in demo

View File

@@ -242,7 +242,7 @@ async def run_handover(
hub_label = ", ".join(target_hubs) if len(target_hubs) < 5 else "ALL HUBS"
t0 = time.time()
brief_text = await mcp.call_tool("ops", "generate_narrative", {
raw_result = await mcp.call_tool("ops", "generate_narrative", {
"context": {
"hub": hub_label,
"shift_time": shift_time,
@@ -254,6 +254,22 @@ async def run_handover(
latency = int((time.time() - t0) * 1000)
await emit("tool_call_end", tool="generate_narrative", latency_ms=latency, is_live=False)
# Parse structured response
import json as _json
llm_provider = "template"
if isinstance(raw_result, dict) and "text" in raw_result:
brief_text = raw_result["text"]
llm_provider = raw_result.get("provider", "unknown")
elif isinstance(raw_result, str):
try:
parsed = _json.loads(raw_result)
brief_text = parsed.get("text", raw_result)
llm_provider = parsed.get("provider", "unknown")
except (_json.JSONDecodeError, TypeError):
brief_text = raw_result
else:
brief_text = str(raw_result)
await emit("node_exit", node="synthesize")
# ── Node 4: Format Output ──
@@ -263,7 +279,8 @@ async def run_handover(
result = {
"type": "HANDOVER_BRIEF",
"hubs": target_hubs,
"brief_text": brief_text if isinstance(brief_text, str) else str(brief_text),
"brief_text": brief_text,
"llm_provider": llm_provider,
"summary": {
"immediate_count": len(immediate),
"monitor_count": len(monitor),

View File

@@ -11,20 +11,38 @@ from typing import Any
from fastmcp import Client
# Server configurations for stdio transport
SERVERS = {
"shared": {
def _env() -> dict:
"""Forward LLM-related env vars to MCP server subprocesses."""
import os
env = {}
for key in (
"LLM_PROVIDER", "GROQ_API_KEY", "GROQ_MODEL",
"ANTHROPIC_API_KEY", "ANTHROPIC_MODEL",
"OPENAI_API_KEY", "OPENAI_BASE_URL", "OPENAI_MODEL",
"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION",
"BEDROCK_MODEL_ID", "USE_BEDROCK",
"PATH",
):
val = os.getenv(key)
if val:
env[key] = val
return env
def _server_config(module: str) -> dict:
"""Build server config with current env vars (called at connect time, not import time)."""
return {
"command": "uv",
"args": ["run", "python", "-m", "mcp_servers.shared"],
},
"ops": {
"command": "uv",
"args": ["run", "python", "-m", "mcp_servers.ops"],
},
"passenger": {
"command": "uv",
"args": ["run", "python", "-m", "mcp_servers.passenger"],
},
"args": ["run", "python", "-m", module],
"env": _env(),
}
SERVER_MODULES = {
"shared": "mcp_servers.shared",
"ops": "mcp_servers.ops",
"passenger": "mcp_servers.passenger",
}
# Agent profiles — which servers each agent connects to
@@ -43,9 +61,9 @@ class MCPMultiClient:
async def connect(self, server_names: list[str]) -> None:
"""Connect to the specified MCP servers."""
for name in server_names:
if name not in SERVERS:
raise ValueError(f"Unknown server: {name}. Available: {list(SERVERS.keys())}")
config = {"mcpServers": {"default": SERVERS[name]}}
if name not in SERVER_MODULES:
raise ValueError(f"Unknown server: {name}. Available: {list(SERVER_MODULES.keys())}")
config = {"mcpServers": {"default": _server_config(SERVER_MODULES[name])}}
client = Client(config)
await client.__aenter__()
self._clients[name] = client

View File

@@ -165,6 +165,78 @@ async def set_active_scenario(req: ScenarioUpdate):
return {"error": str(e)}
# ── LLM config routes ──
@app.get("/config/llm")
async def get_llm_config():
    """Report the active LLM provider and per-provider configuration status."""
    import os

    def _info(key_env: str, model_env: str, default_model: str, **extra) -> dict:
        # A provider counts as "configured" when its credential env var is set.
        entry = {
            "configured": bool(os.getenv(key_env)),
            "model": os.getenv(model_env, default_model),
        }
        entry.update(extra)
        return entry

    return {
        "provider": os.getenv("LLM_PROVIDER", "groq"),
        "providers": {
            "groq": _info("GROQ_API_KEY", "GROQ_MODEL", "llama-3.3-70b-versatile"),
            "anthropic": _info(
                "ANTHROPIC_API_KEY", "ANTHROPIC_MODEL", "claude-sonnet-4-20250514"
            ),
            "bedrock": _info(
                "AWS_ACCESS_KEY_ID",
                "BEDROCK_MODEL_ID",
                "anthropic.claude-sonnet-4-20250514-v1:0",
            ),
            "openai": _info(
                "OPENAI_API_KEY",
                "OPENAI_MODEL",
                "gpt-4o",
                base_url=os.getenv("OPENAI_BASE_URL", ""),
            ),
            # Always available — structured fallback that needs no external model.
            "template": {"configured": True, "model": "none (structured fallback)"},
        },
    }
class LLMConfigUpdate(BaseModel):
    """Request body for PUT /config/llm."""

    provider: str  # groq | anthropic | bedrock | openai (see set_llm_config branches)
    api_key: str | None = None  # omitted -> keep whatever key is already in env
    model: str | None = None  # omitted -> keep current model
    base_url: str | None = None  # OpenAI-compatible endpoints only
@app.put("/config/llm")
async def set_llm_config(req: LLMConfigUpdate):
    """Switch LLM provider at runtime. Sets env vars for MCP subprocesses.

    Fix: api_key and model updates are now independent. Previously a model
    (or base_url) change was silently dropped unless the api_key was
    re-submitted in the same request, because the model assignment was
    nested under `if req.api_key`.
    """
    import os

    os.environ["LLM_PROVIDER"] = req.provider
    # provider -> (credential env var, model env var). Bedrock credentials
    # (AWS_* vars) are managed out-of-band, so it has no api_key slot here.
    env_vars = {
        "groq": ("GROQ_API_KEY", "GROQ_MODEL"),
        "anthropic": ("ANTHROPIC_API_KEY", "ANTHROPIC_MODEL"),
        "openai": ("OPENAI_API_KEY", "OPENAI_MODEL"),
        "bedrock": (None, "BEDROCK_MODEL_ID"),
    }
    key_var, model_var = env_vars.get(req.provider, (None, None))
    if key_var and req.api_key:
        os.environ[key_var] = req.api_key
    if model_var and req.model:
        os.environ[model_var] = req.model
    if req.provider == "openai" and req.base_url:
        os.environ["OPENAI_BASE_URL"] = req.base_url
    # No need to update server configs — _server_config() reads env at connect time
    return await get_llm_config()
# ── Scenario data routes ──
@app.get("/scenarios/data/flights")

View File

@@ -5,5 +5,7 @@ metadata:
namespace: unt
data:
DEFAULT_SCENARIO: "weather_disruption_ord"
USE_BEDROCK: "false"
LLM_PROVIDER: "groq"
GROQ_API_KEY: ""  # SECURITY: a real key was committed here — rotate it immediately and inject via a Kubernetes Secret (secretKeyRef), never a ConfigMap
GROQ_MODEL: "llama-3.3-70b-versatile"
LANGFUSE_HOST: "http://langfuse:3000"

View File

@@ -18,6 +18,10 @@ server {
proxy_pass http://api:8000;
}
location /config {
proxy_pass http://api:8000;
}
location /ws/ {
proxy_pass http://api:8000;
proxy_http_version 1.1;

View File

@@ -100,17 +100,36 @@ def get_pending_rebookings(hub: str, limit: int = 20) -> list[dict]:
@mcp.tool()
def generate_narrative(context: dict) -> str:
async def generate_narrative(context: dict) -> str:
"""Synthesizes aggregated operational context into a structured
handover brief for ops managers.
Uses Claude Sonnet via AWS Bedrock Converse API.
Uses Claude via Anthropic SDK (or Bedrock when USE_BEDROCK=true).
Output: prioritized, concise, structured by IMMEDIATE / MONITOR / FYI.
NOTE: In v1, this returns a structured template from the context data.
LLM integration will be added when Bedrock is wired up.
Falls back to template if no API key is configured.
"""
# V1: structured template — will be replaced with Bedrock call
try:
from mcp_servers.shared_llm import generate, _get_provider
hub = context.get("hub", "ALL")
shift_time = context.get("shift_time", datetime.now(timezone.utc).strftime("%H:%M UTC"))
system_prompt = (
f"You are an airline operations shift handover briefing system. "
f"Generate a concise handover brief for {hub} at {shift_time}. "
f"Structure as: HEADER, then IMMEDIATE ACTION (items needing action within 2h), "
f"MONITOR (items that could escalate), FYI (resolved or low-risk). "
f"Be concise — ops managers scan, they don't read paragraphs. "
f"Use the data provided. Do not invent details."
)
text = await generate(system_prompt, json.dumps(context, indent=2))
return json.dumps({"text": text, "provider": _get_provider()})
except Exception:
return json.dumps({"text": _template_narrative(context), "provider": "template"})
def _template_narrative(context: dict) -> str:
"""Structured template fallback when LLM is unavailable."""
sections = []
immediate = context.get("immediate", [])
monitor = context.get("monitor", [])
@@ -119,8 +138,7 @@ def generate_narrative(context: dict) -> str:
hub = context.get("hub", "ALL")
shift_time = context.get("shift_time", datetime.now(timezone.utc).strftime("%H:%M UTC"))
header = f"SHIFT HANDOVER BRIEF — {hub} / {shift_time}"
sections.append(header)
sections.append(f"SHIFT HANDOVER BRIEF — {hub} / {shift_time}")
sections.append(f"Generated: {datetime.now(timezone.utc).strftime('%H:%M UTC')}")
sections.append("")

View File

@@ -5,7 +5,6 @@ notification prompt template (multi-tone).
"""
import json
from datetime import datetime, timezone
from fastmcp import FastMCP
@@ -25,17 +24,33 @@ mcp = FastMCP(
@mcp.tool()
def generate_notification(context: dict) -> str:
async def generate_notification(context: dict) -> str:
"""Synthesizes flight disruption context into an empathetic,
actionable passenger notification.
Uses Claude Sonnet via AWS Bedrock Converse API.
Uses Claude via Anthropic SDK (or Bedrock when USE_BEDROCK=true).
Output: clear, human, no jargon, includes gate/time/status.
NOTE: In v1, this returns a structured template from the context data.
LLM integration will be added when Bedrock is wired up.
Falls back to template if no API key is configured.
"""
# V1: structured template — will be replaced with Bedrock call
try:
from mcp_servers.shared_llm import generate, _get_provider
system_prompt = (
"You are a passenger notification system for Stellar Air. "
"Write a clear, empathetic notification about this flight disruption. "
"Explain WHY the delay or cancellation happened using the operational data provided. "
"Tell the passenger what's happening next: new boarding time, gate, status. "
"Be human and reassuring. No aviation jargon. No speculation. "
"If data is missing for a section, omit it — don't make things up."
)
text = await generate(system_prompt, json.dumps(context, indent=2))
return json.dumps({"text": text, "provider": _get_provider()})
except Exception:
return json.dumps({"text": _template_notification(context), "provider": "template"})
def _template_notification(context: dict) -> str:
"""Structured template fallback when LLM is unavailable."""
flight_id = context.get("flight_id", "")
origin = context.get("origin", "")
destination = context.get("destination", "")

108
mcp_servers/shared_llm.py Normal file
View File

@@ -0,0 +1,108 @@
"""Shared LLM client for MCP server narrative tools.
Multi-provider support — selected via LLM_PROVIDER env var:
groq (default) — Groq API, OpenAI-compatible. Fast, free tier.
Needs GROQ_API_KEY.
anthropic — Direct Anthropic SDK. Needs ANTHROPIC_API_KEY.
bedrock — AWS Bedrock Converse API. Needs AWS credentials.
openai — Any OpenAI-compatible endpoint.
Set OPENAI_API_KEY and optionally OPENAI_BASE_URL.
Usage:
LLM_PROVIDER=groq GROQ_API_KEY=gsk_... python -m mcp_servers.shared
"""
import os
from typing import Literal
Provider = Literal["groq", "anthropic", "bedrock", "openai"]
def _get_provider() -> Provider:
p = os.getenv("LLM_PROVIDER", "groq").lower()
if p in ("groq", "anthropic", "bedrock", "openai"):
return p
return "groq"
async def generate(system_prompt: str, user_content: str, max_tokens: int = 1024) -> str:
    """Run a single-turn LLM call against the configured provider.

    Dispatches on _get_provider(); groq, openai, and any other
    OpenAI-compatible endpoint share one code path.
    """
    dispatch = {
        "anthropic": _generate_anthropic,
        "bedrock": _generate_bedrock,
    }
    handler = dispatch.get(_get_provider(), _generate_openai_compat)
    return await handler(system_prompt, user_content, max_tokens)
async def _generate_openai_compat(
    system_prompt: str, user_content: str, max_tokens: int
) -> str:
    """OpenAI-compatible API (Groq, OpenAI, local, etc).

    Credentials and model come from provider-specific env vars. Raises
    RuntimeError when no API key is configured, so callers' template
    fallback triggers with a clear cause instead of an SDK-internal error.
    """
    import openai

    if _get_provider() == "groq":
        api_key = os.getenv("GROQ_API_KEY")
        base_url = "https://api.groq.com/openai/v1"
        model = os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile")
    else:
        api_key = os.getenv("OPENAI_API_KEY")
        base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1")
        model = os.getenv("OPENAI_MODEL", "gpt-4o")
    if not api_key:
        raise RuntimeError("No API key configured for OpenAI-compatible provider")
    client = openai.AsyncOpenAI(api_key=api_key, base_url=base_url)
    response = await client.chat.completions.create(
        model=model,
        max_tokens=max_tokens,
        temperature=0.7,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_content},
        ],
    )
    # message.content is Optional in the SDK types — normalize to str so
    # callers can json.dumps the result without embedding null.
    return response.choices[0].message.content or ""
async def _generate_anthropic(
    system_prompt: str, user_content: str, max_tokens: int
) -> str:
    """Call Claude directly via the Anthropic SDK (key read from env by the client)."""
    import anthropic

    model = os.getenv("ANTHROPIC_MODEL", "claude-sonnet-4-20250514")
    client = anthropic.AsyncAnthropic()
    message = await client.messages.create(
        model=model,
        system=system_prompt,
        messages=[{"role": "user", "content": user_content}],
        max_tokens=max_tokens,
        temperature=0.7,
    )
    return message.content[0].text
async def _generate_bedrock(
    system_prompt: str, user_content: str, max_tokens: int
) -> str:
    """Call Claude through the AWS Bedrock Converse API.

    boto3 is synchronous; the blocking network call is pushed to a worker
    thread via asyncio.to_thread so it does not stall the event loop.
    (Also removed an unused `import json`.)
    """
    import asyncio

    import boto3

    bedrock = boto3.client(
        "bedrock-runtime",
        region_name=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
    )
    response = await asyncio.to_thread(
        bedrock.converse,
        modelId=os.getenv("BEDROCK_MODEL_ID", "anthropic.claude-sonnet-4-20250514-v1:0"),
        system=[{"text": system_prompt}],
        messages=[{"role": "user", "content": [{"text": user_content}]}],
        inferenceConfig={"maxTokens": max_tokens, "temperature": 0.7},
    )
    return response["output"]["message"]["content"][0]["text"]

View File

@@ -10,6 +10,7 @@ dependencies = [
"langchain-anthropic",
"boto3",
"anthropic",
"openai",
"fastapi",
"uvicorn[standard]",
"pydantic>=2.0",

View File

@@ -25,6 +25,9 @@ class Endpoints:
def crew_notes(flight_id: str) -> str:
return f"/scenarios/data/crew-notes/{flight_id}"
# Config
CONFIG_LLM = "/config/llm"
# Agents
AGENT_FCE = "/agents/fce"
AGENT_HANDOVER = "/agents/handover"

View File

@@ -194,9 +194,15 @@ class TestPassengerServer:
"gate": "H14",
}
})
text = _parse_result(result)
data = _parse_result(result)
# Response is JSON with text + provider
if isinstance(data, dict):
assert "text" in data
assert "provider" in data
text = data["text"]
else:
text = str(data)
assert "UA432" in text
assert "DELAYED" in text or "delayed" in text
@pytest.mark.asyncio
async def test_list_prompts(self):

View File

@@ -1,5 +1,5 @@
<script setup lang="ts">
import { ref, provide } from 'vue'
import { ref, provide, computed } from 'vue'
import { useRouter, useRoute } from 'vue-router'
import ScenarioSelector from './components/ScenarioSelector.vue'
@@ -7,11 +7,37 @@ const router = useRouter()
const route = useRoute()
const scenarioVersion = ref(0)
// View mode for the main ops page
const showOps = ref(true)
const showInternals = ref(false)
function toggleOps() {
if (route.path !== '/') router.push('/')
if (showOps.value && showInternals.value) {
showOps.value = false
} else {
showOps.value = true
}
}
function toggleInternals() {
if (route.path !== '/') router.push('/')
if (showInternals.value && showOps.value) {
showInternals.value = false
} else {
showInternals.value = true
}
}
function onScenarioChange() {
scenarioVersion.value++
}
const isMainPage = computed(() => route.path === '/')
provide('scenarioVersion', scenarioVersion)
provide('showOps', showOps)
provide('showInternals', showInternals)
</script>
<template>
@@ -22,9 +48,10 @@ provide('scenarioVersion', scenarioVersion)
<span class="app-subtitle">NOVA Operations Platform</span>
</div>
<nav class="app-nav">
<router-link to="/" :class="{ active: route.path === '/' }">Operations</router-link>
<router-link to="/internals" :class="{ active: route.path === '/internals' }">Internals</router-link>
<a href="#" :class="{ active: isMainPage && showOps }" @click.prevent="toggleOps">Operations</a>
<a href="#" :class="{ active: isMainPage && showInternals }" @click.prevent="toggleInternals">Internals</a>
<router-link to="/data" :class="{ active: route.path === '/data' }">Data</router-link>
<router-link to="/settings" :class="{ active: route.path === '/settings' }">Settings</router-link>
<a href="/docs/" class="docs-link" target="_blank">Docs</a>
</nav>
<ScenarioSelector @change="onScenarioChange" />

View File

@@ -7,6 +7,7 @@ defineProps<{
generated_at: string
duration_ms: number
hubs: string[]
llm_provider?: string
}
}>()
</script>
@@ -53,7 +54,12 @@ defineProps<{
<div class="brief-footer">
<span>Hubs: {{ data.hubs.join(', ') }}</span>
<span>{{ data.duration_ms }}ms</span>
<span>
<span v-if="data.llm_provider" :style="{ color: data.llm_provider === 'template' ? 'var(--text-dim)' : 'var(--status-live)' }">
{{ data.llm_provider }}
</span>
{{ data.duration_ms }}ms
</span>
</div>
</div>
</template>

View File

@@ -9,6 +9,7 @@ defineProps<{
generated_at: string
human_approved: boolean
duration_ms: number
llm_provider?: string
}
}>()
@@ -38,6 +39,9 @@ const causeColor: Record<string, string> = {
</template>
</span>
<span class="meta">
<span v-if="data.llm_provider" :class="['provider-tag', data.llm_provider === 'template' ? 'mock' : 'live']">
{{ data.llm_provider }}
</span>
{{ data.duration_ms }}ms
<span v-if="data.human_approved" class="approved">approved</span>
</span>
@@ -117,6 +121,24 @@ const causeColor: Record<string, string> = {
font-family: var(--font-mono);
}
.provider-tag {
padding: 1px 6px;
font-family: var(--font-mono);
font-size: 10px;
background: var(--surface-2);
border: 1px solid var(--surface-3);
margin-right: 8px;
}
.provider-tag.live {
border-color: var(--status-live);
color: var(--status-live);
}
.provider-tag.mock {
color: var(--text-dim);
}
.approved {
color: var(--status-live);
margin-left: 8px;

View File

@@ -0,0 +1,112 @@
import { ref, onUnmounted } from 'vue'
// One node in the agent execution graph; status is set to 'processing' on
// node_enter and 'done' on node_exit.
export interface GraphNode {
  id: string
  status: string
}
// Identifies the agent invocation currently streaming events.
export interface AgentRun {
  agent: string
  run_id: string
}
// A single rendered line in the tool-call log stream.
export interface LogEntry {
  level: string
  stage: string
  msg: string
  ts: string
}
// Composable: subscribes to the /ws/agent-events WebSocket and exposes
// reactive agent status, log entries, graph nodes, and the current run.
export function useAgentEvents() {
  const agentStatus = ref<'idle' | 'live' | 'processing' | 'error'>('idle')
  const entries = ref<LogEntry[]>([])
  const graphNodes = ref<GraphNode[]>([])
  const currentRun = ref<AgentRun | null>(null)

  let ws: WebSocket | null = null
  // Set by disconnect() so onclose knows the close was deliberate.
  let closedByUser = false

  function connect() {
    if (ws) return
    closedByUser = false
    const protocol = location.protocol === 'https:' ? 'wss:' : 'ws:'
    ws = new WebSocket(`${protocol}//${location.host}/ws/agent-events`)
    ws.onopen = () => { agentStatus.value = 'live' }
    ws.onmessage = (e) => { handleEvent(JSON.parse(e.data)) }
    ws.onclose = () => {
      agentStatus.value = 'idle'
      ws = null
      // Bug fix: only auto-reconnect on unexpected closes. The old handler
      // rescheduled connect() even after disconnect() (component unmount),
      // leaking a permanent reconnect loop.
      if (!closedByUser) setTimeout(connect, 3000)
    }
    ws.onerror = () => { agentStatus.value = 'error' }
  }

  function disconnect() {
    closedByUser = true
    ws?.close()
    ws = null
  }

  function handleEvent(event: any) {
    const ts = event.timestamp || new Date().toISOString()
    const time = ts.split('T')[1]?.split('.')[0] || ts
    switch (event.type) {
      case 'agent_start':
        currentRun.value = { agent: event.agent, run_id: event.run_id }
        agentStatus.value = 'processing'
        graphNodes.value = []
        entries.value = [{
          level: 'info', stage: 'system',
          msg: `Agent ${event.agent} started (${event.run_id})`, ts: time,
        }]
        break
      case 'node_enter':
        graphNodes.value.push({ id: event.node, status: 'processing' })
        entries.value.push({
          level: 'info', stage: event.node,
          msg: `→ entering ${event.node}`, ts: time,
        })
        break
      case 'node_exit': {
        const node = graphNodes.value.find(n => n.id === event.node)
        if (node) node.status = 'done'
        break
      }
      case 'tool_call_end': {
        const liveTag = event.is_live ? ' (live)' : ' (mock)'
        entries.value.push({
          level: 'info', stage: '',
          // Bug fix: separator was missing, rendering e.g. "get_weather812ms".
          msg: `${event.tool} ${event.latency_ms}ms ✓${liveTag}`, ts: time,
        })
        break
      }
      case 'tool_call_error':
        entries.value.push({
          level: 'error', stage: '',
          msg: `${event.tool} — FAILED: ${event.error}`, ts: time,
        })
        break
      case 'agent_end':
        agentStatus.value = 'live'
        entries.value.push({
          level: 'info', stage: 'system',
          msg: `Agent complete: ${event.output_summary}`, ts: time,
        })
        break
    }
  }

  onUnmounted(disconnect)
  return { agentStatus, entries, graphNodes, currentRun, connect, disconnect }
}

View File

@@ -4,15 +4,15 @@ import 'soleprint-ui/src/tokens.css'
import './styles/mars-tokens.css'
import App from './App.vue'
import OpsNotifications from './pages/OpsNotifications.vue'
import AgentInternals from './pages/AgentInternals.vue'
import ScenarioData from './pages/ScenarioData.vue'
import Settings from './pages/Settings.vue'
const router = createRouter({
history: createWebHistory(),
routes: [
{ path: '/', component: OpsNotifications },
{ path: '/internals', component: AgentInternals },
{ path: '/data', component: ScenarioData },
{ path: '/settings', component: Settings },
],
})

View File

@@ -1,103 +1,11 @@
<script setup lang="ts">
import { ref, onMounted, onUnmounted, nextTick } from 'vue'
import { onMounted } from 'vue'
import { Panel, SplitPane, LogRenderer } from 'soleprint-ui'
import type { LogEntry } from 'soleprint-ui'
import { useAgentEvents } from '../composables/useAgentEvents'
const agentStatus = ref<'idle' | 'live' | 'processing' | 'error'>('idle')
const entries = ref<LogEntry[]>([])
const graphNodes = ref<{ id: string; status: string }[]>([])
const currentRun = ref<{ agent: string; run_id: string } | null>(null)
const { agentStatus, entries, graphNodes, currentRun, connect } = useAgentEvents()
let ws: WebSocket | null = null
function connectWs() {
const protocol = location.protocol === 'https:' ? 'wss:' : 'ws:'
ws = new WebSocket(`${protocol}//${location.host}/ws/agent-events`)
ws.onopen = () => {
agentStatus.value = 'live'
}
ws.onmessage = (e) => {
const event = JSON.parse(e.data)
handleEvent(event)
}
ws.onclose = () => {
agentStatus.value = 'idle'
setTimeout(connectWs, 3000)
}
ws.onerror = () => {
agentStatus.value = 'error'
}
}
function handleEvent(event: any) {
const ts = event.timestamp || new Date().toISOString()
const time = ts.split('T')[1]?.split('.')[0] || ts
switch (event.type) {
case 'agent_start':
currentRun.value = { agent: event.agent, run_id: event.run_id }
agentStatus.value = 'processing'
graphNodes.value = []
entries.value = [{
level: 'info',
stage: 'system',
msg: `Agent ${event.agent} started (${event.run_id})`,
ts: time,
}]
break
case 'node_enter':
graphNodes.value.push({ id: event.node, status: 'processing' })
entries.value.push({
level: 'info',
stage: event.node,
msg: `→ entering ${event.node}`,
ts: time,
})
break
case 'node_exit':
const node = graphNodes.value.find(n => n.id === event.node)
if (node) node.status = 'done'
break
case 'tool_call_end':
const liveTag = event.is_live ? ' (live)' : ' (mock)'
entries.value.push({
level: 'info',
stage: '',
msg: `${event.tool}${event.latency_ms}ms ✓${liveTag}`,
ts: time,
})
break
case 'tool_call_error':
entries.value.push({
level: 'error',
stage: '',
msg: `${event.tool} — FAILED: ${event.error}`,
ts: time,
})
break
case 'agent_end':
agentStatus.value = 'live'
entries.value.push({
level: 'info',
stage: 'system',
msg: `Agent complete: ${event.output_summary}`,
ts: time,
})
break
}
}
onMounted(connectWs)
onUnmounted(() => { ws?.close() })
onMounted(connect)
</script>
<template>
@@ -111,7 +19,7 @@ onUnmounted(() => { ws?.close() })
</div>
<div v-else class="graph-nodes">
<div
v-for="(node, i) in graphNodes"
v-for="node in graphNodes"
:key="node.id"
:class="['graph-node', node.status]"
>
@@ -157,10 +65,7 @@ onUnmounted(() => { ws?.close() })
min-height: 0;
}
.graph-container {
padding: 16px;
height: 100%;
}
.graph-container { padding: 16px; height: 100%; }
.graph-nodes {
display: flex;
@@ -208,14 +113,8 @@ onUnmounted(() => { ws?.close() })
box-shadow: 0 0 8px var(--status-live);
}
.node-label {
font-family: var(--font-mono);
font-size: 13px;
}
.summary-panel {
flex-shrink: 0;
}
.node-label { font-family: var(--font-mono); font-size: 13px; }
.summary-panel { flex-shrink: 0; }
.summary {
display: flex;

View File

@@ -1,8 +1,9 @@
<script setup lang="ts">
import { ref, onMounted, watch, inject } from 'vue'
import { Panel } from 'soleprint-ui'
import { Panel, SplitPane, LogRenderer } from 'soleprint-ui'
import NotificationCard from '../components/NotificationCard.vue'
import HandoverBrief from '../components/HandoverBrief.vue'
import { useAgentEvents } from '../composables/useAgentEvents'
const flights = ref<any[]>([])
const selectedFlight = ref('')
@@ -12,6 +13,13 @@ const notification = ref<any>(null)
const handoverBrief = ref<any>(null)
const scenarioVersion = inject<any>('scenarioVersion')
const showOps = inject<any>('showOps')
const showInternals = inject<any>('showInternals')
const { agentStatus, entries, graphNodes, currentRun, connect } = useAgentEvents()
// Connect WebSocket immediately so we don't miss events
onMounted(connect)
watch(scenarioVersion, () => {
loadFlights()
notification.value = null
@@ -34,6 +42,7 @@ async function runFce() {
if (!selectedFlight.value) return
fceStatus.value = 'processing'
notification.value = null
if (!showInternals.value) showInternals.value = true
const res = await fetch('/agents/fce', {
method: 'POST',
@@ -42,7 +51,6 @@ async function runFce() {
})
const { run_id } = await res.json()
// Poll for result
const poll = setInterval(async () => {
const r = await fetch(`/agents/runs/${run_id}`)
const data = await r.json()
@@ -60,6 +68,7 @@ async function runFce() {
async function runHandover() {
handoverStatus.value = 'processing'
handoverBrief.value = null
if (!showInternals.value) showInternals.value = true
const res = await fetch('/agents/handover', {
method: 'POST',
@@ -86,7 +95,9 @@ onMounted(loadFlights)
</script>
<template>
<div class="ops-page">
<div :class="['ops-layout', { split: showOps && showInternals }]">
<!-- Ops pane (left) -->
<div v-show="showOps" class="ops-pane">
<Panel title="FCE — Behind Every Departure" :status="fceStatus">
<template #actions>
<select v-model="selectedFlight" class="flight-select">
@@ -96,16 +107,9 @@ onMounted(loadFlights)
{{ fceStatus === 'processing' ? 'Running...' : 'Run FCE' }}
</button>
</template>
<div v-if="notification" class="result-area">
<NotificationCard :data="notification" />
</div>
<div v-else-if="fceStatus === 'processing'" class="loading">
Running agent... gathering flight data, weather, crew notes...
</div>
<div v-else class="empty">
Select a flight and click Run FCE to generate a notification.
</div>
<div v-if="notification" class="result-area"><NotificationCard :data="notification" /></div>
<div v-else-if="fceStatus === 'processing'" class="loading">Running agent...</div>
<div v-else class="empty">Select a flight and click Run FCE.</div>
</Panel>
<Panel title="Shift Handover Brief" :status="handoverStatus">
@@ -114,27 +118,83 @@ onMounted(loadFlights)
{{ handoverStatus === 'processing' ? 'Running...' : 'Run Handover' }}
</button>
</template>
<div v-if="handoverBrief" class="result-area"><HandoverBrief :data="handoverBrief" /></div>
<div v-else-if="handoverStatus === 'processing'" class="loading">Running agent...</div>
<div v-else class="empty">Click Run Handover.</div>
</Panel>
</div>
<div v-if="handoverBrief" class="result-area">
<HandoverBrief :data="handoverBrief" />
<!-- Internals pane (right) -->
<div v-show="showInternals" class="internals-pane">
<Panel title="Agent Graph" :status="agentStatus">
<div class="graph-container">
<div v-if="graphNodes.length === 0" class="empty">Waiting for agent run...</div>
<div v-else class="graph-nodes">
<div v-for="node in graphNodes" :key="node.id" :class="['graph-node', node.status]">
<div class="node-dot"></div>
<span class="node-label">{{ node.id }}</span>
</div>
<div v-else-if="handoverStatus === 'processing'" class="loading">
Running agent... scanning all hubs for active issues...
<div class="graph-edge-line"></div>
</div>
<div v-else class="empty">
Click Run Handover to generate a shift handover brief.
</div>
</Panel>
<Panel title="Tool Call Stream" :status="agentStatus" class="stream-panel">
<LogRenderer :entries="entries" :auto-scroll="true" />
</Panel>
<div v-if="currentRun" class="run-summary">
{{ currentRun.agent }} / {{ currentRun.run_id }} / {{ entries.length }} events
</div>
</div>
</div>
</template>
<style scoped>
.ops-page {
.ops-layout {
display: flex;
gap: 16px;
height: calc(100vh - 80px);
position: relative;
}
/* Both visible: 50/50 with divider */
.ops-layout.split > .ops-pane { flex: 1; }
.ops-layout.split > .internals-pane { flex: 1; border-left: var(--panel-border); padding-left: 16px; }
/* Single pane: full width */
.ops-layout > .internals-pane { flex: 1; }
.ops-layout > .ops-pane { flex: 1; }
.ops-pane {
display: flex;
flex-direction: column;
gap: 24px;
overflow: auto;
height: 100%;
min-width: 0;
}
.internals-pane {
display: flex;
flex-direction: column;
gap: 8px;
height: 100%;
overflow: auto;
min-width: 0;
}
.internals-pane > :first-child { flex-shrink: 0; }
.internals-pane > .stream-panel { flex: 1; min-height: 0; }
.run-summary {
padding: 4px 12px;
font-family: var(--font-mono);
font-size: 11px;
color: var(--text-dim);
flex-shrink: 0;
}
.flight-select {
background: var(--surface-2);
color: var(--text-primary);
@@ -152,15 +212,12 @@ onMounted(loadFlights)
font-family: var(--font-mono);
font-size: 12px;
cursor: pointer;
transition: background 0.15s;
}
.run-btn:hover { background: var(--accent-dim); }
.run-btn:disabled { opacity: 0.5; cursor: not-allowed; }
.result-area {
padding: 16px;
}
.result-area { padding: 16px; }
.loading, .empty {
padding: 32px;
@@ -170,7 +227,58 @@ onMounted(loadFlights)
font-size: 13px;
}
.loading {
color: var(--accent);
.loading { color: var(--accent); }
.graph-container { padding: 12px; }
.graph-nodes {
display: flex;
flex-direction: column;
gap: 6px;
position: relative;
padding-left: 12px;
}
.graph-edge-line {
position: absolute;
left: 17px;
top: 10px;
bottom: 10px;
width: 2px;
background: var(--surface-3);
}
.graph-node {
display: flex;
align-items: center;
gap: 10px;
padding: 6px 10px;
background: var(--surface-2);
border: var(--panel-border);
position: relative;
z-index: 1;
}
.node-dot {
width: 7px;
height: 7px;
border-radius: 50%;
background: var(--status-idle);
flex-shrink: 0;
}
.graph-node.processing .node-dot {
background: var(--status-processing);
box-shadow: 0 0 8px var(--status-processing);
}
.graph-node.done .node-dot {
background: var(--status-live);
box-shadow: 0 0 8px var(--status-live);
}
.node-label {
font-family: var(--font-mono);
font-size: 12px;
}
</style>

View File

@@ -0,0 +1,257 @@
<script setup lang="ts">
import { ref, onMounted } from 'vue'
import { Panel } from 'soleprint-ui'
// Full payload from GET /config/llm (active provider + per-provider status); null until loaded.
const config = ref<any>(null)
// Form state for the provider currently being edited.
const selectedProvider = ref('')
// Write-only: blank means "keep the key already stored on the server".
const apiKey = ref('')
const model = ref('')
const baseUrl = ref('')
// True while the PUT request is in flight; disables the Apply button.
const saving = ref(false)
/**
 * Fetch the current LLM configuration and seed the form fields
 * from the active provider's saved settings.
 */
async function loadConfig() {
  const res = await fetch('/config/llm')
  // Guard against error responses: without this, a non-2xx body would be
  // parsed as the config object and corrupt the form state.
  if (!res.ok) throw new Error(`failed to load LLM config: ${res.status}`)
  config.value = await res.json()
  selectedProvider.value = config.value.provider
  const p = config.value.providers[selectedProvider.value]
  model.value = p?.model || ''
  baseUrl.value = p?.base_url || ''
  // Never echo a stored secret back into the form.
  apiKey.value = ''
}
// Re-seed the editable fields from the newly selected provider's saved
// settings, and clear the key input (blank = keep the stored key).
function onProviderChange() {
  const selected = config.value?.providers[selectedProvider.value]
  model.value = selected?.model || ''
  baseUrl.value = selected?.base_url || ''
  apiKey.value = ''
}
/**
 * Persist the provider selection (plus any fields the user filled in)
 * via PUT /config/llm, then refresh local state from the response.
 */
async function save() {
  saving.value = true
  try {
    const body: any = { provider: selectedProvider.value }
    // Only send fields the user actually filled in; a blank API key
    // means "keep the stored key" server-side.
    if (apiKey.value) body.api_key = apiKey.value
    if (model.value) body.model = model.value
    if (baseUrl.value) body.base_url = baseUrl.value
    const res = await fetch('/config/llm', {
      method: 'PUT',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(body),
    })
    config.value = await res.json()
  } finally {
    // Reset even when the request throws — otherwise the Apply button
    // is stuck on "Saving..." forever.
    saving.value = false
  }
}
// Display labels for the provider picker; key order drives the button
// order rendered by the template's v-for.
const providerLabels: Record<string, string> = {
  groq: 'Groq (Llama 3.3 70B)',
  anthropic: 'Anthropic (Claude)',
  bedrock: 'AWS Bedrock (Claude)',
  openai: 'OpenAI-compatible',
  template: 'Template (no LLM)',
}
// Load the saved configuration as soon as the page mounts.
onMounted(loadConfig)
</script>
<template>
<div class="settings-page">
<Panel title="LLM Provider" status="idle">
<!-- Form renders only after GET /config/llm has populated `config` -->
<div v-if="config" class="config-form">
<div class="field">
<label>Provider</label>
<!-- One button per known provider; a green dot marks providers with a stored key -->
<div class="provider-options">
<button
v-for="(label, key) in providerLabels"
:key="key"
:class="['provider-btn', { active: selectedProvider === key, configured: config.providers[key]?.configured }]"
@click="selectedProvider = key; onProviderChange()"
>
<span class="provider-name">{{ label }}</span>
<span v-if="config.providers[key]?.configured" class="configured-dot"></span>
</button>
</div>
</div>
<!-- Model field hidden for the no-LLM 'template' provider -->
<div v-if="selectedProvider !== 'template'" class="field">
<label>Model</label>
<input v-model="model" class="input" placeholder="model name" />
</div>
<!-- API key field hidden for 'template' and 'bedrock' (bedrock uses AWS credentials) -->
<div v-if="selectedProvider !== 'template' && selectedProvider !== 'bedrock'" class="field">
<label>API Key</label>
<input
v-model="apiKey"
type="password"
class="input"
:placeholder="config.providers[selectedProvider]?.configured ? '(configured — leave blank to keep)' : 'enter API key'"
/>
</div>
<!-- Base URL only applies to the OpenAI-compatible provider -->
<div v-if="selectedProvider === 'openai'" class="field">
<label>Base URL</label>
<input v-model="baseUrl" class="input" placeholder="https://api.openai.com/v1" />
</div>
<div class="actions">
<button class="save-btn" @click="save" :disabled="saving">
{{ saving ? 'Saving...' : 'Apply' }}
</button>
<span v-if="config.provider === selectedProvider" class="active-label">active</span>
</div>
<!-- Read-only summary of every provider's configured/active state -->
<div class="status-table">
<div class="status-header">Provider Status</div>
<div v-for="(info, key) in config.providers" :key="key" class="status-row">
<span :class="['status-name', { active: config.provider === key }]">{{ key }}</span>
<span :class="['status-badge', info.configured ? 'ok' : 'missing']">
{{ info.configured ? 'configured' : 'no key' }}
</span>
<span class="status-model">{{ info.model }}</span>
</div>
</div>
</div>
</Panel>
</div>
</template>
<style scoped>
/* ── Page layout ── */
.settings-page {
max-width: 700px;
}
.config-form {
padding: 16px;
display: flex;
flex-direction: column;
gap: 16px;
}
/* ── Generic labelled field ── */
.field {
display: flex;
flex-direction: column;
gap: 6px;
}
.field label {
font-family: var(--font-mono);
font-size: 11px;
color: var(--text-dim);
text-transform: uppercase;
letter-spacing: 1px;
}
/* ── Provider picker buttons ── */
.provider-options {
display: flex;
flex-direction: column;
gap: 4px;
}
.provider-btn {
display: flex;
align-items: center;
gap: 8px;
padding: 8px 12px;
background: var(--surface-2);
border: var(--panel-border);
color: var(--text-secondary);
font-family: var(--font-mono);
font-size: 12px;
cursor: pointer;
text-align: left;
}
.provider-btn:hover { background: var(--surface-3); }
.provider-btn.active { border-color: var(--accent); color: var(--text-primary); background: var(--accent-dim); }
/* Green dot marking providers that already have a stored key */
.configured-dot {
width: 6px;
height: 6px;
border-radius: 50%;
background: var(--status-live);
margin-left: auto;
}
/* ── Text inputs ── */
.input {
background: var(--surface-0);
color: var(--text-primary);
border: var(--panel-border);
padding: 6px 10px;
font-family: var(--font-mono);
font-size: 13px;
}
.input:focus { outline: 1px solid var(--accent); }
/* ── Apply button row ── */
.actions {
display: flex;
align-items: center;
gap: 12px;
}
.save-btn {
background: var(--accent);
color: white;
border: none;
padding: 6px 24px;
font-family: var(--font-mono);
font-size: 12px;
cursor: pointer;
}
.save-btn:hover { background: var(--accent-dim); }
.save-btn:disabled { opacity: 0.5; }
.active-label {
font-family: var(--font-mono);
font-size: 11px;
color: var(--status-live);
}
/* ── Provider status table ── */
.status-table {
margin-top: 8px;
border-top: var(--panel-border);
padding-top: 12px;
}
.status-header {
font-family: var(--font-mono);
font-size: 11px;
color: var(--text-dim);
text-transform: uppercase;
letter-spacing: 1px;
margin-bottom: 8px;
}
.status-row {
display: flex;
align-items: center;
gap: 12px;
padding: 4px 0;
font-size: 12px;
}
.status-name {
font-family: var(--font-mono);
width: 80px;
color: var(--text-dim);
}
.status-name.active { color: var(--accent); font-weight: 600; }
.status-badge {
font-family: var(--font-mono);
font-size: 10px;
padding: 1px 6px;
width: 80px;
text-align: center;
}
.status-badge.ok { color: var(--status-live); border: 1px solid var(--status-live); }
.status-badge.missing { color: var(--text-dim); border: 1px solid var(--surface-3); }
.status-model {
font-family: var(--font-mono);
font-size: 11px;
color: var(--text-dim);
}
</style>

33
uv.lock generated
View File

@@ -1091,6 +1091,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/58/78/548fb8e07b1a341746bfbecb32f2c268470f45fa028aacdbd10d9bc73aab/numpy-2.4.4-cp314-cp314t-win_arm64.whl", hash = "sha256:ba203255017337d39f89bdd58417f03c4426f12beed0440cfd933cb15f8669c7", size = 10566643, upload-time = "2026-03-29T13:21:34.339Z" },
]
[[package]]
name = "openai"
version = "2.31.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/94/fe/64b3d035780b3188f86c4f6f1bc202e7bb74757ef028802112273b9dcacf/openai-2.31.0.tar.gz", hash = "sha256:43ca59a88fc973ad1848d86b98d7fac207e265ebbd1828b5e4bdfc85f79427a5", size = 684772, upload-time = "2026-04-08T21:01:41.797Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/66/bc/a8f7c3aa03452fedbb9af8be83e959adba96a6b4a35e416faffcc959c568/openai-2.31.0-py3-none-any.whl", hash = "sha256:44e1344d87e56a493d649b17e2fac519d1368cbb0745f59f1957c4c26de50a0a", size = 1153479, upload-time = "2026-04-08T21:01:39.217Z" },
]
[[package]]
name = "openapi-pydantic"
version = "0.5.1"
@@ -1895,6 +1914,7 @@ dependencies = [
{ name = "langfuse" },
{ name = "langgraph" },
{ name = "mcp", extra = ["cli"] },
{ name = "openai" },
{ name = "pydantic" },
{ name = "uvicorn", extra = ["standard"] },
{ name = "websockets" },
@@ -1921,6 +1941,7 @@ requires-dist = [
{ name = "langfuse" },
{ name = "langgraph" },
{ name = "mcp", extras = ["cli"] },
{ name = "openai" },
{ name = "pydantic", specifier = ">=2.0" },
{ name = "pytest", marker = "extra == 'dev'" },
{ name = "pytest-asyncio", marker = "extra == 'dev'" },
@@ -1939,6 +1960,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d7/c1/eb8f9debc45d3b7918a32ab756658a0904732f75e555402972246b0b8e71/tenacity-9.1.4-py3-none-any.whl", hash = "sha256:6095a360c919085f28c6527de529e76a06ad89b23659fa881ae0649b867a9d55", size = 28926, upload-time = "2026-02-07T10:45:32.24Z" },
]
[[package]]
name = "tqdm"
version = "4.67.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" },
]
[[package]]
name = "typer"
version = "0.24.1"