diff --git a/.env.example b/.env.example
index 8969b34..b07e451 100644
--- a/.env.example
+++ b/.env.example
@@ -1,11 +1,24 @@
-# AWS Bedrock (production pattern)
+# LLM Provider (groq | anthropic | bedrock | openai)
+LLM_PROVIDER=groq
+
+# Groq (default — fast, free tier)
+GROQ_API_KEY=gsk_...
+GROQ_MODEL=llama-3.3-70b-versatile
+
+# Anthropic (direct SDK)
+ANTHROPIC_API_KEY=sk-ant-...
+ANTHROPIC_MODEL=claude-sonnet-4-20250514
+
+# AWS Bedrock
 AWS_ACCESS_KEY_ID=AKIA...
 AWS_SECRET_ACCESS_KEY=...
 AWS_DEFAULT_REGION=us-east-1
-USE_BEDROCK=false
+BEDROCK_MODEL_ID=anthropic.claude-sonnet-4-20250514-v1:0
 
-# Anthropic (fallback for local dev)
-ANTHROPIC_API_KEY=sk-ant-...
+# OpenAI-compatible (any endpoint)
+OPENAI_API_KEY=sk-...
+OPENAI_BASE_URL=https://api.openai.com/v1
+OPENAI_MODEL=gpt-4o
 
 # Kong Konnect (optional)
 KONG_PROXY_URL=
diff --git a/agents/fce.py b/agents/fce.py
index 24a1ea8..da33ad5 100644
--- a/agents/fce.py
+++ b/agents/fce.py
@@ -148,10 +148,25 @@ async def run_fce(
     }
 
     t0 = time.time()
-    notification_text = await mcp.call_tool("passenger", "generate_notification", {"context": context})
+    raw_result = await mcp.call_tool("passenger", "generate_notification", {"context": context})
     latency = int((time.time() - t0) * 1000)
     await emit("tool_call_end", tool="generate_notification", latency_ms=latency, is_live=False)
 
+    # Parse structured response (text + provider)
+    llm_provider = "template"
+    if isinstance(raw_result, dict) and "text" in raw_result:
+        notification_text = raw_result["text"]
+        llm_provider = raw_result.get("provider", "unknown")
+    elif isinstance(raw_result, str):
+        try:
+            parsed = json.loads(raw_result)
+            notification_text = parsed.get("text", raw_result)
+            llm_provider = parsed.get("provider", "unknown")
+        except (json.JSONDecodeError, TypeError):
+            notification_text = raw_result
+    else:
+        notification_text = str(raw_result)
+
     await emit("node_exit", node="synthesize")
 
     # ── Node 4: Format Output ──
@@ -164,14 +179,15 @@ async def run_fce(
     if airport_status:
         data_sources.append("faa_status_live")
     if crew_notes:
-        data_sources.append("get_crew_notes")
+        data_sources.append("crew_notes")
 
     notification = {
         "flight_id": flight_id,
         "type": "DELAY_NOTIFICATION" if status == "DELAYED" else f"{status}_NOTIFICATION",
         "status": status,
         "delay_minutes": delay_minutes,
-        "notification_text": notification_text if isinstance(notification_text, str) else str(notification_text),
+        "notification_text": notification_text,
+        "llm_provider": llm_provider,
         "generated_at": datetime.now(timezone.utc).isoformat(),
         "data_sources": data_sources,
         "human_approved": True,  # auto-approve in demo
diff --git a/agents/handover.py b/agents/handover.py
index 45db417..14897bf 100644
--- a/agents/handover.py
+++ b/agents/handover.py
@@ -242,7 +242,7 @@ async def run_handover(
     hub_label = ", ".join(target_hubs) if len(target_hubs) < 5 else "ALL HUBS"
 
     t0 = time.time()
-    brief_text = await mcp.call_tool("ops", "generate_narrative", {
+    raw_result = await mcp.call_tool("ops", "generate_narrative", {
         "context": {
             "hub": hub_label,
             "shift_time": shift_time,
@@ -254,6 +254,22 @@ async def run_handover(
     latency = int((time.time() - t0) * 1000)
     await emit("tool_call_end", tool="generate_narrative", latency_ms=latency, is_live=False)
 
+    # Parse structured response
+    import json as _json
+    llm_provider = "template"
+    if isinstance(raw_result, dict) and "text" in raw_result:
+        brief_text = raw_result["text"]
+        llm_provider = raw_result.get("provider", "unknown")
+    elif isinstance(raw_result, str):
+        try:
+            parsed = _json.loads(raw_result)
+            brief_text = parsed.get("text", raw_result)
+            llm_provider = parsed.get("provider", "unknown")
+        except (_json.JSONDecodeError, TypeError):
+            brief_text = raw_result
+    else:
+        brief_text = str(raw_result)
+
     await emit("node_exit", node="synthesize")
 
     # ── Node 4: Format Output ──
@@ -263,7 +279,8 @@ async def run_handover(
     result = {
         "type": "HANDOVER_BRIEF",
         "hubs": target_hubs,
-        "brief_text": brief_text if isinstance(brief_text, str) else str(brief_text),
+        "brief_text": brief_text,
+        "llm_provider": llm_provider,
         "summary": {
             "immediate_count": len(immediate),
             "monitor_count": len(monitor),
diff --git a/agents/shared/mcp_client.py b/agents/shared/mcp_client.py
index a32af52..661400b 100644
--- a/agents/shared/mcp_client.py
+++ b/agents/shared/mcp_client.py
@@ -11,20 +11,38 @@ from typing import Any
 
 from fastmcp import Client
 
-# Server configurations for stdio transport
-SERVERS = {
-    "shared": {
+def _env() -> dict:
+    """Forward LLM-related env vars to MCP server subprocesses."""
+    import os
+
+    env = {}
+    for key in (
+        "LLM_PROVIDER", "GROQ_API_KEY", "GROQ_MODEL",
+        "ANTHROPIC_API_KEY", "ANTHROPIC_MODEL",
+        "OPENAI_API_KEY", "OPENAI_BASE_URL", "OPENAI_MODEL",
+        "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_DEFAULT_REGION",
+        "BEDROCK_MODEL_ID", "USE_BEDROCK",
+        "PATH",
+    ):
+        val = os.getenv(key)
+        if val:
+            env[key] = val
+    return env
+
+
+def _server_config(module: str) -> dict:
+    """Build server config with current env vars (called at connect time, not import time)."""
+    return {
         "command": "uv",
-        "args": ["run", "python", "-m", "mcp_servers.shared"],
-    },
-    "ops": {
-        "command": "uv",
-        "args": ["run", "python", "-m", "mcp_servers.ops"],
-    },
-    "passenger": {
-        "command": "uv",
-        "args": ["run", "python", "-m", "mcp_servers.passenger"],
-    },
+        "args": ["run", "python", "-m", module],
+        "env": _env(),
+    }
+
+
+SERVER_MODULES = {
+    "shared": "mcp_servers.shared",
+    "ops": "mcp_servers.ops",
+    "passenger": "mcp_servers.passenger",
 }
 
 # Agent profiles — which servers each agent connects to
@@ -43,9 +61,9 @@ class MCPMultiClient:
     async def connect(self, server_names: list[str]) -> None:
         """Connect to the specified MCP servers."""
         for name in server_names:
-            if name not in SERVERS:
-                raise ValueError(f"Unknown server: {name}. Available: {list(SERVERS.keys())}")
-            config = {"mcpServers": {"default": SERVERS[name]}}
+            if name not in SERVER_MODULES:
+                raise ValueError(f"Unknown server: {name}. Available: {list(SERVER_MODULES.keys())}")
Available: {list(SERVER_MODULES.keys())}") + config = {"mcpServers": {"default": _server_config(SERVER_MODULES[name])}} client = Client(config) await client.__aenter__() self._clients[name] = client diff --git a/api/main.py b/api/main.py index 9b2c388..a221307 100644 --- a/api/main.py +++ b/api/main.py @@ -165,6 +165,78 @@ async def set_active_scenario(req: ScenarioUpdate): return {"error": str(e)} +# ── LLM config routes ── + +@app.get("/config/llm") +async def get_llm_config(): + """Current LLM provider configuration.""" + import os + provider = os.getenv("LLM_PROVIDER", "groq") + return { + "provider": provider, + "providers": { + "groq": { + "configured": bool(os.getenv("GROQ_API_KEY")), + "model": os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile"), + }, + "anthropic": { + "configured": bool(os.getenv("ANTHROPIC_API_KEY")), + "model": os.getenv("ANTHROPIC_MODEL", "claude-sonnet-4-20250514"), + }, + "bedrock": { + "configured": bool(os.getenv("AWS_ACCESS_KEY_ID")), + "model": os.getenv("BEDROCK_MODEL_ID", "anthropic.claude-sonnet-4-20250514-v1:0"), + }, + "openai": { + "configured": bool(os.getenv("OPENAI_API_KEY")), + "model": os.getenv("OPENAI_MODEL", "gpt-4o"), + "base_url": os.getenv("OPENAI_BASE_URL", ""), + }, + "template": { + "configured": True, + "model": "none (structured fallback)", + }, + }, + } + + +class LLMConfigUpdate(BaseModel): + provider: str + api_key: str | None = None + model: str | None = None + base_url: str | None = None + + +@app.put("/config/llm") +async def set_llm_config(req: LLMConfigUpdate): + """Switch LLM provider at runtime. Sets env vars for MCP subprocesses.""" + import os + + os.environ["LLM_PROVIDER"] = req.provider + + if req.provider == "groq" and req.api_key: + os.environ["GROQ_API_KEY"] = req.api_key + if req.model: + os.environ["GROQ_MODEL"] = req.model + elif req.provider == "anthropic" and req.api_key: + os.environ["ANTHROPIC_API_KEY"] = req.api_key + if req.model: + os.environ["ANTHROPIC_MODEL"] = req.model + elif req.provider == "openai" and req.api_key: + os.environ["OPENAI_API_KEY"] = req.api_key + if req.model: + os.environ["OPENAI_MODEL"] = req.model + if req.base_url: + os.environ["OPENAI_BASE_URL"] = req.base_url + elif req.provider == "bedrock": + if req.model: + os.environ["BEDROCK_MODEL_ID"] = req.model + + # No need to update server configs — _server_config() reads env at connect time + + return await get_llm_config() + + # ── Scenario data routes ── @app.get("/scenarios/data/flights") diff --git a/ctrl/k8s/base/configmap.yaml b/ctrl/k8s/base/configmap.yaml index 0f2d2d1..236f720 100644 --- a/ctrl/k8s/base/configmap.yaml +++ b/ctrl/k8s/base/configmap.yaml @@ -5,5 +5,7 @@ metadata: namespace: unt data: DEFAULT_SCENARIO: "weather_disruption_ord" - USE_BEDROCK: "false" + LLM_PROVIDER: "groq" + GROQ_API_KEY: "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" + GROQ_MODEL: "llama-3.3-70b-versatile" LANGFUSE_HOST: "http://langfuse:3000" diff --git a/ctrl/nginx.conf b/ctrl/nginx.conf index b9e8778..466104a 100644 --- a/ctrl/nginx.conf +++ b/ctrl/nginx.conf @@ -18,6 +18,10 @@ server { proxy_pass http://api:8000; } + location /config { + proxy_pass http://api:8000; + } + location /ws/ { proxy_pass http://api:8000; proxy_http_version 1.1; diff --git a/mcp_servers/ops/server.py b/mcp_servers/ops/server.py index 1c9bac0..db65f83 100644 --- a/mcp_servers/ops/server.py +++ b/mcp_servers/ops/server.py @@ -100,17 +100,36 @@ def get_pending_rebookings(hub: str, limit: int = 20) -> list[dict]: @mcp.tool() -def generate_narrative(context: 
+async def generate_narrative(context: dict) -> str:
     """Synthesizes aggregated operational context into a structured
     handover brief for ops managers.
 
-    Uses Claude Sonnet via AWS Bedrock Converse API.
+    Uses the provider selected by LLM_PROVIDER (groq | anthropic | bedrock | openai).
     Output: prioritized, concise, structured by IMMEDIATE / MONITOR / FYI.
-
-    NOTE: In v1, this returns a structured template from the context data.
-    LLM integration will be added when Bedrock is wired up.
+    Falls back to a structured template if no API key is configured or the call fails.
     """
-    # V1: structured template — will be replaced with Bedrock call
+    try:
+        from mcp_servers.shared_llm import generate, _get_provider
+
+        hub = context.get("hub", "ALL")
+        shift_time = context.get("shift_time", datetime.now(timezone.utc).strftime("%H:%M UTC"))
+
+        system_prompt = (
+            f"You are an airline operations shift handover briefing system. "
+            f"Generate a concise handover brief for {hub} at {shift_time}. "
+            f"Structure as: HEADER, then IMMEDIATE ACTION (items needing action within 2h), "
+            f"MONITOR (items that could escalate), FYI (resolved or low-risk). "
+            f"Be concise — ops managers scan, they don't read paragraphs. "
+            f"Use the data provided. Do not invent details."
+        )
+        text = await generate(system_prompt, json.dumps(context, indent=2))
+        return json.dumps({"text": text, "provider": _get_provider()})
+    except Exception:
+        return json.dumps({"text": _template_narrative(context), "provider": "template"})
+
+
+def _template_narrative(context: dict) -> str:
+    """Structured template fallback when LLM is unavailable."""
     sections = []
     immediate = context.get("immediate", [])
     monitor = context.get("monitor", [])
@@ -119,8 +138,7 @@ def generate_narrative(context: dict) -> str:
     hub = context.get("hub", "ALL")
     shift_time = context.get("shift_time", datetime.now(timezone.utc).strftime("%H:%M UTC"))
 
-    header = f"SHIFT HANDOVER BRIEF — {hub} / {shift_time}"
-    sections.append(header)
+    sections.append(f"SHIFT HANDOVER BRIEF — {hub} / {shift_time}")
     sections.append(f"Generated: {datetime.now(timezone.utc).strftime('%H:%M UTC')}")
     sections.append("")
 
diff --git a/mcp_servers/passenger/server.py b/mcp_servers/passenger/server.py
index 62b98ca..908d44b 100644
--- a/mcp_servers/passenger/server.py
+++ b/mcp_servers/passenger/server.py
@@ -5,7 +5,6 @@ notification prompt template (multi-tone).
 """
 
 import json
-from datetime import datetime, timezone
 
 from fastmcp import FastMCP
 
@@ -25,17 +24,33 @@ mcp = FastMCP(
 
 
 @mcp.tool()
-def generate_notification(context: dict) -> str:
+async def generate_notification(context: dict) -> str:
     """Synthesizes flight disruption context into an empathetic,
     actionable passenger notification.
 
-    Uses Claude Sonnet via AWS Bedrock Converse API.
+    Uses the provider selected by LLM_PROVIDER (groq | anthropic | bedrock | openai).
     Output: clear, human, no jargon, includes gate/time/status.
-
-    NOTE: In v1, this returns a structured template from the context data.
-    LLM integration will be added when Bedrock is wired up.
+    Falls back to a structured template if no API key is configured or the call fails.
     """
-    # V1: structured template — will be replaced with Bedrock call
+    try:
+        from mcp_servers.shared_llm import generate, _get_provider
+
+        system_prompt = (
+            "You are a passenger notification system for Stellar Air. "
+            "Write a clear, empathetic notification about this flight disruption. "
+            "Explain WHY the delay or cancellation happened using the operational data provided. "
+            "Tell the passenger what's happening next: new boarding time, gate, status. "
" + "Be human and reassuring. No aviation jargon. No speculation. " + "If data is missing for a section, omit it — don't make things up." + ) + text = await generate(system_prompt, json.dumps(context, indent=2)) + return json.dumps({"text": text, "provider": _get_provider()}) + except Exception: + return json.dumps({"text": _template_notification(context), "provider": "template"}) + + +def _template_notification(context: dict) -> str: + """Structured template fallback when LLM is unavailable.""" flight_id = context.get("flight_id", "") origin = context.get("origin", "") destination = context.get("destination", "") diff --git a/mcp_servers/shared_llm.py b/mcp_servers/shared_llm.py new file mode 100644 index 0000000..dbc13a3 --- /dev/null +++ b/mcp_servers/shared_llm.py @@ -0,0 +1,108 @@ +"""Shared LLM client for MCP server narrative tools. + +Multi-provider support — selected via LLM_PROVIDER env var: + + groq (default) — Groq API, OpenAI-compatible. Fast, free tier. + Needs GROQ_API_KEY. + + anthropic — Direct Anthropic SDK. Needs ANTHROPIC_API_KEY. + + bedrock — AWS Bedrock Converse API. Needs AWS credentials. + + openai — Any OpenAI-compatible endpoint. + Set OPENAI_API_KEY and optionally OPENAI_BASE_URL. + +Usage: + LLM_PROVIDER=groq GROQ_API_KEY=gsk_... python -m mcp_servers.shared +""" + +import os +from typing import Literal + +Provider = Literal["groq", "anthropic", "bedrock", "openai"] + + +def _get_provider() -> Provider: + p = os.getenv("LLM_PROVIDER", "groq").lower() + if p in ("groq", "anthropic", "bedrock", "openai"): + return p + return "groq" + + +async def generate(system_prompt: str, user_content: str, max_tokens: int = 1024) -> str: + """Call an LLM and return the text response.""" + provider = _get_provider() + + if provider == "anthropic": + return await _generate_anthropic(system_prompt, user_content, max_tokens) + elif provider == "bedrock": + return await _generate_bedrock(system_prompt, user_content, max_tokens) + else: + # groq, openai, or any OpenAI-compatible provider + return await _generate_openai_compat(system_prompt, user_content, max_tokens) + + +async def _generate_openai_compat( + system_prompt: str, user_content: str, max_tokens: int +) -> str: + """OpenAI-compatible API (Groq, OpenAI, local, etc).""" + import openai + + provider = _get_provider() + + if provider == "groq": + api_key = os.getenv("GROQ_API_KEY") + base_url = "https://api.groq.com/openai/v1" + model = os.getenv("GROQ_MODEL", "llama-3.3-70b-versatile") + else: + api_key = os.getenv("OPENAI_API_KEY") + base_url = os.getenv("OPENAI_BASE_URL", "https://api.openai.com/v1") + model = os.getenv("OPENAI_MODEL", "gpt-4o") + + client = openai.AsyncOpenAI(api_key=api_key, base_url=base_url) + response = await client.chat.completions.create( + model=model, + max_tokens=max_tokens, + temperature=0.7, + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_content}, + ], + ) + return response.choices[0].message.content + + +async def _generate_anthropic( + system_prompt: str, user_content: str, max_tokens: int +) -> str: + import anthropic + + client = anthropic.AsyncAnthropic() + response = await client.messages.create( + model=os.getenv("ANTHROPIC_MODEL", "claude-sonnet-4-20250514"), + max_tokens=max_tokens, + temperature=0.7, + system=system_prompt, + messages=[{"role": "user", "content": user_content}], + ) + return response.content[0].text + + +async def _generate_bedrock( + system_prompt: str, user_content: str, max_tokens: int +) -> str: + import json + + 
+    import boto3
+
+    bedrock = boto3.client(
+        "bedrock-runtime",
+        region_name=os.getenv("AWS_DEFAULT_REGION", "us-east-1"),
+    )
+    # boto3 is synchronous: this call blocks the event loop until Bedrock responds
+    response = bedrock.converse(
+        modelId=os.getenv("BEDROCK_MODEL_ID", "anthropic.claude-sonnet-4-20250514-v1:0"),
+        system=[{"text": system_prompt}],
+        messages=[{"role": "user", "content": [{"text": user_content}]}],
+        inferenceConfig={"maxTokens": max_tokens, "temperature": 0.7},
+    )
+    return response["output"]["message"]["content"][0]["text"]
diff --git a/pyproject.toml b/pyproject.toml
index d9f365d..28bd298 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -10,6 +10,7 @@ dependencies = [
     "langchain-anthropic",
     "boto3",
     "anthropic",
+    "openai",
     "fastapi",
     "uvicorn[standard]",
     "pydantic>=2.0",
diff --git a/tests/endpoints.py b/tests/endpoints.py
index 4353926..fa6f5b8 100644
--- a/tests/endpoints.py
+++ b/tests/endpoints.py
@@ -25,6 +25,9 @@ class Endpoints:
     def crew_notes(flight_id: str) -> str:
         return f"/scenarios/data/crew-notes/{flight_id}"
 
+    # Config
+    CONFIG_LLM = "/config/llm"
+
     # Agents
     AGENT_FCE = "/agents/fce"
     AGENT_HANDOVER = "/agents/handover"
diff --git a/tests/test_mcp_servers.py b/tests/test_mcp_servers.py
index 2ba80bf..59a5c0b 100644
--- a/tests/test_mcp_servers.py
+++ b/tests/test_mcp_servers.py
@@ -194,9 +194,15 @@ class TestPassengerServer:
                 "gate": "H14",
             }
         })
-        text = _parse_result(result)
+        data = _parse_result(result)
+        # Response is JSON with text + provider
+        if isinstance(data, dict):
+            assert "text" in data
+            assert "provider" in data
+            text = data["text"]
+        else:
+            text = str(data)
         assert "UA432" in text
-        assert "DELAYED" in text or "delayed" in text
 
     @pytest.mark.asyncio
     async def test_list_prompts(self):
diff --git a/ui/app/src/App.vue b/ui/app/src/App.vue
index 9c6a823..577aab9 100644
--- a/ui/app/src/App.vue
+++ b/ui/app/src/App.vue
@@ -1,5 +1,5 @@
diff --git a/ui/app/src/components/NotificationCard.vue b/ui/app/src/components/NotificationCard.vue
index 0ec9ce2..02e145f 100644
--- a/ui/app/src/components/NotificationCard.vue
+++ b/ui/app/src/components/NotificationCard.vue
@@ -9,6 +9,7 @@ defineProps<{
     generated_at: string
     human_approved: boolean
     duration_ms: number
+    llm_provider?: string
   }
 }>()
 
@@ -38,6 +39,9 @@ const causeColor: Record<string, string> = {
+        <span class="provider-tag" :class="data.llm_provider === 'template' ? 'mock' : 'live'">
+          {{ data.llm_provider }}
+        </span>
         <span>{{ data.duration_ms }}ms</span>
         <span class="approved">approved</span>
@@ -117,6 +121,24 @@ const causeColor: Record<string, string> = {
   font-family: var(--font-mono);
 }
 
+.provider-tag {
+  padding: 1px 6px;
+  font-family: var(--font-mono);
+  font-size: 10px;
+  background: var(--surface-2);
+  border: 1px solid var(--surface-3);
+  margin-right: 8px;
+}
+
+.provider-tag.live {
+  border-color: var(--status-live);
+  color: var(--status-live);
+}
+
+.provider-tag.mock {
+  color: var(--text-dim);
+}
+
 .approved {
   color: var(--status-live);
   margin-left: 8px;
diff --git a/ui/app/src/composables/useAgentEvents.ts b/ui/app/src/composables/useAgentEvents.ts
new file mode 100644
index 0000000..c2f98ba
--- /dev/null
+++ b/ui/app/src/composables/useAgentEvents.ts
@@ -0,0 +1,112 @@
+import { ref, onUnmounted } from 'vue'
+
+export interface GraphNode {
+  id: string
+  status: string
+}
+
+export interface AgentRun {
+  agent: string
+  run_id: string
+}
+
+export interface LogEntry {
+  level: string
+  stage: string
+  msg: string
+  ts: string
+}
+
+export function useAgentEvents() {
+  const agentStatus = ref<'idle' | 'live' | 'processing' | 'error'>('idle')
+  const entries = ref<LogEntry[]>([])
+  const graphNodes = ref<GraphNode[]>([])
+  const currentRun = ref<AgentRun | null>(null)
+
+  let ws: WebSocket | null = null
+
+  function connect() {
+    if (ws) return
+    const protocol = location.protocol === 'https:' ? 'wss:' : 'ws:'
+    ws = new WebSocket(`${protocol}//${location.host}/ws/agent-events`)
+
+    ws.onopen = () => { agentStatus.value = 'live' }
+
+    ws.onmessage = (e) => {
+      const event = JSON.parse(e.data)
+      handleEvent(event)
+    }
+
+    ws.onclose = () => {
+      agentStatus.value = 'idle'
+      ws = null
+      setTimeout(connect, 3000)
+    }
+
+    ws.onerror = () => { agentStatus.value = 'error' }
+  }
+
+  function disconnect() {
+    ws?.close()
+    ws = null
+  }
+
+  function handleEvent(event: any) {
+    const ts = event.timestamp || new Date().toISOString()
+    const time = ts.split('T')[1]?.split('.')[0] || ts
+
+    switch (event.type) {
+      case 'agent_start':
+        currentRun.value = { agent: event.agent, run_id: event.run_id }
+        agentStatus.value = 'processing'
+        graphNodes.value = []
+        entries.value = [{
+          level: 'info', stage: 'system',
+          msg: `Agent ${event.agent} started (${event.run_id})`, ts: time,
+        }]
+        break
+
+      case 'node_enter':
+        graphNodes.value.push({ id: event.node, status: 'processing' })
+        entries.value.push({
+          level: 'info', stage: event.node,
+          msg: `→ entering ${event.node}`, ts: time,
+        })
+        break
+
+      case 'node_exit': {
+        const node = graphNodes.value.find(n => n.id === event.node)
+        if (node) node.status = 'done'
+        break
+      }
+
+      case 'tool_call_end': {
+        const liveTag = event.is_live ? ' (live)' : ' (mock)'
+        entries.value.push({
+          level: 'info', stage: '',
+          msg: `${event.tool} — ${event.latency_ms}ms ✓${liveTag}`, ts: time,
+        })
+        break
+      }
+
+      case 'tool_call_error':
+        entries.value.push({
+          level: 'error', stage: '',
+          msg: `${event.tool} — FAILED: ${event.error}`, ts: time,
+        })
+        break
+
+      case 'agent_end':
+        agentStatus.value = 'live'
+        entries.value.push({
+          level: 'info', stage: 'system',
+          msg: `Agent complete: ${event.output_summary}`, ts: time,
+        })
+        break
+    }
+  }
+
+  onUnmounted(disconnect)
+
+  return { agentStatus, entries, graphNodes, currentRun, connect, disconnect }
+}
diff --git a/ui/app/src/main.ts b/ui/app/src/main.ts
index 2d43aea..d20edcb 100644
--- a/ui/app/src/main.ts
+++ b/ui/app/src/main.ts
@@ -4,15 +4,15 @@ import 'soleprint-ui/src/tokens.css'
 import './styles/mars-tokens.css'
 import App from './App.vue'
 import OpsNotifications from './pages/OpsNotifications.vue'
-import AgentInternals from './pages/AgentInternals.vue'
 import ScenarioData from './pages/ScenarioData.vue'
+import Settings from './pages/Settings.vue'
 
 const router = createRouter({
   history: createWebHistory(),
   routes: [
     { path: '/', component: OpsNotifications },
-    { path: '/internals', component: AgentInternals },
     { path: '/data', component: ScenarioData },
+    { path: '/settings', component: Settings },
   ],
 })
 
diff --git a/ui/app/src/pages/AgentInternals.vue b/ui/app/src/pages/AgentInternals.vue
index 9124569..f4af351 100644
--- a/ui/app/src/pages/AgentInternals.vue
+++ b/ui/app/src/pages/AgentInternals.vue
@@ -1,103 +1,11 @@