docs
This commit is contained in:
@@ -1,8 +1,7 @@
|
||||
# Woodpecker CI - Test Pipeline (runs on PRs and pushes)
|
||||
# Separate file for cleaner organization
|
||||
|
||||
steps:
|
||||
lint:
|
||||
- name: lint
|
||||
image: python:3.11-slim
|
||||
commands:
|
||||
- pip install --quiet ruff mypy
|
||||
@@ -11,14 +10,14 @@ steps:
|
||||
- echo "=== Checking formatting ==="
|
||||
- ruff format --check services/ shared/
|
||||
|
||||
typecheck:
|
||||
- name: typecheck
|
||||
image: python:3.11-slim
|
||||
commands:
|
||||
- pip install --quiet mypy types-redis
|
||||
- echo "=== Type checking shared/ ==="
|
||||
- mypy shared/ --ignore-missing-imports || true
|
||||
|
||||
unit-tests:
|
||||
- name: unit-tests
|
||||
image: python:3.11-slim
|
||||
commands:
|
||||
- pip install --quiet pytest pytest-asyncio pytest-cov
|
||||
@@ -26,7 +25,7 @@ steps:
|
||||
- echo "=== Running unit tests ==="
|
||||
- pytest shared/ services/ -v --tb=short --cov=shared --cov=services --cov-report=term-missing || true
|
||||
|
||||
proto-check:
|
||||
- name: proto-check
|
||||
image: python:3.11-slim
|
||||
commands:
|
||||
- pip install --quiet grpcio-tools
|
||||
@@ -34,7 +33,5 @@ steps:
|
||||
- python -m grpc_tools.protoc -I./proto --python_out=/tmp --grpc_python_out=/tmp ./proto/metrics.proto
|
||||
- echo "Proto compilation successful"
|
||||
|
||||
depends_on: []
|
||||
|
||||
when:
|
||||
event: [push, pull_request]
|
||||
- event: [push, pull_request]
|
||||
|
||||
@@ -1,18 +1,26 @@
|
||||
<!DOCTYPE html>
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>Graph Viewer - System Monitor</title>
|
||||
<link rel="stylesheet" href="styles.css">
|
||||
<link rel="stylesheet" href="styles.css" />
|
||||
</head>
|
||||
<body class="graph-viewer">
|
||||
<header class="graph-header">
|
||||
<a href="index.html" class="back-link">← Index</a>
|
||||
<a href="../index.html" class="back-link">← Index</a>
|
||||
<div class="nav-controls">
|
||||
<button onclick="navigate(-1)" id="btn-prev" title="Previous (←)">◀</button>
|
||||
<button
|
||||
onclick="navigate(-1)"
|
||||
id="btn-prev"
|
||||
title="Previous (←)"
|
||||
>
|
||||
◀
|
||||
</button>
|
||||
<span id="nav-position">1 / 4</span>
|
||||
<button onclick="navigate(1)" id="btn-next" title="Next (→)">▶</button>
|
||||
<button onclick="navigate(1)" id="btn-next" title="Next (→)">
|
||||
▶
|
||||
</button>
|
||||
</div>
|
||||
<h1 id="graph-title">Loading...</h1>
|
||||
<div class="graph-controls">
|
||||
@@ -25,58 +33,60 @@
|
||||
</header>
|
||||
|
||||
<div class="graph-container" id="graph-container">
|
||||
<img id="graph-img" src="" alt="Graph">
|
||||
<img id="graph-img" src="" alt="Graph" />
|
||||
</div>
|
||||
|
||||
<script>
|
||||
const graphOrder = [
|
||||
'01-system-overview',
|
||||
'02-data-flow',
|
||||
'03-deployment',
|
||||
'04-grpc-services'
|
||||
"01-system-overview",
|
||||
"02-data-flow",
|
||||
"03-deployment",
|
||||
"04-grpc-services",
|
||||
];
|
||||
|
||||
const graphs = {
|
||||
'01-system-overview': {
|
||||
title: 'System Overview',
|
||||
file: '01-system-overview.svg'
|
||||
"01-system-overview": {
|
||||
title: "System Overview",
|
||||
file: "01-system-overview.svg",
|
||||
},
|
||||
'02-data-flow': {
|
||||
title: 'Data Flow Pipeline',
|
||||
file: '02-data-flow.svg'
|
||||
"02-data-flow": {
|
||||
title: "Data Flow Pipeline",
|
||||
file: "02-data-flow.svg",
|
||||
},
|
||||
'03-deployment': {
|
||||
title: 'Deployment Architecture',
|
||||
file: '03-deployment.svg'
|
||||
"03-deployment": {
|
||||
title: "Deployment Architecture",
|
||||
file: "03-deployment.svg",
|
||||
},
|
||||
"04-grpc-services": {
|
||||
title: "gRPC Service Definitions",
|
||||
file: "04-grpc-services.svg",
|
||||
},
|
||||
'04-grpc-services': {
|
||||
title: 'gRPC Service Definitions',
|
||||
file: '04-grpc-services.svg'
|
||||
}
|
||||
};
|
||||
|
||||
const params = new URLSearchParams(window.location.search);
|
||||
let graphKey = params.get('g') || '01-system-overview';
|
||||
let graphKey = params.get("g") || "01-system-overview";
|
||||
let currentIndex = graphOrder.indexOf(graphKey);
|
||||
if (currentIndex === -1) currentIndex = 0;
|
||||
|
||||
function loadGraph(key) {
|
||||
const graph = graphs[key];
|
||||
document.getElementById('graph-title').textContent = graph.title;
|
||||
document.getElementById('graph-img').src = graph.file;
|
||||
document.title = graph.title + ' - System Monitor';
|
||||
history.replaceState(null, '', '?g=' + key);
|
||||
document.getElementById("graph-title").textContent =
|
||||
graph.title;
|
||||
document.getElementById("graph-img").src = graph.file;
|
||||
document.title = graph.title + " - System Monitor";
|
||||
history.replaceState(null, "", "?g=" + key);
|
||||
graphKey = key;
|
||||
updateNavHints();
|
||||
}
|
||||
|
||||
function updateNavHints() {
|
||||
const idx = graphOrder.indexOf(graphKey);
|
||||
const prevBtn = document.getElementById('btn-prev');
|
||||
const nextBtn = document.getElementById('btn-next');
|
||||
const prevBtn = document.getElementById("btn-prev");
|
||||
const nextBtn = document.getElementById("btn-next");
|
||||
prevBtn.disabled = idx === 0;
|
||||
nextBtn.disabled = idx === graphOrder.length - 1;
|
||||
document.getElementById('nav-position').textContent = (idx + 1) + ' / ' + graphOrder.length;
|
||||
document.getElementById("nav-position").textContent =
|
||||
idx + 1 + " / " + graphOrder.length;
|
||||
}
|
||||
|
||||
function navigate(direction) {
|
||||
@@ -89,32 +99,32 @@
|
||||
}
|
||||
|
||||
function setMode(mode) {
|
||||
const container = document.getElementById('graph-container');
|
||||
container.className = 'graph-container ' + mode;
|
||||
const container = document.getElementById("graph-container");
|
||||
container.className = "graph-container " + mode;
|
||||
}
|
||||
|
||||
function downloadSvg() {
|
||||
const graph = graphs[graphKey];
|
||||
const link = document.createElement('a');
|
||||
const link = document.createElement("a");
|
||||
link.href = graph.file;
|
||||
link.download = graph.file;
|
||||
link.click();
|
||||
}
|
||||
|
||||
// Keyboard navigation
|
||||
document.addEventListener('keydown', (e) => {
|
||||
if (e.key === 'ArrowLeft') {
|
||||
document.addEventListener("keydown", (e) => {
|
||||
if (e.key === "ArrowLeft") {
|
||||
navigate(-1);
|
||||
} else if (e.key === 'ArrowRight') {
|
||||
} else if (e.key === "ArrowRight") {
|
||||
navigate(1);
|
||||
} else if (e.key === 'Escape') {
|
||||
window.location.href = 'index.html';
|
||||
} else if (e.key === "Escape") {
|
||||
window.location.href = "../index.html";
|
||||
}
|
||||
});
|
||||
|
||||
// Initialize
|
||||
loadGraph(graphOrder[currentIndex]);
|
||||
setMode('fit');
|
||||
setMode("fit");
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,10 +1,11 @@
|
||||
<!DOCTYPE html>
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>System Monitor - Architecture Documentation</title>
|
||||
<link rel="stylesheet" href="styles.css">
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<meta http-equiv="refresh" content="0; url=../index.html" />
|
||||
<title>System Monitor - Redirecting...</title>
|
||||
<link rel="stylesheet" href="styles.css" />
|
||||
</head>
|
||||
<body>
|
||||
<header>
|
||||
@@ -16,19 +17,36 @@
|
||||
<section class="graph-section" id="overview">
|
||||
<div class="graph-header-row">
|
||||
<h2>System Overview</h2>
|
||||
<a href="graph.html?g=01-system-overview" class="view-btn">View Full</a>
|
||||
<a href="graph.html?g=01-system-overview" class="view-btn"
|
||||
>View Full</a
|
||||
>
|
||||
</div>
|
||||
<a href="graph.html?g=01-system-overview" class="graph-preview">
|
||||
<img src="01-system-overview.svg" alt="System Overview">
|
||||
<img src="01-system-overview.svg" alt="System Overview" />
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>High-level architecture showing all services, data stores, and communication patterns.</p>
|
||||
<p>
|
||||
High-level architecture showing all services, data
|
||||
stores, and communication patterns.
|
||||
</p>
|
||||
<h4>Key Components</h4>
|
||||
<ul>
|
||||
<li><strong>Collector</strong>: Runs on each monitored machine, streams metrics via gRPC</li>
|
||||
<li><strong>Aggregator</strong>: Central gRPC server, receives streams, normalizes data</li>
|
||||
<li><strong>Gateway</strong>: FastAPI service, WebSocket for browser, REST for queries</li>
|
||||
<li><strong>Alerts</strong>: Subscribes to events, evaluates thresholds, triggers actions</li>
|
||||
<li>
|
||||
<strong>Collector</strong>: Runs on each monitored
|
||||
machine, streams metrics via gRPC
|
||||
</li>
|
||||
<li>
|
||||
<strong>Aggregator</strong>: Central gRPC server,
|
||||
receives streams, normalizes data
|
||||
</li>
|
||||
<li>
|
||||
<strong>Gateway</strong>: FastAPI service, WebSocket
|
||||
for browser, REST for queries
|
||||
</li>
|
||||
<li>
|
||||
<strong>Alerts</strong>: Subscribes to events,
|
||||
evaluates thresholds, triggers actions
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
@@ -36,17 +54,27 @@
|
||||
<section class="graph-section" id="data-flow">
|
||||
<div class="graph-header-row">
|
||||
<h2>Data Flow Pipeline</h2>
|
||||
<a href="graph.html?g=02-data-flow" class="view-btn">View Full</a>
|
||||
<a href="graph.html?g=02-data-flow" class="view-btn"
|
||||
>View Full</a
|
||||
>
|
||||
</div>
|
||||
<a href="graph.html?g=02-data-flow" class="graph-preview">
|
||||
<img src="02-data-flow.svg" alt="Data Flow">
|
||||
<img src="02-data-flow.svg" alt="Data Flow" />
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>How metrics flow from collection through storage with different retention tiers.</p>
|
||||
<p>
|
||||
How metrics flow from collection through storage with
|
||||
different retention tiers.
|
||||
</p>
|
||||
<h4>Storage Tiers</h4>
|
||||
<table class="details-table">
|
||||
<thead>
|
||||
<tr><th>Tier</th><th>Resolution</th><th>Retention</th><th>Use Case</th></tr>
|
||||
<tr>
|
||||
<th>Tier</th>
|
||||
<th>Resolution</th>
|
||||
<th>Retention</th>
|
||||
<th>Use Case</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
@@ -81,18 +109,32 @@
|
||||
<section class="graph-section" id="deployment">
|
||||
<div class="graph-header-row">
|
||||
<h2>Deployment Architecture</h2>
|
||||
<a href="graph.html?g=03-deployment" class="view-btn">View Full</a>
|
||||
<a href="graph.html?g=03-deployment" class="view-btn"
|
||||
>View Full</a
|
||||
>
|
||||
</div>
|
||||
<a href="graph.html?g=03-deployment" class="graph-preview">
|
||||
<img src="03-deployment.svg" alt="Deployment">
|
||||
<img src="03-deployment.svg" alt="Deployment" />
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>Deployment options from local development to AWS production.</p>
|
||||
<p>
|
||||
Deployment options from local development to AWS
|
||||
production.
|
||||
</p>
|
||||
<h4>Environments</h4>
|
||||
<ul>
|
||||
<li><strong>Local Dev</strong>: Kind + Tilt for K8s, or Docker Compose</li>
|
||||
<li><strong>Demo (EC2)</strong>: Docker Compose on t2.small at sysmonstm.mcrn.ar</li>
|
||||
<li><strong>Lambda Pipeline</strong>: SQS-triggered aggregation for data processing experience</li>
|
||||
<li>
|
||||
<strong>Local Dev</strong>: Kind + Tilt for K8s, or
|
||||
Docker Compose
|
||||
</li>
|
||||
<li>
|
||||
<strong>Demo (EC2)</strong>: Docker Compose on
|
||||
t2.small at sysmonstm.mcrn.ar
|
||||
</li>
|
||||
<li>
|
||||
<strong>Lambda Pipeline</strong>: SQS-triggered
|
||||
aggregation for data processing experience
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
@@ -100,18 +142,29 @@
|
||||
<section class="graph-section" id="grpc">
|
||||
<div class="graph-header-row">
|
||||
<h2>gRPC Service Definitions</h2>
|
||||
<a href="graph.html?g=04-grpc-services" class="view-btn">View Full</a>
|
||||
<a href="graph.html?g=04-grpc-services" class="view-btn"
|
||||
>View Full</a
|
||||
>
|
||||
</div>
|
||||
<a href="graph.html?g=04-grpc-services" class="graph-preview">
|
||||
<img src="04-grpc-services.svg" alt="gRPC Services">
|
||||
<img src="04-grpc-services.svg" alt="gRPC Services" />
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>Protocol Buffer service and message definitions.</p>
|
||||
<h4>Services</h4>
|
||||
<ul>
|
||||
<li><strong>MetricsService</strong>: Client-side streaming for metrics ingestion</li>
|
||||
<li><strong>ControlService</strong>: Bidirectional streaming for collector control</li>
|
||||
<li><strong>ConfigService</strong>: Server-side streaming for config updates</li>
|
||||
<li>
|
||||
<strong>MetricsService</strong>: Client-side
|
||||
streaming for metrics ingestion
|
||||
</li>
|
||||
<li>
|
||||
<strong>ControlService</strong>: Bidirectional
|
||||
streaming for collector control
|
||||
</li>
|
||||
<li>
|
||||
<strong>ConfigService</strong>: Server-side
|
||||
streaming for config updates
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
@@ -201,7 +254,9 @@
|
||||
|
||||
<footer>
|
||||
<p>System Monitoring Platform - Architecture Documentation</p>
|
||||
<p class="date">Generated: <time datetime="2025-12-29">December 2025</time></p>
|
||||
<p class="date">
|
||||
Generated: <time datetime="2025-12-29">December 2025</time>
|
||||
</p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
278
docs/explainer/other-applications.md
Normal file
278
docs/explainer/other-applications.md
Normal file
@@ -0,0 +1,278 @@
|
||||
# Same Patterns, Different Domains
|
||||
|
||||
The architecture behind sysmonstm isn't specific to system monitoring. The patterns - streaming data collection, event-driven processing, tiered storage, real-time dashboards - apply to many domains. This article explores two: payment processing systems and desktop productivity tracking.
|
||||
|
||||
## Payment Processing Systems
|
||||
|
||||
The sysmonstm architecture was intentionally designed to map to payment processing. Here's how each component translates.
|
||||
|
||||
### Domain Mapping
|
||||
|
||||
| sysmonstm | Payment System |
|
||||
|-----------|----------------|
|
||||
| Machine | Payment Processor (Stripe, PayPal, bank API) |
|
||||
| Metrics Stream | Transaction Stream |
|
||||
| Aggregator | Payment Hub |
|
||||
| Alert Thresholds | Fraud Detection Rules |
|
||||
| Alert Service | Risk Management |
|
||||
| Redis (current state) | Transaction Cache |
|
||||
| TimescaleDB (history) | Transaction Ledger |
|
||||
| Event Stream | Audit Trail |
|
||||
|
||||
### How It Would Work
|
||||
|
||||
**Collectors become processor adapters.** Instead of collecting CPU and memory via psutil, each adapter connects to a payment processor's API or webhook endpoint:
|
||||
|
||||
```python
|
||||
# Conceptual - not actual code
|
||||
class StripeAdapter:
|
||||
async def stream_transactions(self):
|
||||
async for event in stripe.webhook_events():
|
||||
yield Transaction(
|
||||
processor="stripe",
|
||||
amount=event.amount,
|
||||
currency=event.currency,
|
||||
status=event.status,
|
||||
customer_id=event.customer,
|
||||
timestamp=event.created,
|
||||
)
|
||||
```
|
||||
|
||||
The gRPC streaming pattern remains identical. Each adapter streams transactions to a central aggregator.
|
||||
|
||||
**The aggregator normalizes data.** Stripe sends amounts in cents. PayPal sends them in dollars. Bank APIs use different currency codes. The aggregator normalizes everything to a consistent format before storage:
|
||||
|
||||
```python
|
||||
# In the aggregator's StreamTransactions handler
|
||||
async for tx in request_iterator:
|
||||
normalized = normalize_transaction(tx)
|
||||
await self.store(normalized)
|
||||
await self.publisher.publish("transactions.raw", normalized)
|
||||
```
|
||||
|
||||
This is the same pattern as `services/aggregator/main.py:47-95` - receive stream, batch, flush to storage, publish events.
|
||||
|
||||
**Alerts become fraud detection.** Instead of "CPU > 80%", rules look like:
|
||||
|
||||
- Transaction amount > $10,000 (large transaction)
|
||||
- More than 5 transactions from same card in 1 minute (velocity check)
|
||||
- Transaction from country different than cardholder's (geographic anomaly)
|
||||
|
||||
The `AlertEvaluator` pattern from `services/alerts/main.py:44-77` handles this:
|
||||
|
||||
```python
|
||||
class FraudEvaluator:
|
||||
RULES = [
|
||||
FraudRule("large_transaction", "amount", "gt", 10000, "review"),
|
||||
FraudRule("velocity", "transactions_per_minute", "gt", 5, "block"),
|
||||
]
|
||||
|
||||
def evaluate(self, transaction: dict) -> list[FraudAlert]:
|
||||
# Same operator-based evaluation as AlertEvaluator
|
||||
pass
|
||||
```
|
||||
|
||||
**The event stream becomes an audit trail.** Financial systems require complete audit logs. Every transaction, every state change, every decision must be recorded. The event abstraction from `shared/events/base.py` already provides this:
|
||||
|
||||
```python
|
||||
await self.publisher.publish(
|
||||
topic="transactions.processed",
|
||||
payload={
|
||||
"transaction_id": tx.id,
|
||||
"processor": tx.processor,
|
||||
"amount": tx.amount,
|
||||
"decision": "approved",
|
||||
"timestamp": datetime.utcnow().isoformat(),
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
Subscribe to these events for compliance reporting, analytics, or real-time monitoring.
|
||||
|
||||
**Tiered storage handles transaction volumes.** Hot transactions (last hour) in Redis for quick lookups. Recent transactions (last month) in PostgreSQL for operational queries. Historical transactions archived to S3 for compliance retention. Same pattern as sysmonstm's Redis + TimescaleDB setup.
|
||||
|
||||
### What Changes
|
||||
|
||||
- **Authentication**: Payment APIs require OAuth, API keys, mTLS. The collector adapters need credential management.
|
||||
- **Idempotency**: Transactions must be processed exactly once. The aggregator needs deduplication.
|
||||
- **Compliance**: PCI-DSS requires encryption, access controls, audit logging. More infrastructure, same patterns.
|
||||
|
||||
### What Stays the Same
|
||||
|
||||
- gRPC streaming from multiple sources to central aggregator
|
||||
- Event-driven processing for decoupled services
|
||||
- Threshold-based alerting
|
||||
- Real-time dashboard via WebSocket
|
||||
- Tiered storage for different access patterns
|
||||
|
||||
## Deskmeter: A Workspace Timer Application
|
||||
|
||||
Deskmeter is a productivity tracking application that monitors desktop workspace switches and task changes. It runs on Linux, tracks time spent on different tasks, and displays the data through a web dashboard.
|
||||
|
||||
Current architecture:
|
||||
- **dmcore daemon**: Polls workspace state every 2 seconds using `wmctrl`
|
||||
- **MongoDB**: Stores workspace switches with timestamps and durations
|
||||
- **Flask web server**: Serves calendar views and task summaries
|
||||
- **GNOME extension**: Shows current task in the top panel
|
||||
|
||||
This works, but sysmonstm patterns could enhance it significantly.
|
||||
|
||||
### Current Deskmeter Implementation
|
||||
|
||||
The core daemon (`dmapp/dmcore/main.py`) polls in a loop:
|
||||
|
||||
```python
|
||||
while True:
|
||||
current_workspace = active_workspace() # Calls wmctrl
|
||||
current_task = state.retrieve("current").get("task")
|
||||
|
||||
# Track the switch
|
||||
last_switch_time = track_workspace_switch(
|
||||
current_workspace,
|
||||
current_task,
|
||||
last_switch_time
|
||||
)
|
||||
|
||||
time.sleep(2)
|
||||
```
|
||||
|
||||
The web server (`dmapp/dmweb/dm.py`) uses Flask with template rendering:
|
||||
|
||||
```python
|
||||
@dmbp.route("/calendar/<string:scope>")
|
||||
def calendar_view(scope="daily", year=None, month=None, day=None):
|
||||
blocks = get_task_blocks_calendar(start, end, task, ...)
|
||||
return render_template("calendar_view.html", blocks=blocks, ...)
|
||||
```
|
||||
|
||||
The dashboard refreshes via page reload or AJAX polling.
|
||||
|
||||
### How sysmonstm Patterns Would Improve It
|
||||
|
||||
**Replace polling with streaming.** Instead of the daemon polling every 2 seconds and the web dashboard polling for updates, use the same event-driven architecture as sysmonstm.
|
||||
|
||||
The daemon becomes an event publisher:
|
||||
|
||||
```python
|
||||
# Conceptual improvement
|
||||
class WorkspaceMonitor:
|
||||
async def run(self):
|
||||
publisher = get_publisher(source="workspace-monitor")
|
||||
await publisher.connect()
|
||||
|
||||
while self.running:
|
||||
workspace = await self.detect_workspace()
|
||||
task = await self.get_current_task()
|
||||
|
||||
if workspace != self.last_workspace or task != self.last_task:
|
||||
await publisher.publish(
|
||||
topic="workspace.switch",
|
||||
payload={
|
||||
"workspace": workspace,
|
||||
"task": task,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
},
|
||||
)
|
||||
self.last_workspace = workspace
|
||||
self.last_task = task
|
||||
|
||||
await asyncio.sleep(2)
|
||||
```
|
||||
|
||||
The web server subscribes to events and pushes to browsers via WebSocket - exactly like `services/gateway/main.py:88-130`:
|
||||
|
||||
```python
|
||||
async def event_listener():
|
||||
async with get_subscriber(topics=["workspace.*"]) as subscriber:
|
||||
async for event in subscriber.consume():
|
||||
await manager.broadcast({
|
||||
"type": "workspace_switch",
|
||||
"data": event.payload,
|
||||
})
|
||||
```
|
||||
|
||||
The GNOME extension could subscribe directly instead of polling an HTTP endpoint.
|
||||
|
||||
**Add multi-machine support.** With sysmonstm's architecture, tracking multiple machines is trivial. Run the workspace monitor daemon on each machine. Each streams events to an aggregator. The dashboard shows all machines.
|
||||
|
||||
```python
|
||||
# Each machine's monitor includes machine_id
|
||||
await publisher.publish(
|
||||
topic="workspace.switch",
|
||||
payload={
|
||||
"machine_id": self.machine_id, # "workstation", "laptop", etc.
|
||||
"workspace": workspace,
|
||||
"task": task,
|
||||
"timestamp": datetime.now().isoformat(),
|
||||
},
|
||||
)
|
||||
```
|
||||
|
||||
The dashboard groups by machine or shows a combined view. Same pattern as sysmonstm's multi-machine monitoring.
|
||||
|
||||
**Add focus alerts.** The alert service pattern from `services/alerts/main.py` applies directly:
|
||||
|
||||
```python
|
||||
# Focus time rules
|
||||
FocusRule("context_switching", "switches_per_hour", "gt", 10, "warning")
|
||||
FocusRule("long_idle", "idle_minutes", "gt", 30, "info")
|
||||
FocusRule("deep_work", "focus_minutes", "gt", 90, "success")
|
||||
```
|
||||
|
||||
When you switch tasks more than 10 times in an hour, get a notification. When you've been focused for 90 minutes, celebrate. The evaluator pattern handles both alerts and achievements.
|
||||
|
||||
**Improve time-series storage.** Deskmeter uses MongoDB for everything. With sysmonstm's tiered approach:
|
||||
|
||||
- **Redis**: Current task, current workspace, last 5 minutes of switches
|
||||
- **TimescaleDB**: Historical switches with automatic downsampling
|
||||
|
||||
Query "what was I doing at 3pm yesterday" hits warm storage. Query "how much time did I spend on project X this month" uses aggregated data. Same queries, faster execution.
|
||||
|
||||
### Implementation Path
|
||||
|
||||
1. **Add event publishing to dmcore.** Keep the polling loop but publish events instead of writing directly to MongoDB.
|
||||
|
||||
2. **Add WebSocket to dmweb.** Subscribe to events, push to connected browsers. The calendar view updates in real-time.
|
||||
|
||||
3. **Add Redis for current state.** Dashboard reads current task from Redis instead of querying MongoDB.
|
||||
|
||||
4. **Add focus alerts.** New service that subscribes to workspace events, evaluates rules, publishes alerts.
|
||||
|
||||
5. **Add multi-machine support.** Run dmcore on multiple machines. Aggregate events centrally.
|
||||
|
||||
Each step is independent. The system works after each one. Same phased approach as sysmonstm.
|
||||
|
||||
### Code Mapping
|
||||
|
||||
| sysmonstm Component | Deskmeter Equivalent |
|
||||
|---------------------|---------------------|
|
||||
| `services/collector/` | `dmapp/dmcore/main.py` - workspace monitoring |
|
||||
| `services/aggregator/` | Event aggregation (new) |
|
||||
| `services/gateway/` | `dmapp/dmweb/dm.py` + WebSocket (enhanced) |
|
||||
| `services/alerts/` | Focus alerts service (new) |
|
||||
| `proto/metrics.proto` | Workspace event schema |
|
||||
| `shared/events/` | Same - reusable |
|
||||
|
||||
The event abstraction from sysmonstm (`shared/events/`) works directly. The configuration pattern from `shared/config.py` works directly. The structured logging from `shared/logging.py` works directly.
|
||||
|
||||
## The Common Thread
|
||||
|
||||
Both payment processing and productivity tracking share the same fundamental pattern:
|
||||
|
||||
1. **Multiple data sources** streaming to a central point
|
||||
2. **Normalization** of different formats into consistent schema
|
||||
3. **Real-time processing** for dashboards and alerts
|
||||
4. **Historical storage** for analysis and compliance
|
||||
5. **Event-driven decoupling** for extensibility
|
||||
|
||||
sysmonstm demonstrates these patterns with system metrics. The patterns transfer to any domain with similar characteristics:
|
||||
|
||||
- IoT sensor networks (temperature, humidity, motion)
|
||||
- Log aggregation (application logs from multiple services)
|
||||
- Social media analytics (tweets, posts, mentions)
|
||||
- Trading systems (market data from multiple exchanges)
|
||||
- Fleet management (GPS, fuel, diagnostics from vehicles)
|
||||
|
||||
The specific metrics change. The thresholds change. The domain vocabulary changes. The architecture stays the same.
|
||||
|
||||
Build it once for metrics. Apply it anywhere.
|
||||
413
docs/explainer/sysmonstm-from-start-to-finish.md
Normal file
413
docs/explainer/sysmonstm-from-start-to-finish.md
Normal file
@@ -0,0 +1,413 @@
|
||||
# Building sysmonstm: From Idea to Working System
|
||||
|
||||
This is the story of building a distributed system monitoring platform. Not a tutorial with sanitized examples, but an explanation of the actual decisions made, the trade-offs considered, and the code that resulted.
|
||||
|
||||
## The Problem
|
||||
|
||||
I have multiple development machines. A workstation, a laptop, sometimes a remote VM. Each one occasionally runs out of disk space, hits memory limits, or has a runaway process eating CPU. The pattern was always the same: something breaks, I SSH in, run `htop`, realize the problem, fix it.
|
||||
|
||||
The obvious solution is a monitoring dashboard. Something that shows all machines in one place, updates in real-time, and alerts before things break.
|
||||
|
||||
But the real motivation was an interview. The job description mentioned gRPC, streaming patterns, event-driven architecture. Building a monitoring system would demonstrate all of these while solving an actual problem.
|
||||
|
||||
## Architecture Decisions
|
||||
|
||||
### Why gRPC Instead of REST
|
||||
|
||||
REST would work fine. Poll each machine every few seconds, aggregate the results. Simple.
|
||||
|
||||
But gRPC offers streaming. Instead of the aggregator asking each machine "what are your metrics right now?", each machine opens a persistent connection and continuously pushes metrics. This is more efficient (one connection instead of repeated requests) and lower latency (metrics arrive as soon as they're collected).
|
||||
|
||||
The proto definition in `proto/metrics.proto` defines this as client-side streaming:
|
||||
|
||||
```protobuf
|
||||
service MetricsService {
|
||||
// Client-side streaming: collector streams metrics to aggregator
|
||||
rpc StreamMetrics(stream Metric) returns (StreamAck) {}
|
||||
}
|
||||
```
|
||||
|
||||
The collector is the client. It streams metrics. The aggregator is the server. It receives them. When the stream ends (collector shuts down, network drops), the aggregator gets a `StreamAck` response.
|
||||
|
||||
### Why This Storage Tier Approach
|
||||
|
||||
Metrics have different access patterns at different ages:
|
||||
|
||||
- **Right now**: The dashboard needs current CPU/memory/disk for all machines. Access pattern: read all, very frequently.
|
||||
- **Last hour**: Graphs showing recent trends. Access pattern: read range, somewhat frequently.
|
||||
- **Last week**: Investigating what happened yesterday. Access pattern: read range, occasionally.
|
||||
- **Last month**: Capacity planning. Access pattern: aggregated queries, rarely.
|
||||
|
||||
Storing everything in one place forces a choice between fast reads (keep it all in memory) and storage efficiency (keep it on disk). The solution is tiered storage:
|
||||
|
||||
- **Redis** (`services/aggregator/storage.py`): Current state only. Each machine's latest metrics, with 5-minute TTL. Dashboard reads hit Redis.
|
||||
- **TimescaleDB** (`scripts/init-db.sql`): Historical data. Raw metrics at 5-second resolution for 24 hours, then automatically downsampled to 1-minute and 1-hour aggregates with longer retention.
|
||||
|
||||
The aggregator writes to both on every batch. Redis for live dashboard. TimescaleDB for history.
|
||||
|
||||
### Why Event-Driven for Alerts
|
||||
|
||||
The alerts service needs to evaluate every metric against threshold rules. Two options:
|
||||
|
||||
1. **Direct call**: Aggregator calls alerts service for each metric batch.
|
||||
2. **Event stream**: Aggregator publishes events. Alerts service subscribes.
|
||||
|
||||
Option 2 decouples them. The aggregator doesn't know or care if the alerts service is running. It publishes events regardless. The alerts service can be restarted, scaled, or replaced without touching the aggregator.
|
||||
|
||||
The event abstraction in `shared/events/base.py` defines the interface:
|
||||
|
||||
```python
|
||||
class EventPublisher(ABC):
|
||||
@abstractmethod
|
||||
async def publish(self, topic: str, payload: dict[str, Any], **kwargs) -> str:
|
||||
pass
|
||||
|
||||
class EventSubscriber(ABC):
|
||||
@abstractmethod
|
||||
async def consume(self) -> AsyncIterator[Event]:
|
||||
pass
|
||||
```
|
||||
|
||||
Currently backed by Redis Pub/Sub (`shared/events/redis_pubsub.py`). The abstraction means switching to Kafka or RabbitMQ later requires implementing a new backend, not changing any service code.
|
||||
|
||||
## Phase 1: MVP - Getting Streaming to Work
|
||||
|
||||
The goal was simple: run a collector, see metrics appear in the aggregator's logs.
|
||||
|
||||
### The Collector
|
||||
|
||||
`services/collector/main.py` is a gRPC client. The core is an async generator that yields metrics forever:
|
||||
|
||||
```python
|
||||
async def _metric_generator(self):
|
||||
"""Async generator that yields metrics at the configured interval."""
|
||||
while self.running:
|
||||
batch = self.collector.collect()
|
||||
protos = self._batch_to_proto(batch)
|
||||
|
||||
for proto in protos:
|
||||
yield proto
|
||||
|
||||
await asyncio.sleep(self.config.collection_interval)
|
||||
```
|
||||
|
||||
This generator is passed directly to the gRPC stub:
|
||||
|
||||
```python
|
||||
response = await self.stub.StreamMetrics(self._metric_generator())
|
||||
```
|
||||
|
||||
The gRPC library handles the streaming. Each `yield` sends a message. The connection stays open until the generator stops or the network fails.
|
||||
|
||||
The actual metric collection happens in `services/collector/metrics.py` using `psutil`:
|
||||
|
||||
```python
|
||||
def _collect_cpu(self) -> list[MetricValue]:
|
||||
metrics = []
|
||||
cpu_percent = psutil.cpu_percent(interval=None)
|
||||
metrics.append(MetricValue("CPU_PERCENT", cpu_percent))
|
||||
|
||||
per_core = psutil.cpu_percent(interval=None, percpu=True)
|
||||
for i, pct in enumerate(per_core):
|
||||
metrics.append(MetricValue(
|
||||
"CPU_PERCENT_PER_CORE",
|
||||
pct,
|
||||
{"core": str(i)}
|
||||
))
|
||||
return metrics
|
||||
```
|
||||
|
||||
### The Aggregator
|
||||
|
||||
`services/aggregator/main.py` is a gRPC server. The `StreamMetrics` method receives the stream:
|
||||
|
||||
```python
|
||||
async def StreamMetrics(self, request_iterator, context):
|
||||
metrics_received = 0
|
||||
current_batch: list[tuple[str, float, dict]] = []
|
||||
|
||||
async for metric in request_iterator:
|
||||
metrics_received += 1
|
||||
|
||||
metric_type = metrics_pb2.MetricType.Name(metric.type)
|
||||
current_batch.append((metric_type, metric.value, dict(metric.labels)))
|
||||
|
||||
if len(current_batch) >= 20:
|
||||
await self._flush_batch(...)
|
||||
current_batch = []
|
||||
```
|
||||
|
||||
The `request_iterator` is an async iterator over incoming metrics, and the `async for` loop processes them as they arrive. Batching — flushing once 20 metrics have accumulated — reduces the number of storage writes.
|
||||
|
||||
### Retry Logic
|
||||
|
||||
Networks fail. The collector needs to reconnect. The pattern is exponential backoff:
|
||||
|
||||
```python
|
||||
retry_count = 0
|
||||
max_retries = 10
|
||||
base_delay = 1.0
|
||||
|
||||
while self.running:
|
||||
try:
|
||||
await self.stub.StreamMetrics(self._metric_generator())
|
||||
retry_count = 0 # Success - reset counter
|
||||
except grpc.aio.AioRpcError as e:
|
||||
retry_count += 1
|
||||
delay = min(base_delay * (2**retry_count), 60.0) # Cap at 60 seconds
|
||||
await asyncio.sleep(delay)
|
||||
await self.disconnect()
|
||||
await self.connect()
|
||||
```
|
||||
|
||||
The first failure waits 2 seconds, the second 4, the third 8 — doubling on each retry, capped at 60 seconds. After 10 consecutive failures, the collector gives up.
|
||||
|
||||
## Phase 2: Dashboard - Making It Visible
|
||||
|
||||
Metrics in logs are useless. A dashboard makes them useful.
|
||||
|
||||
### The Gateway
|
||||
|
||||
`services/gateway/main.py` is a FastAPI application serving two purposes:
|
||||
|
||||
1. **REST API**: Query current and historical metrics
|
||||
2. **WebSocket**: Push real-time updates to browsers
|
||||
|
||||
The WebSocket connection manager (`services/gateway/main.py:40-67`) tracks active connections:
|
||||
|
||||
```python
|
||||
class ConnectionManager:
|
||||
def __init__(self):
|
||||
self.active_connections: list[WebSocket] = []
|
||||
|
||||
async def broadcast(self, message: dict) -> None:
|
||||
data = json.dumps(message)
|
||||
for connection in self.active_connections:
|
||||
await connection.send_text(data)
|
||||
```
|
||||
|
||||
### Event to WebSocket Bridge
|
||||
|
||||
The gateway subscribes to the same event stream as alerts. When a metric event arrives, it broadcasts to all connected browsers:
|
||||
|
||||
```python
|
||||
async def event_listener():
|
||||
async with get_subscriber(topics=["metrics.raw", "alerts.*"]) as subscriber:
|
||||
async for event in subscriber.consume():
|
||||
await manager.broadcast({
|
||||
"type": "metrics",
|
||||
"data": event.payload,
|
||||
"timestamp": event.timestamp.isoformat(),
|
||||
})
|
||||
```
|
||||
|
||||
This runs as a background task, started in the FastAPI lifespan handler (`services/gateway/main.py:145-175`).
|
||||
|
||||
### Handling Partial Batches
|
||||
|
||||
The aggregator batches metrics (flush every 20). This means a single collection cycle might arrive as multiple events. The dashboard needs complete machine state, not partial updates.
|
||||
|
||||
Solution: merge incoming metrics into a cache (`services/gateway/main.py:108-120`):
|
||||
|
||||
```python
|
||||
machine_metrics_cache: dict[str, dict] = {}
|
||||
|
||||
# In event_listener:
|
||||
machine_id = event.payload.get("machine_id", "")
|
||||
incoming_metrics = event.payload.get("metrics", {})
|
||||
|
||||
if machine_id not in machine_metrics_cache:
|
||||
machine_metrics_cache[machine_id] = {}
|
||||
machine_metrics_cache[machine_id].update(incoming_metrics)
|
||||
```
|
||||
|
||||
New metrics merge with existing. The broadcast includes the full merged state.
|
||||
|
||||
## Phase 3: Alerts - Adding Intelligence
|
||||
|
||||
The alerts service subscribes to metric events and evaluates them against rules.
|
||||
|
||||
### Rule Evaluation
|
||||
|
||||
`services/alerts/main.py` defines an `AlertEvaluator` class:
|
||||
|
||||
```python
|
||||
class AlertEvaluator:
|
||||
OPERATORS = {
|
||||
"gt": lambda v, t: v > t,
|
||||
"lt": lambda v, t: v < t,
|
||||
"gte": lambda v, t: v >= t,
|
||||
"lte": lambda v, t: v <= t,
|
||||
"eq": lambda v, t: v == t,
|
||||
}
|
||||
|
||||
def evaluate(self, machine_id: str, metrics: dict[str, float]) -> list[Alert]:
|
||||
new_alerts = []
|
||||
for metric_type, value in metrics.items():
|
||||
rule = self.rules.get(metric_type)
|
||||
if not rule:
|
||||
continue
|
||||
|
||||
op_func = self.OPERATORS.get(rule.operator)
|
||||
if op_func(value, rule.threshold):
|
||||
# Threshold exceeded
|
||||
new_alerts.append(Alert(...))
|
||||
return new_alerts
|
||||
```
|
||||
|
||||
### Avoiding Duplicate Alerts
|
||||
|
||||
If CPU stays above 80% for an hour, we want one alert, not 720 (one per 5-second check).
|
||||
|
||||
The evaluator tracks active alerts:
|
||||
|
||||
```python
|
||||
self.active_alerts: dict[str, Alert] = {} # key: f"{machine_id}:{rule_name}"
|
||||
|
||||
# In evaluate():
|
||||
alert_key = f"{machine_id}:{rule.name}"
|
||||
if op_func(value, rule.threshold):
|
||||
if alert_key not in self.active_alerts:
|
||||
# New alert - trigger it
|
||||
self.active_alerts[alert_key] = alert
|
||||
new_alerts.append(alert)
|
||||
# Otherwise already active - ignore
|
||||
else:
|
||||
# Threshold no longer exceeded - resolve
|
||||
if alert_key in self.active_alerts:
|
||||
del self.active_alerts[alert_key]
|
||||
```
|
||||
|
||||
A new alert triggers only if its key is not already in `active_alerts`. When the metric drops back below the threshold, the entry is removed, so the same alert can trigger again later.
|
||||
|
||||
## Phase 4: Polish - Production Patterns
|
||||
|
||||
### Structured Logging
|
||||
|
||||
Every service uses `shared/logging.py` for structured JSON logging:
|
||||
|
||||
```python
|
||||
logger.info(
|
||||
"stream_completed",
|
||||
machine_id=current_machine,
|
||||
metrics_received=metrics_received,
|
||||
)
|
||||
```
|
||||
|
||||
Output:
|
||||
```json
|
||||
{"event": "stream_completed", "machine_id": "workstation", "metrics_received": 1500, "timestamp": "..."}
|
||||
```
|
||||
|
||||
This is searchable. "Show me all logs where metrics_received > 1000" is a simple query.
|
||||
|
||||
### Health Checks
|
||||
|
||||
Every service has health endpoints. The aggregator uses gRPC health checking (`services/aggregator/main.py:236-240`):
|
||||
|
||||
```python
|
||||
health_servicer = health.HealthServicer()
|
||||
health_servicer.set("", health_pb2.HealthCheckResponse.SERVING)
|
||||
health_servicer.set("MetricsService", health_pb2.HealthCheckResponse.SERVING)
|
||||
health_pb2_grpc.add_HealthServicer_to_server(health_servicer, self.server)
|
||||
```
|
||||
|
||||
The gateway has HTTP health endpoints (`services/gateway/main.py:197-216`):
|
||||
|
||||
```python
|
||||
@app.get("/ready")
|
||||
async def readiness_check():
|
||||
checks = {"gateway": "ok"}
|
||||
|
||||
try:
|
||||
await grpc_stub.GetAllStates(metrics_pb2.Empty(), timeout=2.0)
|
||||
checks["aggregator"] = "ok"
|
||||
except Exception as e:
|
||||
checks["aggregator"] = f"error: {str(e)}"
|
||||
|
||||
return {"status": "ready", "checks": checks}
|
||||
```
|
||||
|
||||
### Graceful Degradation
|
||||
|
||||
The aggregator continues streaming even if storage fails (`services/aggregator/main.py:137-152`):
|
||||
|
||||
```python
|
||||
try:
|
||||
await self.redis.update_machine_state(...)
|
||||
except Exception as e:
|
||||
self.logger.warning("redis_update_failed", error=str(e))
|
||||
# Don't re-raise - continue processing
|
||||
|
||||
try:
|
||||
await self.timescale.insert_metrics(...)
|
||||
except Exception as e:
|
||||
self.logger.warning("timescale_insert_failed", error=str(e))
|
||||
# Don't re-raise - continue processing
|
||||
```
|
||||
|
||||
Redis down? Metrics still flow to TimescaleDB. TimescaleDB down? Metrics still flow to the event stream. This keeps the system partially functional during partial failures.
|
||||
|
||||
### Configuration
|
||||
|
||||
All configuration uses Pydantic with environment variable support (`shared/config.py`):
|
||||
|
||||
```python
|
||||
class CollectorConfig(BaseSettings):
|
||||
machine_id: str = Field(default_factory=lambda: socket.gethostname())
|
||||
aggregator_url: str = "aggregator:50051"
|
||||
collection_interval: int = 5
|
||||
|
||||
model_config = SettingsConfigDict(env_prefix="COLLECTOR_")
|
||||
```
|
||||
|
||||
Set `COLLECTOR_AGGREGATOR_URL=192.168.1.100:50051` and it overrides the default. No code changes for different environments.
|
||||
|
||||
## What Worked
|
||||
|
||||
**The event abstraction.** Adding a new consumer (like the gateway's WebSocket bridge) required zero changes to the aggregator. Subscribe to the topic, process events.
|
||||
|
||||
**Tiered storage.** Redis handles the hot path (dashboard reads). TimescaleDB handles history. Each optimized for its access pattern.
|
||||
|
||||
**Graceful degradation.** During development, I regularly restarted individual services. The system stayed partially functional throughout.
|
||||
|
||||
## What Could Be Better
|
||||
|
||||
**No backpressure.** If the aggregator falls behind, events accumulate in memory. A production system would need flow control.
|
||||
|
||||
**Alert rules are database-only.** Changing thresholds requires database updates. A proper config management system would be better.
|
||||
|
||||
**No authentication.** The gRPC channels are insecure. Production would need TLS and service authentication.
|
||||
|
||||
## Key Files Reference
|
||||
|
||||
| Component | File | Purpose |
|
||||
|-----------|------|---------|
|
||||
| Proto definitions | `proto/metrics.proto` | gRPC service and message definitions |
|
||||
| Collector main | `services/collector/main.py` | gRPC client, streaming logic |
|
||||
| Metric collection | `services/collector/metrics.py` | psutil wrappers |
|
||||
| Aggregator main | `services/aggregator/main.py` | gRPC server, batch processing |
|
||||
| Storage layer | `services/aggregator/storage.py` | Redis + TimescaleDB abstraction |
|
||||
| Gateway main | `services/gateway/main.py` | FastAPI, WebSocket, event bridge |
|
||||
| Alerts main | `services/alerts/main.py` | Event subscription, rule evaluation |
|
||||
| Event abstraction | `shared/events/base.py` | Publisher/subscriber interfaces |
|
||||
| Redis events | `shared/events/redis_pubsub.py` | Redis Pub/Sub implementation |
|
||||
| Configuration | `shared/config.py` | Pydantic settings for all services |
|
||||
| DB initialization | `scripts/init-db.sql` | TimescaleDB schema, hypertables |
|
||||
| Docker setup | `docker-compose.yml` | Full stack orchestration |
|
||||
|
||||
## Running It
|
||||
|
||||
```bash
|
||||
docker compose up
|
||||
```
|
||||
|
||||
Open `http://localhost:8000` for the dashboard. Metrics appear within seconds.
|
||||
|
||||
To add another machine, run the collector pointed at your aggregator:
|
||||
|
||||
```bash
|
||||
COLLECTOR_AGGREGATOR_URL=your-server:50051 python services/collector/main.py
|
||||
```
|
||||
|
||||
It connects, starts streaming, and appears on the dashboard.
|
||||
294
docs/index.html
Normal file
294
docs/index.html
Normal file
@@ -0,0 +1,294 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<title>System Monitor - Documentation</title>
|
||||
<link rel="stylesheet" href="architecture/styles.css">
|
||||
<style>
|
||||
/* Additional styles for docs index */
|
||||
.nav-section {
|
||||
background: var(--bg-secondary);
|
||||
border-radius: 8px;
|
||||
padding: 1.5rem;
|
||||
margin-bottom: 2rem;
|
||||
border: 1px solid var(--border);
|
||||
}
|
||||
|
||||
.nav-section h2 {
|
||||
color: var(--accent);
|
||||
margin-bottom: 1rem;
|
||||
font-size: 1.25rem;
|
||||
}
|
||||
|
||||
.doc-links {
|
||||
display: grid;
|
||||
grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
|
||||
gap: 1rem;
|
||||
}
|
||||
|
||||
.doc-link {
|
||||
display: block;
|
||||
background: var(--bg-card);
|
||||
padding: 1.25rem;
|
||||
border-radius: 8px;
|
||||
text-decoration: none;
|
||||
border: 1px solid var(--border);
|
||||
transition: border-color 0.2s, transform 0.2s;
|
||||
}
|
||||
|
||||
.doc-link:hover {
|
||||
border-color: var(--accent);
|
||||
transform: translateY(-2px);
|
||||
}
|
||||
|
||||
.doc-link h3 {
|
||||
color: var(--text-primary);
|
||||
margin-bottom: 0.5rem;
|
||||
font-size: 1rem;
|
||||
}
|
||||
|
||||
.doc-link p {
|
||||
color: var(--text-secondary);
|
||||
font-size: 0.875rem;
|
||||
line-height: 1.5;
|
||||
}
|
||||
|
||||
.doc-link .tag {
|
||||
display: inline-block;
|
||||
background: var(--accent-secondary);
|
||||
color: var(--text-primary);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 3px;
|
||||
font-size: 0.75rem;
|
||||
margin-top: 0.75rem;
|
||||
}
|
||||
|
||||
.section-divider {
|
||||
border: none;
|
||||
border-top: 1px solid var(--border);
|
||||
margin: 2rem 0;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<header>
|
||||
<h1>System Monitoring Platform</h1>
|
||||
<p class="subtitle">Documentation</p>
|
||||
</header>
|
||||
|
||||
<main>
|
||||
<!-- Explainer Articles -->
|
||||
<section class="nav-section">
|
||||
<h2>Explainer Articles</h2>
|
||||
<div class="doc-links">
|
||||
<a href="explainer/sysmonstm-from-start-to-finish.md" class="doc-link">
|
||||
<h3>sysmonstm: From Start to Finish</h3>
|
||||
<p>The complete story of building this monitoring platform. Architecture decisions, trade-offs, and code walkthrough from MVP to production patterns.</p>
|
||||
<span class="tag">Article</span>
|
||||
</a>
|
||||
<a href="explainer/other-applications.md" class="doc-link">
|
||||
<h3>Same Patterns, Different Domains</h3>
|
||||
<p>How the same architecture applies to payment processing systems and the Deskmeter workspace timer. Domain mapping and implementation paths.</p>
|
||||
<span class="tag">Article</span>
|
||||
</a>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<hr class="section-divider">
|
||||
|
||||
<!-- Architecture Diagrams -->
|
||||
<section class="graph-section" id="overview">
|
||||
<div class="graph-header-row">
|
||||
<h2>System Overview</h2>
|
||||
<a href="architecture/graph.html?g=01-system-overview" class="view-btn">View Full</a>
|
||||
</div>
|
||||
<a href="architecture/graph.html?g=01-system-overview" class="graph-preview">
|
||||
<img src="architecture/01-system-overview.svg" alt="System Overview">
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>High-level architecture showing all services, data stores, and communication patterns.</p>
|
||||
<h4>Key Components</h4>
|
||||
<ul>
|
||||
<li><strong>Collector</strong>: Runs on each monitored machine, streams metrics via gRPC</li>
|
||||
<li><strong>Aggregator</strong>: Central gRPC server, receives streams, normalizes data</li>
|
||||
<li><strong>Gateway</strong>: FastAPI service, WebSocket for browser, REST for queries</li>
|
||||
<li><strong>Alerts</strong>: Subscribes to events, evaluates thresholds, triggers actions</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="graph-section" id="data-flow">
|
||||
<div class="graph-header-row">
|
||||
<h2>Data Flow Pipeline</h2>
|
||||
<a href="architecture/graph.html?g=02-data-flow" class="view-btn">View Full</a>
|
||||
</div>
|
||||
<a href="architecture/graph.html?g=02-data-flow" class="graph-preview">
|
||||
<img src="architecture/02-data-flow.svg" alt="Data Flow">
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>How metrics flow from collection through storage with different retention tiers.</p>
|
||||
<h4>Storage Tiers</h4>
|
||||
<table class="details-table">
|
||||
<thead>
|
||||
<tr><th>Tier</th><th>Resolution</th><th>Retention</th><th>Use Case</th></tr>
|
||||
</thead>
|
||||
<tbody>
|
||||
<tr>
|
||||
<td>Hot (Redis)</td>
|
||||
<td>5s</td>
|
||||
<td>5 min</td>
|
||||
<td>Current state, live dashboard</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>Raw (TimescaleDB)</td>
|
||||
<td>5s</td>
|
||||
<td>24h</td>
|
||||
<td>Recent detailed analysis</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>1-min Aggregates</td>
|
||||
<td>1m</td>
|
||||
<td>7d</td>
|
||||
<td>Week view, trends</td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td>1-hour Aggregates</td>
|
||||
<td>1h</td>
|
||||
<td>90d</td>
|
||||
<td>Long-term analysis</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="graph-section" id="deployment">
|
||||
<div class="graph-header-row">
|
||||
<h2>Deployment Architecture</h2>
|
||||
<a href="architecture/graph.html?g=03-deployment" class="view-btn">View Full</a>
|
||||
</div>
|
||||
<a href="architecture/graph.html?g=03-deployment" class="graph-preview">
|
||||
<img src="architecture/03-deployment.svg" alt="Deployment">
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>Deployment options from local development to AWS production.</p>
|
||||
<h4>Environments</h4>
|
||||
<ul>
|
||||
<li><strong>Local Dev</strong>: Kind + Tilt for K8s, or Docker Compose</li>
|
||||
<li><strong>Demo (EC2)</strong>: Docker Compose on t2.small at sysmonstm.mcrn.ar</li>
|
||||
<li><strong>Lambda Pipeline</strong>: SQS-triggered aggregation for data processing experience</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="graph-section" id="grpc">
|
||||
<div class="graph-header-row">
|
||||
<h2>gRPC Service Definitions</h2>
|
||||
<a href="architecture/graph.html?g=04-grpc-services" class="view-btn">View Full</a>
|
||||
</div>
|
||||
<a href="architecture/graph.html?g=04-grpc-services" class="graph-preview">
|
||||
<img src="architecture/04-grpc-services.svg" alt="gRPC Services">
|
||||
</a>
|
||||
<div class="graph-details">
|
||||
<p>Protocol Buffer service and message definitions.</p>
|
||||
<h4>Services</h4>
|
||||
<ul>
|
||||
<li><strong>MetricsService</strong>: Client-side streaming for metrics ingestion</li>
|
||||
<li><strong>ControlService</strong>: Bidirectional streaming for collector control</li>
|
||||
<li><strong>ConfigService</strong>: Server-side streaming for config updates</li>
|
||||
</ul>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<hr class="section-divider">
|
||||
|
||||
<section class="findings-section">
|
||||
<h2>Interview Talking Points</h2>
|
||||
<div class="findings-grid">
|
||||
<article class="finding-card">
|
||||
<h3>Domain Mapping</h3>
|
||||
<ul>
|
||||
<li>Machine = Payment Processor</li>
|
||||
<li>Metrics Stream = Transaction Stream</li>
|
||||
<li>Thresholds = Fraud Detection</li>
|
||||
<li>Aggregator = Payment Hub</li>
|
||||
</ul>
|
||||
</article>
|
||||
<article class="finding-card">
|
||||
<h3>gRPC Patterns</h3>
|
||||
<ul>
|
||||
<li>Client streaming (metrics)</li>
|
||||
<li>Server streaming (config)</li>
|
||||
<li>Bidirectional (control)</li>
|
||||
<li>Health checking</li>
|
||||
</ul>
|
||||
</article>
|
||||
<article class="finding-card">
|
||||
<h3>Event-Driven</h3>
|
||||
<ul>
|
||||
<li>Redis Pub/Sub (current)</li>
|
||||
<li>Abstraction for Kafka switch</li>
|
||||
<li>Decoupled alert processing</li>
|
||||
<li>Real-time WebSocket push</li>
|
||||
</ul>
|
||||
</article>
|
||||
<article class="finding-card">
|
||||
<h3>Resilience</h3>
|
||||
<ul>
|
||||
<li>Collectors are independent</li>
|
||||
<li>Graceful degradation</li>
|
||||
<li>Retry with backoff</li>
|
||||
<li>Health checks everywhere</li>
|
||||
</ul>
|
||||
</article>
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<section class="tech-section">
|
||||
<h2>Technology Stack</h2>
|
||||
<div class="tech-grid">
|
||||
<div class="tech-column">
|
||||
<h3>Core</h3>
|
||||
<ul>
|
||||
<li>Python 3.11+</li>
|
||||
<li>FastAPI</li>
|
||||
<li>gRPC / protobuf</li>
|
||||
<li>asyncio</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tech-column">
|
||||
<h3>Data</h3>
|
||||
<ul>
|
||||
<li>TimescaleDB</li>
|
||||
<li>Redis</li>
|
||||
<li>Redis Pub/Sub</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tech-column">
|
||||
<h3>Infrastructure</h3>
|
||||
<ul>
|
||||
<li>Docker</li>
|
||||
<li>Kubernetes</li>
|
||||
<li>Kind + Tilt</li>
|
||||
<li>Terraform</li>
|
||||
</ul>
|
||||
</div>
|
||||
<div class="tech-column">
|
||||
<h3>CI/CD</h3>
|
||||
<ul>
|
||||
<li>Woodpecker CI</li>
|
||||
<li>Kustomize</li>
|
||||
<li>Container Registry</li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</section>
|
||||
</main>
|
||||
|
||||
<footer>
|
||||
<p>System Monitoring Platform - Documentation</p>
|
||||
<p class="date">Generated: <time datetime="2025-12-31">December 2025</time></p>
|
||||
</footer>
|
||||
</body>
|
||||
</html>
|
||||
Reference in New Issue
Block a user