// Server pipeline — Phase 2 (stub) + planned architecture
// Receiver machine (X11, RTX 3080, NVDEC)
//
// NOTE(review): this file had its newlines stripped, which broke it: DOT's
// "//" comments extend to end-of-line, so the whole collapsed first line was
// commented out and the split "// Flow — planned" comment left stray tokens.
// Restored to conventional one-statement-per-line formatting; all node/edge
// declarations and label strings are unchanged.
digraph server_pipeline {
    // Global styling (Catppuccin-like dark palette)
    graph [fontname="monospace" bgcolor="#1e1e2e" rankdir=TB pad="0.6" splines=polyline]
    node [fontname="monospace" fontcolor="#cdd6f4" style=filled shape=box fillcolor="#313244" color="#585b70" margin="0.25,0.12"]
    edge [color="#585b70" fontname="monospace" fontcolor="#a6adc8"]

    // External endpoints (parallelograms = outside this process)
    net [label="TCP :4444" shape=parallelogram fillcolor="#1e2a3e" color="#89b4fa"]
    python [label="Python app\n(stream/manager.py)" shape=parallelogram fillcolor="#2a2a3e" color="#cba6f7"]

    // What exists today
    subgraph cluster_implemented {
        label="Implemented (Phase 2)" fontcolor="#a6e3a1" color="#a6e3a1" fontname="monospace"
        listener [label="Listener\n─────────────\nTCP accept loop\nspawns task per client\nreads WirePacket headers\ncounts video/audio pkts\nlogs keyframes + ts" fillcolor="#1e2d3e" color="#89b4fa"]
    }

    // Future components (dashed cluster + dashed edges below)
    subgraph cluster_planned {
        label="Planned" fontcolor="#f38ba8" color="#f38ba8" fontname="monospace" style=dashed
        decoder [label="Decoder (Phase 3)\n─────────────\nNVDEC H.264 → NV12\nGPU frames" fillcolor="#2d1e1e" color="#f38ba8"]
        scene [label="Scene Detector (Phase 3)\n─────────────\nffmpeg select filter\nin-process (no subprocess)\nJPEG → frames/\nframes/index.json" fillcolor="#2d1e1e" color="#f38ba8"]
        audio [label="Audio Extractor (Phase 4)\n─────────────\nAAC decode\nWAV chunks → audio/" fillcolor="#2d1e1e" color="#f38ba8"]
        writer [label="Segment Writer (Phase 3)\n─────────────\nfMP4 segments → stream/\nkeyframe boundaries" fillcolor="#2d1e1e" color="#f38ba8"]
        framebuf [label="Frame Buffer (Phase 6)\n─────────────\nGPU ring buffer ~300 frames\nscrub: GPU→CPU on demand\n→ /dev/shm/cht_scrub_frame" fillcolor="#2d1e1e" color="#f38ba8"]
        ipc [label="IPC Server (Phase 5)\n─────────────\nUnix socket JSON-lines\ncommands: start/stop/get_frame\nevents: frame_detected/audio_chunk/…" fillcolor="#2d1e1e" color="#f38ba8"]
    }

    // Flow — implemented
    net -> listener [label="WirePacket"]

    // Flow — planned
    listener -> decoder [style=dashed label="H.264 payload"]
    decoder -> scene [style=dashed label="NV12 frame"]
    decoder -> writer [style=dashed label="encoded pkt"]
    decoder -> framebuf [style=dashed label="GPU frame"]
    decoder -> audio [style=dashed label="audio pkt"]
    scene -> ipc [style=dashed label="frame_detected"]
    audio -> ipc [style=dashed label="audio_chunk"]
    writer -> ipc [style=dashed label="segment_completed"]
    ipc -> python [style=dashed label="JSON-lines\n(Unix socket)"]

    // Outputs (folder shapes = filesystem artifacts)
    frames_dir [label="frames/\nindex.json + *.jpg" shape=folder fillcolor="#2a2a3e" color="#585b70"]
    audio_dir [label="audio/\n*.wav chunks" shape=folder fillcolor="#2a2a3e" color="#585b70"]
    stream_dir [label="stream/\n*.mp4 segments" shape=folder fillcolor="#2a2a3e" color="#585b70"]
    shm [label="/dev/shm/cht_scrub_frame\nraw RGBA pixels" shape=folder fillcolor="#2a2a3e" color="#585b70"]

    scene -> frames_dir [style=dashed]
    audio -> audio_dir [style=dashed]
    writer -> stream_dir [style=dashed]
    framebuf -> shm [style=dashed label="get_frame cmd"]
}