Almost back to a working state with the Rust transport

This commit is contained in:
2026-04-09 22:15:16 -03:00
parent ff96dcb4f7
commit 512d8ecef8
13 changed files with 1504 additions and 488 deletions

View File

@@ -11,3 +11,4 @@ tracing-subscriber = { workspace = true }
anyhow = { workspace = true }
ffmpeg = { package = "ffmpeg-next", version = "8" }
nix = { version = "0.29", features = ["signal", "process"] }
libc = "0.2"

View File

@@ -1,16 +1,13 @@
//! Subprocess backend: spawn ffmpeg CLI for capture+encode.
//!
//! Spawns ffmpeg with the same hardware pipeline as `stream_av.py`:
//! Spawns ffmpeg with the same hardware pipeline as `stream_av.sh`:
//! kmsgrab → hwmap=derive_device=vaapi → scale_vaapi → h264_vaapi
//! + PulseAudio desktop audio + mic → amix → AAC
//!
//! ffmpeg outputs NUT format to stdout. We demux that pipe with ffmpeg-next
//! to get proper AVPackets (keyframe flags, timestamps) without parsing
//! bytestreams. NUT is lighter than mpegts — no TS overhead, exact packet
//! metadata in the container layer.
//!
//! This approach works where the direct VAAPI API path fails: hwmap uses
//! fftools' internal AVFilterGraph.hw_device_ctx (removed from public API
//! in ffmpeg 7+), so X2RGB10LE format negotiation succeeds.
use std::os::fd::AsRawFd;
use std::os::unix::io::RawFd;
@@ -21,7 +18,7 @@ use std::sync::Arc;
use anyhow::{Context, Result};
use tracing::{error, info, warn};
use crate::encoder::EncodedPacket;
use crate::encoder::{EncodedPacket, MediaType};
pub struct SubprocessConfig {
pub device: String,
@@ -63,8 +60,6 @@ pub fn run(
.expect("spawn stderr thread");
// Get the raw fd from stdout before handing it to ffmpeg-next.
// ffmpeg-next takes ownership of the input context but we keep the Child
// alive so the fd stays valid.
let stdout = child.stdout.take().expect("stdout piped");
let fd: RawFd = stdout.as_raw_fd();
@@ -79,32 +74,141 @@ pub fn run(
result
}
/// Detect PulseAudio audio sources for capture.
struct AudioSources {
monitor: Option<String>, // desktop audio (speaker tap)
mic: Option<String>, // microphone
pulse_server: String, // PULSE_SERVER env for root
}
/// Probe PulseAudio for capturable audio sources.
///
/// When the process runs under `sudo` (required for kmsgrab) root has no
/// Pulse session of its own, so PULSE_SERVER is pointed at the invoking
/// user's per-UID native socket.
fn detect_audio_sources() -> AudioSources {
    // Prefer the real user's UID from sudo; fall back to our own UID.
    let uid = std::env::var("SUDO_UID")
        .unwrap_or_else(|_| unsafe { libc::getuid() }.to_string());
    let pulse_server = format!("unix:/run/user/{uid}/pulse/native");

    let monitor = detect_monitor_source(&pulse_server);
    let mut mic = detect_default_source(&pulse_server);

    // Some systems report the sink's monitor as the default source; capturing
    // it twice would only duplicate desktop audio, so drop the mic in that case.
    if let (Some(m), Some(d)) = (&monitor, &mic) {
        if m == d {
            mic = None;
        }
    }

    info!("Audio sources — monitor: {:?}, mic: {:?}", monitor, mic);
    AudioSources { monitor, mic, pulse_server }
}
/// Find the monitor source of the default sink (`"<sink>.monitor"`), i.e. a
/// tap on desktop audio output.
///
/// Returns `None` if `pactl` cannot be spawned or no "Default Sink:" line
/// appears in its `info` output.
fn detect_monitor_source(pulse_server: &str) -> Option<String> {
    let output = Command::new("pactl")
        .arg("info")
        .env("PULSE_SERVER", pulse_server)
        .output()
        .ok()?;
    let stdout = String::from_utf8_lossy(&output.stdout);
    for line in stdout.lines() {
        // Strip the label as a prefix instead of `split(':').nth(1)`: the old
        // split truncated sink names containing a colon, and its `?` aborted
        // the whole scan on a malformed line instead of skipping it.
        if let Some(sink) = line.trim_start().strip_prefix("Default Sink:") {
            let sink = sink.trim();
            if !sink.is_empty() {
                return Some(format!("{sink}.monitor"));
            }
        }
    }
    None
}
/// Query PulseAudio for the default capture source (microphone).
/// Returns `None` when `pactl` cannot be spawned or reports nothing.
fn detect_default_source(pulse_server: &str) -> Option<String> {
    let output = Command::new("pactl")
        .args(["get-default-source"])
        .env("PULSE_SERVER", pulse_server)
        .output()
        .ok()?;
    let raw = String::from_utf8_lossy(&output.stdout);
    let name = raw.trim();
    (!name.is_empty()).then(|| name.to_string())
}
fn spawn_ffmpeg(cfg: &SubprocessConfig) -> Result<Child> {
let audio = detect_audio_sources();
let filter = format!(
"hwmap=derive_device=vaapi,scale_vaapi=w={}:h={}:format=nv12,fps={}",
cfg.width, cfg.height, cfg.fps,
);
let mut args: Vec<String> = vec![
// Hardware init
"-init_hw_device".into(), format!("drm=drm:{}", cfg.device),
"-init_hw_device".into(), "vaapi=va@drm".into(),
// Video input (kmsgrab)
"-thread_queue_size".into(), "64".into(),
"-device".into(), cfg.device.clone(),
"-f".into(), "kmsgrab".into(),
"-framerate".into(), cfg.fps.to_string(),
"-i".into(), "-".into(),
];
// Audio inputs
let has_monitor = audio.monitor.is_some();
let has_mic = audio.mic.is_some();
if let Some(ref monitor) = audio.monitor {
args.extend([
"-f".into(), "pulse".into(),
"-thread_queue_size".into(), "1024".into(),
"-i".into(), monitor.clone(),
]);
}
if let Some(ref mic) = audio.mic {
args.extend([
"-f".into(), "pulse".into(),
"-thread_queue_size".into(), "1024".into(),
"-i".into(), mic.clone(),
]);
}
// Audio filter: mix monitor + mic if both present
if has_monitor && has_mic {
args.extend([
"-filter_complex".into(),
"[1:a][2:a]amix=inputs=2:duration=longest[aout]".into(),
"-map".into(), "0:v".into(),
"-map".into(), "[aout]".into(),
]);
} else if has_monitor {
args.extend(["-map".into(), "0:v".into(), "-map".into(), "1:a".into()]);
}
// If no audio: no -map needed, only video output
// Video encoding
args.extend([
"-vf".into(), filter,
"-c:v".into(), "h264_vaapi".into(),
"-qp".into(), cfg.qp.to_string(),
"-g".into(), cfg.gop_size.to_string(),
"-bf".into(), "0".into(),
]);
// Audio encoding (if any audio source)
if has_monitor || has_mic {
args.extend([
"-c:a".into(), "aac".into(),
"-b:a".into(), "128k".into(),
]);
}
// Output
args.extend([
"-flush_packets".into(), "1".into(),
"-fflags".into(), "nobuffer".into(),
"-f".into(), "nut".into(),
"pipe:1".into(),
"-hide_banner".into(),
]);
info!("ffmpeg args: {:?}", args);
let child = Command::new("ffmpeg")
.args([
"-init_hw_device", &format!("drm=drm:{}", cfg.device),
"-init_hw_device", "vaapi=va@drm",
"-thread_queue_size", "64",
"-device", &cfg.device,
"-f", "kmsgrab",
"-framerate", &cfg.fps.to_string(),
"-i", "-",
"-vf", &filter,
"-c:v", "h264_vaapi",
"-qp", &cfg.qp.to_string(),
"-g", &cfg.gop_size.to_string(),
"-bf", "0",
"-flush_packets", "1",
"-fflags", "nobuffer",
"-f", "nut",
"pipe:1",
"-hide_banner",
])
.args(&args)
.env("PULSE_SERVER", &audio.pulse_server)
.stdin(Stdio::null())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
@@ -127,22 +231,34 @@ fn demux_and_send(
let mut input_ctx = ffmpeg::format::input(&pipe_url)
.context("open ffmpeg input from pipe")?;
// Find video stream
let video_stream = input_ctx
.streams()
.best(ffmpeg::media::Type::Video)
.context("no video stream in NUT output")?;
let video_idx = video_stream.index();
let video_tb = video_stream.time_base();
let video_tb_num = video_tb.numerator() as u32;
let video_tb_den = video_tb.denominator() as u32;
let stream_idx = video_stream.index();
let time_base = video_stream.time_base();
let tb_num = time_base.numerator() as u32;
let tb_den = time_base.denominator() as u32;
// Find audio stream (may not exist if no PulseAudio sources found)
let audio_info = input_ctx
.streams()
.best(ffmpeg::media::Type::Audio)
.map(|s| {
let tb = s.time_base();
(s.index(), tb.numerator() as u32, tb.denominator() as u32)
});
info!(
"Subprocess demux ready: stream_idx={}, time_base={}/{}",
stream_idx, tb_num, tb_den
);
if let Some((idx, num, den)) = audio_info {
info!("Demux: video_idx={video_idx} tb={video_tb_num}/{video_tb_den}, \
audio_idx={idx} tb={num}/{den}");
} else {
info!("Demux: video_idx={video_idx} tb={video_tb_num}/{video_tb_den}, no audio");
}
let mut packet_count = 0u64;
let mut video_count = 0u64;
let mut audio_count = 0u64;
for (stream, packet) in input_ctx.packets() {
if stop.load(Ordering::Relaxed) {
@@ -155,36 +271,52 @@ fn demux_and_send(
break;
}
if stream.index() != stream_idx {
continue;
}
let data = match packet.data() {
Some(d) => d.to_vec(),
None => continue,
};
let encoded = EncodedPacket {
data,
pts: packet.pts().unwrap_or(0),
dts: packet.dts().unwrap_or(0),
keyframe: packet.is_key(),
time_base_num: tb_num,
time_base_den: tb_den,
};
let stream_idx = stream.index();
packet_count += 1;
if packet_count % 300 == 1 {
info!("Subprocess: {packet_count} packets encoded");
}
if packet_tx.blocking_send(encoded).is_err() {
info!("Packet channel closed, stopping subprocess pipeline");
break;
if stream_idx == video_idx {
let encoded = EncodedPacket {
media_type: MediaType::Video,
data,
pts: packet.pts().unwrap_or(0),
dts: packet.dts().unwrap_or(0),
keyframe: packet.is_key(),
time_base_num: video_tb_num,
time_base_den: video_tb_den,
};
video_count += 1;
if video_count % 300 == 1 {
info!("Subprocess: {video_count} video, {audio_count} audio packets");
}
if packet_tx.blocking_send(encoded).is_err() {
info!("Packet channel closed");
break;
}
} else if let Some((audio_idx, audio_tb_num, audio_tb_den)) = audio_info {
if stream_idx == audio_idx {
let encoded = EncodedPacket {
media_type: MediaType::Audio,
data,
pts: packet.pts().unwrap_or(0),
dts: packet.dts().unwrap_or(0),
keyframe: packet.is_key(),
time_base_num: audio_tb_num,
time_base_den: audio_tb_den,
};
audio_count += 1;
if packet_tx.blocking_send(encoded).is_err() {
info!("Packet channel closed");
break;
}
}
}
}
info!("Subprocess pipeline stopped ({packet_count} packets)");
info!("Subprocess pipeline stopped ({video_count} video, {audio_count} audio packets)");
Ok(())
}
@@ -224,8 +356,15 @@ fn kill_child(child: &mut Child) {
child.kill().ok();
}
match child.wait() {
Ok(s) => info!("ffmpeg exited: {s}"),
Err(e) => warn!("ffmpeg wait error: {e}"),
// Wait up to 3 seconds, then SIGKILL.
for _ in 0..30 {
if child.try_wait().ok().flatten().is_some() {
info!("ffmpeg exited cleanly");
return;
}
std::thread::sleep(std::time::Duration::from_millis(100));
}
warn!("ffmpeg didn't exit after SIGINT, killing");
child.kill().ok();
let _ = child.wait();
}

View File

@@ -310,6 +310,7 @@ impl EncoderInner {
let mut encoded = ffmpeg::Packet::empty();
while self.encoder.receive_packet(&mut encoded).is_ok() {
packets.push(EncodedPacket {
media_type: MediaType::Video,
data: encoded.data().unwrap_or(&[]).to_vec(),
pts: encoded.pts().unwrap_or(0),
dts: encoded.dts().unwrap_or(0),
@@ -327,8 +328,16 @@ impl EncoderInner {
}
}
/// An encoded video packet ready for transport.
/// Type of media stream in an encoded packet.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MediaType {
    /// Compressed video (e.g. H.264 from the h264_vaapi encoder).
    Video,
    /// Compressed audio (e.g. AAC from the mixed PulseAudio inputs).
    Audio,
}
/// An encoded media packet ready for transport.
pub struct EncodedPacket {
pub media_type: MediaType,
pub data: Vec<u8>,
pub pts: i64,
pub dts: i64,

View File

@@ -1,3 +1,5 @@
use std::time::Duration;
use anyhow::Result;
use cht_common::protocol::{
self, AudioParams, ControlMessage, PacketHeader, PacketType, VideoParams, WirePacket,
@@ -5,14 +7,14 @@ use cht_common::protocol::{
};
use tokio::io::{AsyncWriteExt, BufWriter};
use tokio::net::TcpStream;
use tracing::info;
use tracing::{info, warn};
use cht_client::backends::Backend;
use cht_client::capture::CaptureConfig;
use cht_client::encoder::EncoderConfig;
use cht_client::encoder::{EncoderConfig, MediaType};
use cht_client::pipeline::Pipeline;
const DEFAULT_SERVER: &str = "mcrndeb:4444";
const DEFAULT_SERVER: &str = "mcrndeb:4447";
#[tokio::main]
async fn main() -> Result<()> {
@@ -35,9 +37,8 @@ async fn main() -> Result<()> {
Backend::Subprocess
};
info!("Connecting to {server_addr}...");
let stream = TcpStream::connect(&server_addr).await?;
info!("Connected");
// Wait for the server to become available.
let stream = wait_for_server(&server_addr).await?;
let mut writer = BufWriter::new(stream);
@@ -69,6 +70,7 @@ async fn main() -> Result<()> {
// Forward encoded packets to the server
let mut video_count = 0u64;
let mut audio_count = 0u64;
let mut keepalive_interval = tokio::time::interval(std::time::Duration::from_secs(5));
loop {
@@ -76,9 +78,13 @@ async fn main() -> Result<()> {
pkt = packet_rx.recv() => {
match pkt {
Some(encoded) => {
let pkt_type = match encoded.media_type {
MediaType::Video => PacketType::Video,
MediaType::Audio => PacketType::Audio,
};
let wire = WirePacket {
header: PacketHeader {
packet_type: PacketType::Video,
packet_type: pkt_type,
flags: if encoded.keyframe { FLAG_KEYFRAME } else { 0 },
length: encoded.data.len() as u32,
timestamp_ns: pts_to_ns(
@@ -90,11 +96,18 @@ async fn main() -> Result<()> {
payload: encoded.data,
};
protocol::write_packet(&mut writer, &wire).await?;
video_count += 1;
if video_count % 300 == 1 {
info!("Sent {video_count} video packets");
writer.flush().await?;
match encoded.media_type {
MediaType::Video => {
video_count += 1;
if video_count % 300 == 1 {
info!("Sent {video_count} video, {audio_count} audio packets");
writer.flush().await?;
}
}
MediaType::Audio => {
audio_count += 1;
}
}
}
None => {
@@ -115,17 +128,56 @@ async fn main() -> Result<()> {
}
}
pipeline.stop();
// Stop pipeline first (signals ffmpeg, joins thread).
// Give it a few seconds — if ffmpeg hangs, don't block forever.
info!("Stopping pipeline...");
let stop_handle = tokio::task::spawn_blocking(move || {
pipeline.stop();
});
let _ = tokio::time::timeout(Duration::from_secs(5), stop_handle).await;
let stop = ControlMessage::SessionStop;
protocol::write_packet(&mut writer, &stop.to_wire_packet()?).await?;
writer.flush().await?;
writer.shutdown().await?;
info!("Sent session_stop, {video_count} video packets total");
// Try to send SessionStop so the server closes cleanly.
let stop_msg = ControlMessage::SessionStop;
match tokio::time::timeout(
Duration::from_secs(2),
async {
protocol::write_packet(&mut writer, &stop_msg.to_wire_packet()?).await?;
writer.flush().await?;
writer.shutdown().await?;
Ok::<_, anyhow::Error>(())
}
).await {
Ok(Ok(())) => {}
Ok(Err(e)) => warn!("Error sending session_stop: {e}"),
Err(_) => warn!("Timeout sending session_stop"),
}
info!("Done — {video_count} video + {audio_count} audio packets");
Ok(())
}
/// Retry connecting to `addr` on a 2-second cadence until the server accepts,
/// bailing out early if Ctrl-C arrives while waiting.
async fn wait_for_server(addr: &str) -> Result<TcpStream> {
    info!("Waiting for server at {addr}...");
    let mut ticker = tokio::time::interval(Duration::from_secs(2));
    loop {
        // The interval's first tick completes immediately, so the initial
        // connection attempt is not delayed.
        tokio::select! {
            _ = ticker.tick() => {}
            _ = tokio::signal::ctrl_c() => {
                anyhow::bail!("interrupted while waiting for server");
            }
        }
        match TcpStream::connect(addr).await {
            Err(e) => info!("Server not ready ({e}), retrying..."),
            Ok(stream) => {
                info!("Connected to {addr}");
                return Ok(stream);
            }
        }
    }
}
fn pts_to_ns(pts: i64, tb_num: u32, tb_den: u32) -> u64 {
if tb_den == 0 {
return 0;
@@ -134,10 +186,23 @@ fn pts_to_ns(pts: i64, tb_num: u32, tb_den: u32) -> u64 {
}
fn session_id() -> String {
// Match Python's time.strftime("%Y%m%d_%H%M%S") format
use std::time::{SystemTime, UNIX_EPOCH};
let secs = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
format!("{secs}")
.as_secs() as libc::time_t;
let mut tm: libc::tm = unsafe { std::mem::zeroed() };
unsafe { libc::localtime_r(&secs, &mut tm) };
let mut buf = [0u8; 20];
let fmt = b"%Y%m%d_%H%M%S\0";
let len = unsafe {
libc::strftime(
buf.as_mut_ptr() as *mut libc::c_char,
buf.len(),
fmt.as_ptr() as *const libc::c_char,
&tm,
)
};
String::from_utf8_lossy(&buf[..len]).to_string()
}