refactor: cleanup rtmp setup

This commit is contained in:
2025-06-12 09:44:25 +01:00
parent 3a38b05630
commit ad20fbc052
16 changed files with 501 additions and 366 deletions

View File

@ -5,11 +5,13 @@ use log::info;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::runtime::Handle;
use uuid::Uuid;
pub async fn listen(out_dir: String, path: PathBuf, overseer: Arc<dyn Overseer>) -> Result<()> {
info!("Sending file: {}", path.display());
let info = ConnectionInfo {
id: Uuid::new_v4(),
ip_addr: "127.0.0.1:6969".to_string(),
endpoint: "file-input".to_owned(),
app_name: "".to_string(),

View File

@ -1,10 +1,12 @@
use crate::overseer::Overseer;
use crate::pipeline::runner::PipelineRunner;
use log::{error, info};
use log::{error, info, warn};
use serde::{Deserialize, Serialize};
use std::io::Read;
use std::sync::Arc;
use std::time::Instant;
use tokio::runtime::Handle;
use uuid::Uuid;
pub mod file;
#[cfg(feature = "rtmp")]
@ -16,6 +18,9 @@ pub mod test;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ConnectionInfo {
/// Unique ID of this connection / pipeline
pub id: Uuid,
/// Endpoint of the ingress
pub endpoint: String,
@ -36,33 +41,103 @@ pub fn spawn_pipeline(
seer: Arc<dyn Overseer>,
reader: Box<dyn Read + Send>,
) {
info!("New client connected: {}", &info.ip_addr);
let seer = seer.clone();
let out_dir = out_dir.to_string();
std::thread::spawn(move || unsafe {
match PipelineRunner::new(handle, out_dir, seer, info, reader) {
Ok(mut pl) => loop {
match pl.run() {
Ok(c) => {
if !c {
if let Err(e) = pl.flush() {
error!("Pipeline flush failed: {}", e);
}
break;
}
}
Err(e) => {
if let Err(e) = pl.flush() {
error!("Pipeline flush failed: {}", e);
}
error!("Pipeline run failed: {}", e);
break;
}
}
},
match PipelineRunner::new(handle, out_dir, seer, info, reader, None) {
Ok(pl) => match run_pipeline(pl) {
Ok(_) => {}
Err(e) => {
error!("Failed to create PipelineRunner: {}", e);
error!("Failed to run PipelineRunner: {}", e);
}
},
Err(e) => {
error!("Failed to create PipelineRunner: {}", e);
}
});
}
}
/// Drive a pipeline to completion on its own dedicated, named OS thread.
///
/// Returns an error only if the OS thread could not be spawned; the
/// pipeline itself runs detached.
pub fn run_pipeline(mut pl: PipelineRunner) -> anyhow::Result<()> {
    info!("New client connected: {}", &pl.connection.ip_addr);

    // Name the thread after the connection id so it is identifiable in
    // debuggers / thread dumps.
    let thread_name = format!("pipeline-{}", pl.connection.id);
    let builder = std::thread::Builder::new().name(thread_name);
    builder.spawn(move || {
        pl.run();
    })?;
    Ok(())
}
/// Common buffered reader functionality for ingress sources.
///
/// Accumulates incoming media bytes up to a hard size cap and tracks
/// simple throughput statistics that are periodically logged.
pub struct BufferedReader {
    // Pending bytes, oldest first; bounded by `max_buffer_size`.
    pub buf: Vec<u8>,
    // Hard cap on `buf`; oldest bytes are dropped once exceeded.
    pub max_buffer_size: usize,
    // Timestamp of the last periodic throughput log line.
    pub last_buffer_log: Instant,
    // Bytes ingested since the last log line (reset on each log).
    pub bytes_processed: u64,
    // Packets ingested since the last log line (reset on each log).
    pub packets_received: u64,
    // Human-readable label ("RTMP", "SRT", ...) used in log output.
    pub source_name: &'static str,
}
impl BufferedReader {
    /// Create a reader with an initial allocation of `capacity` bytes,
    /// a hard cap of `max_size` bytes, and a source label for log lines.
    pub fn new(capacity: usize, max_size: usize, source_name: &'static str) -> Self {
        Self {
            buf: Vec::with_capacity(capacity),
            max_buffer_size: max_size,
            last_buffer_log: Instant::now(),
            bytes_processed: 0,
            packets_received: 0,
            source_name,
        }
    }

    /// Add data to buffer with size limit and performance tracking.
    ///
    /// Oldest bytes are discarded when the cap would be exceeded, so the
    /// buffer always holds the most recent data.
    pub fn add_data(&mut self, data: &[u8]) {
        // Count the full incoming chunk for throughput stats, even if part
        // of it is discarded below.
        self.bytes_processed += data.len() as u64;
        self.packets_received += 1;

        // If a single chunk is larger than the whole cap, keep only its
        // newest `max_buffer_size` bytes.
        let data = if data.len() > self.max_buffer_size {
            &data[data.len() - self.max_buffer_size..]
        } else {
            data
        };

        if self.buf.len() + data.len() > self.max_buffer_size {
            // Clamp to the buffered length so `drain` cannot be handed an
            // out-of-range upper bound (which would panic).
            let bytes_to_drop =
                (self.buf.len() + data.len() - self.max_buffer_size).min(self.buf.len());
            warn!(
                "{} buffer full ({} bytes), dropping {} oldest bytes",
                self.source_name,
                self.buf.len(),
                bytes_to_drop
            );
            self.buf.drain(..bytes_to_drop);
        }
        self.buf.extend(data);

        // Log buffer status every 5 seconds.
        if self.last_buffer_log.elapsed().as_secs() >= 5 {
            let buffer_util = (self.buf.len() as f32 / self.max_buffer_size as f32) * 100.0;
            let elapsed = self.last_buffer_log.elapsed();
            let mbps = (self.bytes_processed as f64 * 8.0) / (elapsed.as_secs_f64() * 1_000_000.0);
            let pps = self.packets_received as f64 / elapsed.as_secs_f64();
            info!(
                "{} ingress: {:.1} Mbps, {:.1} packets/sec, buffer: {}% ({}/{} bytes)",
                self.source_name,
                mbps,
                pps,
                buffer_util as u32,
                self.buf.len(),
                self.max_buffer_size
            );
            // Reset counters for the next interval.
            self.last_buffer_log = Instant::now();
            self.bytes_processed = 0;
            self.packets_received = 0;
        }
    }

    /// Read data from buffer, filling the entire output buffer before
    /// returning; returns 0 when not enough data is buffered yet.
    pub fn read_buffered(&mut self, buf: &mut [u8]) -> usize {
        if self.buf.len() >= buf.len() {
            let drain = self.buf.drain(..buf.len());
            buf.copy_from_slice(drain.as_slice());
            buf.len()
        } else {
            0
        }
    }
}

View File

@ -1,111 +1,77 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::ingress::{BufferedReader, ConnectionInfo};
use crate::overseer::Overseer;
use crate::pipeline::runner::PipelineRunner;
use anyhow::{bail, Result};
use log::{error, info, warn};
use log::{error, info};
use rml_rtmp::handshake::{Handshake, HandshakeProcessResult, PeerType};
use rml_rtmp::sessions::{
ServerSession, ServerSessionConfig, ServerSessionEvent, ServerSessionResult,
};
use std::collections::VecDeque;
use std::io::{ErrorKind, Read, Write};
use std::net::TcpStream;
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::net::TcpListener;
use tokio::runtime::Handle;
use tokio::time::Instant;
use uuid::Uuid;
const MAX_MEDIA_BUFFER_SIZE: usize = 10 * 1024 * 1024; // 10MB limit
#[derive(PartialEq, Eq, Clone, Hash)]
struct RtmpPublishedStream(String, String);
struct RtmpClient {
socket: std::net::TcpStream,
media_buf: Vec<u8>,
socket: TcpStream,
buffer: BufferedReader,
session: ServerSession,
msg_queue: VecDeque<ServerSessionResult>,
reader_buf: [u8; 4096],
pub published_stream: Option<RtmpPublishedStream>,
last_buffer_log: Instant,
bytes_processed: u64,
frames_received: u64,
}
impl RtmpClient {
/// Add data to media buffer with size limit to prevent unbounded growth
fn add_to_media_buffer(&mut self, data: &[u8]) {
if self.media_buf.len() + data.len() > MAX_MEDIA_BUFFER_SIZE {
let bytes_to_drop = (self.media_buf.len() + data.len()) - MAX_MEDIA_BUFFER_SIZE;
warn!("RTMP buffer full ({} bytes), dropping {} oldest bytes",
self.media_buf.len(), bytes_to_drop);
self.media_buf.drain(..bytes_to_drop);
}
self.media_buf.extend(data);
// Update performance counters
self.bytes_processed += data.len() as u64;
self.frames_received += 1;
// Log buffer status every 5 seconds
if self.last_buffer_log.elapsed().as_secs() >= 5 {
let buffer_util = (self.media_buf.len() as f32 / MAX_MEDIA_BUFFER_SIZE as f32) * 100.0;
let elapsed = self.last_buffer_log.elapsed();
let mbps = (self.bytes_processed as f64 * 8.0) / (elapsed.as_secs_f64() * 1_000_000.0);
let fps = self.frames_received as f64 / elapsed.as_secs_f64();
info!(
"RTMP ingress: {:.1} Mbps, {:.1} frames/sec, buffer: {}% ({}/{} bytes)",
mbps, fps, buffer_util as u32, self.media_buf.len(), MAX_MEDIA_BUFFER_SIZE
);
// Reset counters
self.last_buffer_log = Instant::now();
self.bytes_processed = 0;
self.frames_received = 0;
}
/// Build an RTMP client around an accepted TCP socket.
///
/// The socket is switched to blocking mode because this client is driven
/// by a dedicated thread using synchronous reads/writes. Any results the
/// fresh `ServerSession` produces up-front are queued for later dispatch.
pub fn new(socket: TcpStream) -> Result<Self> {
    socket.set_nonblocking(false)?;
    let cfg = ServerSessionConfig::new();
    let (ses, res) = ServerSession::new(cfg)?;
    Ok(Self {
        socket,
        session: ses,
        // 1 MiB initial capacity, capped at MAX_MEDIA_BUFFER_SIZE.
        buffer: BufferedReader::new(1024 * 1024, MAX_MEDIA_BUFFER_SIZE, "RTMP"),
        msg_queue: VecDeque::from(res),
        reader_buf: [0; 4096],
        // Set once a publish request has been accepted.
        published_stream: None,
    })
}
async fn start(mut socket: TcpStream) -> Result<Self> {
pub fn handshake(&mut self) -> Result<()> {
let mut hs = Handshake::new(PeerType::Server);
let exchange = hs.generate_outbound_p0_and_p1()?;
socket.write_all(&exchange).await?;
self.socket.write_all(&exchange)?;
let mut buf = [0; 4096];
loop {
let r = socket.read(&mut buf).await?;
let r = self.socket.read(&mut buf)?;
if r == 0 {
bail!("EOF reached while reading");
}
match hs.process_bytes(&buf[..r])? {
HandshakeProcessResult::InProgress { response_bytes } => {
socket.write_all(&response_bytes).await?;
self.socket.write_all(&response_bytes)?;
}
HandshakeProcessResult::Completed {
response_bytes,
remaining_bytes,
} => {
socket.write_all(&response_bytes).await?;
self.socket.write_all(&response_bytes)?;
let cfg = ServerSessionConfig::new();
let (mut ses, mut res) = ServerSession::new(cfg)?;
let q = ses.handle_input(&remaining_bytes)?;
res.extend(q);
let ret = Self {
socket: socket.into_std()?,
media_buf: vec![],
session: ses,
msg_queue: VecDeque::from(res),
reader_buf: [0; 4096],
published_stream: None,
last_buffer_log: Instant::now(),
bytes_processed: 0,
frames_received: 0,
};
return Ok(ret);
let q = self.session.handle_input(&remaining_bytes)?;
self.msg_queue.extend(q);
return Ok(());
}
}
}
@ -154,12 +120,8 @@ impl RtmpClient {
}
ServerSessionResult::RaisedEvent(ev) => self.handle_event(ev)?,
ServerSessionResult::UnhandleableMessageReceived(m) => {
// Log unhandleable messages for debugging
// Log unhandleable messages for debugging
error!("Received unhandleable message with {} bytes", m.data.len());
// Only append data if it looks like valid media data
if !m.data.is_empty() && m.data.len() > 4 {
self.add_to_media_buffer(&m.data);
}
}
}
}
@ -209,20 +171,10 @@ impl RtmpClient {
);
}
ServerSessionEvent::AudioDataReceived { data, .. } => {
// Validate audio data before adding to buffer
if !data.is_empty() {
self.add_to_media_buffer(&data);
} else {
error!("Received empty audio data");
}
self.buffer.add_data(&data);
}
ServerSessionEvent::VideoDataReceived { data, .. } => {
// Validate video data before adding to buffer
if !data.is_empty() {
self.add_to_media_buffer(&data);
} else {
error!("Received empty video data");
}
self.buffer.add_data(&data);
}
ServerSessionEvent::UnhandleableAmf0Command { .. } => {}
ServerSessionEvent::PlayStreamRequested { request_id, .. } => {
@ -241,18 +193,15 @@ impl RtmpClient {
impl Read for RtmpClient {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
// block this thread until something comes into [media_buf]
while self.media_buf.is_empty() {
// Block until we have enough data to fill the buffer
while self.buffer.buf.len() < buf.len() {
if let Err(e) = self.read_data() {
error!("Error reading data: {}", e);
return Ok(0);
};
}
let to_read = buf.len().min(self.media_buf.len());
let drain = self.media_buf.drain(..to_read);
buf[..to_read].copy_from_slice(drain.as_slice());
Ok(to_read)
Ok(self.buffer.read_buffered(buf))
}
}
@ -261,7 +210,7 @@ pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>)
info!("RTMP listening on: {}", &addr);
while let Ok((socket, ip)) = listener.accept().await {
let mut cc = RtmpClient::start(socket).await?;
let mut cc = RtmpClient::new(socket.into_std()?)?;
let addr = addr.clone();
let overseer = overseer.clone();
let out_dir = out_dir.clone();
@ -269,24 +218,36 @@ pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>)
std::thread::Builder::new()
.name("rtmp-client".to_string())
.spawn(move || {
if let Err(e) = cc.read_until_publish_request(Duration::from_secs(10)) {
error!("{}", e);
} else {
let pr = cc.published_stream.as_ref().unwrap();
let info = ConnectionInfo {
ip_addr: ip.to_string(),
endpoint: addr.clone(),
app_name: pr.0.clone(),
key: pr.1.clone(),
};
spawn_pipeline(
handle,
info,
out_dir.clone(),
overseer.clone(),
Box::new(cc),
);
if let Err(e) = cc.handshake() {
bail!("Error during handshake: {}", e)
}
if let Err(e) = cc.read_until_publish_request(Duration::from_secs(10)) {
bail!("Error waiting for publish request: {}", e)
}
let pr = cc.published_stream.as_ref().unwrap();
let info = ConnectionInfo {
id: Uuid::new_v4(),
ip_addr: ip.to_string(),
endpoint: addr.clone(),
app_name: pr.0.clone(),
key: pr.1.clone(),
};
let mut pl = match PipelineRunner::new(
handle,
out_dir,
overseer,
info,
Box::new(cc),
Some("flv".to_string()),
) {
Ok(pl) => pl,
Err(e) => {
bail!("Failed to create PipelineRunner {}", e)
}
};
pl.run();
Ok(())
})?;
}
Ok(())

View File

@ -1,15 +1,15 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::ingress::{spawn_pipeline, BufferedReader, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::Result;
use futures_util::stream::FusedStream;
use futures_util::StreamExt;
use log::{info, warn};
use log::info;
use srt_tokio::{SrtListener, SrtSocket};
use std::io::Read;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Instant;
use tokio::runtime::Handle;
use uuid::Uuid;
const MAX_SRT_BUFFER_SIZE: usize = 10 * 1024 * 1024; // 10MB limit
@ -21,6 +21,7 @@ pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>)
while let Some(request) = packets.incoming().next().await {
let socket = request.accept(None).await?;
let info = ConnectionInfo {
id: Uuid::new_v4(),
endpoint: addr.clone(),
ip_addr: socket.settings().remote.to_string(),
app_name: "".to_string(),
@ -38,10 +39,7 @@ pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>)
Box::new(SrtReader {
handle: Handle::current(),
socket,
buf: Vec::with_capacity(4096),
last_buffer_log: Instant::now(),
bytes_processed: 0,
packets_received: 0,
buffer: BufferedReader::new(4096, MAX_SRT_BUFFER_SIZE, "SRT"),
}),
);
}
@ -51,56 +49,21 @@ pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>)
struct SrtReader {
pub handle: Handle,
pub socket: SrtSocket,
pub buf: Vec<u8>,
last_buffer_log: Instant,
bytes_processed: u64,
packets_received: u64,
pub buffer: BufferedReader,
}
impl Read for SrtReader {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let (mut rx, _) = self.socket.split_mut();
while self.buf.len() < buf.len() {
while self.buffer.buf.len() < buf.len() {
if rx.is_terminated() {
return Ok(0);
}
if let Some((_, data)) = self.handle.block_on(rx.next()) {
let data_slice = data.iter().as_slice();
// Inline buffer management to avoid borrow issues
if self.buf.len() + data_slice.len() > MAX_SRT_BUFFER_SIZE {
let bytes_to_drop = (self.buf.len() + data_slice.len()) - MAX_SRT_BUFFER_SIZE;
warn!("SRT buffer full ({} bytes), dropping {} oldest bytes",
self.buf.len(), bytes_to_drop);
self.buf.drain(..bytes_to_drop);
}
self.buf.extend(data_slice);
// Update performance counters
self.bytes_processed += data_slice.len() as u64;
self.packets_received += 1;
// Log buffer status every 5 seconds
if self.last_buffer_log.elapsed().as_secs() >= 5 {
let buffer_util = (self.buf.len() as f32 / MAX_SRT_BUFFER_SIZE as f32) * 100.0;
let elapsed = self.last_buffer_log.elapsed();
let mbps = (self.bytes_processed as f64 * 8.0) / (elapsed.as_secs_f64() * 1_000_000.0);
let pps = self.packets_received as f64 / elapsed.as_secs_f64();
info!(
"SRT ingress: {:.1} Mbps, {:.1} packets/sec, buffer: {}% ({}/{} bytes)",
mbps, pps, buffer_util as u32, self.buf.len(), MAX_SRT_BUFFER_SIZE
);
// Reset counters
self.last_buffer_log = Instant::now();
self.bytes_processed = 0;
self.packets_received = 0;
}
self.buffer.add_data(data_slice);
}
}
let drain = self.buf.drain(..buf.len());
buf.copy_from_slice(drain.as_slice());
Ok(buf.len())
Ok(self.buffer.read_buffered(buf))
}
}

View File

@ -5,6 +5,7 @@ use log::info;
use std::sync::Arc;
use tokio::net::TcpListener;
use tokio::runtime::Handle;
use uuid::Uuid;
pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>) -> Result<()> {
let listener = TcpListener::bind(&addr).await?;
@ -12,6 +13,7 @@ pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>)
info!("TCP listening on: {}", &addr);
while let Ok((socket, ip)) = listener.accept().await {
let info = ConnectionInfo {
id: Uuid::new_v4(),
ip_addr: ip.to_string(),
endpoint: addr.clone(),
app_name: "".to_string(),

View File

@ -11,13 +11,20 @@ use ringbuf::traits::{Observer, Split};
use ringbuf::{HeapCons, HeapRb};
use std::io::Read;
use std::sync::Arc;
use std::time::Duration;
use tiny_skia::Pixmap;
use tokio::runtime::Handle;
use uuid::Uuid;
pub async fn listen(out_dir: String, overseer: Arc<dyn Overseer>) -> Result<()> {
info!("Test pattern enabled");
// add a delay, there is a race condition somewhere, the test pattern doesn't always
// get added to active_streams
tokio::time::sleep(Duration::from_secs(1)).await;
let info = ConnectionInfo {
id: Uuid::new_v4(),
endpoint: "test-pattern".to_string(),
ip_addr: "test-pattern".to_string(),
app_name: "".to_string(),