refactor: frame gen

2025-06-09 13:08:03 +01:00
parent e400e969fd
commit 5d7da09801
15 changed files with 865 additions and 775 deletions

View File

@@ -4,18 +4,9 @@ version = "0.1.0"
edition = "2021"
[features]
default = ["test-pattern", "srt", "rtmp"]
default = ["srt", "rtmp"]
srt = ["dep:srt-tokio"]
rtmp = ["dep:rml_rtmp"]
local-overseer = [] # WIP
webhook-overseer = [] # WIP
test-pattern = [
"dep:resvg",
"dep:usvg",
"dep:tiny-skia",
"dep:fontdue",
"dep:ringbuf",
]
[dependencies]
ffmpeg-rs-raw.workspace = true
@@ -27,20 +18,20 @@ uuid.workspace = true
serde.workspace = true
hex.workspace = true
itertools.workspace = true
futures-util = "0.3.30"
m3u8-rs.workspace = true
sha2.workspace = true
data-encoding.workspace = true
futures-util = "0.3.30"
resvg = "0.45.1"
usvg = "0.45.1"
tiny-skia = "0.11.4"
fontdue = "0.9.2"
ringbuf = "0.4.7"
# srt
srt-tokio = { version = "0.4.3", optional = true }
# rtmp
rml_rtmp = { version = "0.8.0", optional = true }
# test-pattern
resvg = { version = "0.44.0", optional = true }
usvg = { version = "0.44.0", optional = true }
tiny-skia = { version = "0.11.4", optional = true }
fontdue = { version = "0.9.2", optional = true }
ringbuf = { version = "0.4.7", optional = true }
libc = "0.2.169"

View File

@@ -0,0 +1,431 @@
use crate::overseer::IngressStream;
use anyhow::{bail, Result};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVColorSpace::AVCOL_SPC_RGB;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_RGBA;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVSampleFormat::AV_SAMPLE_FMT_FLTP;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
av_channel_layout_default, av_frame_alloc, av_frame_free, av_frame_get_buffer, AVFrame,
AVPixelFormat, AVRational,
};
use ffmpeg_rs_raw::Scaler;
use fontdue::layout::{CoordinateSystem, Layout, TextStyle};
use fontdue::Font;
use std::mem::transmute;
use std::time::{Duration, Instant};
use std::{ptr, slice};
/// Frame generator
pub struct FrameGenerator {
fps: f32,
width: u16,
height: u16,
video_sample_fmt: AVPixelFormat,
audio_sample_rate: u32,
audio_frame_size: i32,
audio_channels: u8,
frame_idx: u64,
audio_samples: u64,
// internal
next_frame: *mut AVFrame,
scaler: Scaler,
font: Font,
start: Instant,
}
impl Drop for FrameGenerator {
fn drop(&mut self) {
unsafe {
if !self.next_frame.is_null() {
av_frame_free(&mut self.next_frame);
self.next_frame = std::ptr::null_mut();
}
}
}
}
impl FrameGenerator {
pub fn new(
fps: f32,
width: u16,
height: u16,
pix_fmt: AVPixelFormat,
sample_rate: u32,
frame_size: i32,
channels: u8,
) -> Result<Self> {
let font = include_bytes!("../SourceCodePro-Regular.ttf") as &[u8];
let font = Font::from_bytes(font, Default::default()).unwrap();
Ok(Self {
fps,
width,
height,
video_sample_fmt: pix_fmt,
audio_sample_rate: sample_rate,
audio_frame_size: frame_size,
audio_channels: channels,
frame_idx: 0,
audio_samples: 0,
font,
start: Instant::now(),
scaler: Scaler::default(),
next_frame: ptr::null_mut(),
})
}
pub fn from_stream(
video_stream: &IngressStream,
audio_stream: Option<&IngressStream>,
) -> Result<Self> {
Ok(Self::new(
video_stream.fps,
video_stream.width as _,
video_stream.height as _,
unsafe { transmute(video_stream.format as i32) },
audio_stream.map(|i| i.sample_rate as _).unwrap_or(0),
if audio_stream.is_none() { 0 } else { 1024 },
audio_stream.map(|i| i.channels as _).unwrap_or(0),
)?)
}
pub fn frame_no(&self) -> u64 {
self.frame_idx
}
/// Create a new frame for composing text / images
pub fn begin(&mut self) -> Result<()> {
if self.next_frame.is_null() {
unsafe {
let mut src_frame = av_frame_alloc();
if src_frame.is_null() {
bail!("Failed to allocate placeholder video frame");
}
(*src_frame).width = self.width as _;
(*src_frame).height = self.height as _;
(*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
(*src_frame).key_frame = 1;
(*src_frame).colorspace = AVCOL_SPC_RGB;
// Internally we always compose in RGBA; the frame is converted to the target pixel format at the end
(*src_frame).format = AV_PIX_FMT_RGBA as _;
(*src_frame).pts = self.frame_idx as _;
(*src_frame).duration = 1;
(*src_frame).time_base = AVRational {
num: 1,
den: self.fps as i32,
};
if av_frame_get_buffer(src_frame, 0) < 0 {
av_frame_free(&mut src_frame);
bail!("Failed to get frame buffer");
}
self.next_frame = src_frame;
}
}
Ok(())
}
/// Write some text into the next frame
pub fn write_text(&mut self, msg: &str, size: f32, x: f32, y: f32) -> Result<()> {
if self.next_frame.is_null() {
bail!("Must call begin() before writing text")
}
let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
layout.append(&[&self.font], &TextStyle::new(msg, size, 0));
self.write_layout(layout, x, y)?;
Ok(())
}
/// Write text layout into frame
fn write_layout(&mut self, layout: Layout, x: f32, y: f32) -> Result<()> {
for g in layout.glyphs() {
let (metrics, bitmap) = self.font.rasterize_config_subpixel(g.key);
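// rasterize_config_subpixel returns 3 bytes (R, G, B) per pixel, hence
// the stride of 3 in offset_src below; the destination frame's alpha
// channel is left untouched by the glyph blit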
for y1 in 0..metrics.height {
for x1 in 0..metrics.width {
let dst_x = x as usize + x1 + g.x as usize;
let dst_y = y as usize + y1 + g.y as usize;
let offset_src = (x1 + y1 * metrics.width) * 3;
unsafe {
let offset_dst =
4 * dst_x + dst_y * (*self.next_frame).linesize[0] as usize;
let pixel_dst = (*self.next_frame).data[0].add(offset_dst);
*pixel_dst.offset(0) = bitmap[offset_src];
*pixel_dst.offset(1) = bitmap[offset_src + 1];
*pixel_dst.offset(2) = bitmap[offset_src + 2];
}
}
}
}
Ok(())
}
/// Copy data directly into the frame buffer (must be RGBA data)
pub unsafe fn copy_frame_data(&mut self, data: &[u8]) -> Result<()> {
if self.next_frame.is_null() {
bail!("Must call begin() before writing frame data")
}
// NOTE: assumes the RGBA buffer is tightly packed (linesize == width * 4)
let buf = slice::from_raw_parts_mut(
(*self.next_frame).data[0],
self.width as usize * self.height as usize * 4,
);
if buf.len() < data.len() {
bail!("Frame buffer is too small");
}
buf[..data.len()].copy_from_slice(data);
Ok(())
}
/// Generate audio to stay synchronized with video frames
unsafe fn generate_audio_frame(&mut self) -> Result<*mut AVFrame> {
const FREQUENCY: f32 = 440.0; // A4 note
// audio is disabled if sample rate is 0
if self.audio_sample_rate == 0 {
return Ok(ptr::null_mut());
}
// Calculate how many audio samples we need to cover the next video frame
let samples_per_frame = (self.audio_sample_rate as f32 / self.fps) as u64;
let next_frame_needs_samples = (self.frame_idx + 1) * samples_per_frame;
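// e.g. at 44.1 kHz and 30 fps this is 44100 / 30 = 1470 samples per video
// frame; audio is emitted in frame_size-sample chunks (one per call) until
// the audio sample clock has caught up with the video frame clock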
// Generate audio if we don't have enough to cover the next video frame
if self.audio_samples < next_frame_needs_samples {
let mut audio_frame = av_frame_alloc();
if audio_frame.is_null() {
bail!("Failed to allocate audio frame");
}
(*audio_frame).format = AV_SAMPLE_FMT_FLTP as _;
(*audio_frame).nb_samples = self.audio_frame_size as _;
(*audio_frame).duration = self.audio_frame_size as _;
(*audio_frame).sample_rate = self.audio_sample_rate as _;
(*audio_frame).pts = self.audio_samples as _;
(*audio_frame).time_base = AVRational {
num: 1,
den: self.audio_sample_rate as _,
};
av_channel_layout_default(&mut (*audio_frame).ch_layout, self.audio_channels as _);
if av_frame_get_buffer(audio_frame, 0) < 0 {
av_frame_free(&mut audio_frame);
bail!("Failed to get audio frame buffer");
}
// Generate sine wave samples
let data = (*audio_frame).data[0] as *mut f32;
for i in 0..self.audio_frame_size {
let sample_time =
(self.audio_samples + i as u64) as f32 / self.audio_sample_rate as f32;
let sample_value =
(2.0 * std::f32::consts::PI * FREQUENCY * sample_time).sin() * 0.5;
*data.add(i as _) = sample_value;
}
self.audio_samples += self.audio_frame_size as u64;
return Ok(audio_frame);
}
Ok(ptr::null_mut())
}
/// Return the next frame for encoding (blocking)
pub unsafe fn next(&mut self) -> Result<*mut AVFrame> {
// set start time to now if this is the first call to next()
if self.frame_idx == 0 {
self.start = Instant::now();
}
// try to get audio frames before video frames (non-blocking)
let audio_frame = self.generate_audio_frame()?;
if !audio_frame.is_null() {
return Ok(audio_frame);
}
// auto-init frame
if self.next_frame.is_null() {
self.begin()?;
}
let stream_time = Duration::from_secs_f64(self.frame_idx as f64 / self.fps as f64);
let real_time = Instant::now().duration_since(self.start);
let wait_time = if stream_time > real_time {
stream_time - real_time
} else {
Duration::new(0, 0)
};
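// only sleep when generation is running more than one frame interval ahead
// of wall-clock time; this paces output to real time without sleeping for
// sub-frame durations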
if !wait_time.is_zero() && wait_time.as_secs_f32() > 1f32 / self.fps {
std::thread::sleep(wait_time);
}
// convert to output pixel format, or just return internal frame if it matches output
if self.video_sample_fmt != transmute((*self.next_frame).format) {
let out_frame = self.scaler.process_frame(
self.next_frame,
self.width,
self.height,
self.video_sample_fmt,
)?;
av_frame_free(&mut self.next_frame);
self.next_frame = ptr::null_mut();
self.frame_idx += 1;
Ok(out_frame)
} else {
let ret = self.next_frame;
self.next_frame = ptr::null_mut();
self.frame_idx += 1;
Ok(ret)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
#[test]
fn test_frame_timing_synchronization() {
unsafe {
let fps = 30.0;
let sample_rate = 44100;
let frame_size = 1024;
let channels = 2;
let mut gen = FrameGenerator::new(
fps,
1280,
720,
AV_PIX_FMT_YUV420P,
sample_rate,
frame_size,
channels,
)
.unwrap();
let samples_per_frame = sample_rate as f64 / fps as f64; // Expected: 1470 samples per frame
println!("Expected samples per video frame: {:.2}", samples_per_frame);
let mut audio_frames = 0;
let mut video_frames = 0;
let mut total_audio_samples = 0;
// Generate 120 frames; audio and video are interleaved (~1.4 audio frames
// per video frame at 44.1 kHz / 30 fps), covering roughly 50 video frames
for i in 0..120 {
let mut frame = gen.next().unwrap();
if (*frame).sample_rate > 0 {
// Audio frame
audio_frames += 1;
total_audio_samples += (*frame).nb_samples as u64;
println!(
"Frame {}: AUDIO - PTS: {}, samples: {}, total_samples: {}",
i,
(*frame).pts,
(*frame).nb_samples,
total_audio_samples
);
} else {
// Video frame
video_frames += 1;
let expected_audio_samples = (video_frames as f64 * samples_per_frame) as u64;
let audio_deficit = if total_audio_samples >= expected_audio_samples {
0
} else {
expected_audio_samples - total_audio_samples
};
println!("Frame {}: VIDEO - PTS: {}, frame_idx: {}, expected_audio: {}, actual_audio: {}, deficit: {}",
i, (*frame).pts, video_frames, expected_audio_samples, total_audio_samples, audio_deficit);
// Verify we have enough audio for this video frame
assert!(
total_audio_samples >= expected_audio_samples,
"Video frame {} needs {} audio samples but only have {}",
video_frames,
expected_audio_samples,
total_audio_samples
);
}
av_frame_free(&mut frame);
}
println!("\nSummary:");
println!("Video frames: {}", video_frames);
println!("Audio frames: {}", audio_frames);
println!("Total audio samples: {}", total_audio_samples);
println!(
"Expected audio samples for {} video frames: {:.2}",
video_frames,
video_frames as f64 * samples_per_frame
);
// Verify the ratio is correct
let expected_total_audio = video_frames as f64 * samples_per_frame;
let sample_accuracy = (total_audio_samples as f64 - expected_total_audio).abs();
println!("Sample accuracy (difference): {:.2}", sample_accuracy);
// Allow for some tolerance due to frame size constraints
assert!(
sample_accuracy < frame_size as f64,
"Audio sample count too far from expected: got {}, expected {:.2}, diff {:.2}",
total_audio_samples,
expected_total_audio,
sample_accuracy
);
}
}
#[test]
fn test_pts_progression() {
unsafe {
let fps = 30.0;
let sample_rate = 44100;
let mut gen =
FrameGenerator::new(fps, 1280, 720, AV_PIX_FMT_YUV420P, sample_rate, 1024, 2)
.unwrap();
let mut last_audio_pts = -1i64;
let mut last_video_pts = -1i64;
let mut audio_pts_gaps = Vec::new();
let mut video_pts_gaps = Vec::new();
// Generate 60 frames to test PTS progression
for _ in 0..60 {
let mut frame = gen.next().unwrap();
if (*frame).sample_rate > 0 {
// Audio frame - check PTS progression
if last_audio_pts >= 0 {
let gap = (*frame).pts - last_audio_pts;
audio_pts_gaps.push(gap);
println!("Audio PTS gap: {}", gap);
}
last_audio_pts = (*frame).pts;
} else {
// Video frame - check PTS progression
if last_video_pts >= 0 {
let gap = (*frame).pts - last_video_pts;
video_pts_gaps.push(gap);
println!("Video PTS gap: {}", gap);
}
last_video_pts = (*frame).pts;
}
av_frame_free(&mut frame);
}
// Verify audio PTS gaps are consistent (should be 1024 samples)
for gap in &audio_pts_gaps {
assert_eq!(
*gap, 1024,
"Audio PTS should increment by frame_size (1024)"
);
}
// Verify video PTS gaps are consistent (should be 1 frame)
for gap in &video_pts_gaps {
assert_eq!(*gap, 1, "Video PTS should increment by 1 frame");
}
println!("PTS progression test passed - all gaps are consistent");
}
}
}
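For context, a minimal sketch of driving FrameGenerator from ingress code, mirroring how the test-pattern source below uses it (the constructor arguments are illustrative and error handling is elided):

let mut gen = FrameGenerator::new(30.0, 1280, 720, AV_PIX_FMT_YUV420P, 44100, 1024, 1)?;
loop {
    gen.begin()?; // allocate the RGBA compose frame
    gen.write_text(&format!("frame={}", gen.frame_no()), 40.0, 5.0, 5.0)?;
    // next() yields pending audio frames (sample_rate > 0) before the video frame
    let mut frame = unsafe { gen.next()? };
    // ... encode/mux the frame here ...
    unsafe { av_frame_free(&mut frame) };
}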

View File

@@ -12,7 +12,6 @@ pub mod rtmp;
#[cfg(feature = "srt")]
pub mod srt;
pub mod tcp;
#[cfg(feature = "test-pattern")]
pub mod test;
#[derive(Clone, Debug, Serialize, Deserialize)]

View File

@@ -1,23 +1,16 @@
use crate::generator::FrameGenerator;
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVColorSpace::AVCOL_SPC_RGB;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::{AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV420P};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVSampleFormat::AV_SAMPLE_FMT_FLTP;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
av_frame_alloc, av_frame_free, av_frame_get_buffer, av_packet_free, AVRational,
AV_PROFILE_H264_MAIN,
};
use ffmpeg_rs_raw::{Encoder, Muxer, Scaler};
use fontdue::layout::{CoordinateSystem, Layout, TextStyle};
use fontdue::Font;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{av_frame_free, av_packet_free, AV_PROFILE_H264_MAIN};
use ffmpeg_rs_raw::{Encoder, Muxer};
use log::info;
use ringbuf::traits::{Observer, Split};
use ringbuf::{HeapCons, HeapRb};
use std::io::Read;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tiny_skia::Pixmap;
use tokio::runtime::Handle;
@@ -42,33 +35,31 @@ pub async fn listen(out_dir: String, overseer: Arc<dyn Overseer>) -> Result<()>
}
struct TestPatternSrc {
gen: FrameGenerator,
video_encoder: Encoder,
audio_encoder: Encoder,
scaler: Scaler,
muxer: Muxer,
background: Pixmap,
font: [Font; 1],
frame_no: u64,
audio_sample_no: u64,
start: Instant,
muxer: Muxer,
reader: HeapCons<u8>,
}
unsafe impl Send for TestPatternSrc {}
const VIDEO_FPS: f32 = 30.0;
const VIDEO_WIDTH: u16 = 1280;
const VIDEO_HEIGHT: u16 = 720;
const SAMPLE_RATE: u32 = 44100;
impl TestPatternSrc {
pub fn new() -> Result<Self> {
let scaler = Scaler::new();
let video_encoder = unsafe {
Encoder::new_with_name("libx264")?
.with_stream_index(0)
.with_framerate(VIDEO_FPS)?
.with_bitrate(1_000_000)
.with_pix_fmt(AV_PIX_FMT_YUV420P)
.with_width(1280)
.with_height(720)
.with_width(VIDEO_WIDTH as _)
.with_height(VIDEO_HEIGHT as _)
.with_level(51)
.with_profile(AV_PROFILE_H264_MAIN)
.open(None)?
@@ -80,22 +71,20 @@ impl TestPatternSrc {
.with_default_channel_layout(1)
.with_bitrate(128_000)
.with_sample_format(AV_SAMPLE_FMT_FLTP)
.with_sample_rate(44100)?
.with_sample_rate(SAMPLE_RATE as _)?
.open(None)?
};
let svg_data = include_bytes!("../../test.svg");
let tree = usvg::Tree::from_data(svg_data, &Default::default())?;
let mut pixmap = Pixmap::new(1280, 720).unwrap();
let mut pixmap = Pixmap::new(VIDEO_WIDTH as _, VIDEO_HEIGHT as _).unwrap();
let render_ts = tiny_skia::Transform::from_scale(
pixmap.width() as f32 / tree.size().width(),
pixmap.height() as f32 / tree.size().height(),
);
resvg::render(&tree, render_ts, &mut pixmap.as_mut());
let font = include_bytes!("../../SourceCodePro-Regular.ttf") as &[u8];
let font = Font::from_bytes(font, Default::default()).unwrap();
let buf = HeapRb::new(1024 * 1024);
let (writer, reader) = buf.split();
@@ -109,140 +98,51 @@
m
};
let frame_size = unsafe { (*audio_encoder.codec_context()).frame_size as _ };
Ok(Self {
gen: FrameGenerator::new(
VIDEO_FPS,
VIDEO_WIDTH,
VIDEO_HEIGHT,
AV_PIX_FMT_YUV420P,
SAMPLE_RATE,
frame_size,
1,
)?,
video_encoder,
audio_encoder,
scaler,
muxer,
background: pixmap,
font: [font],
frame_no: 0,
audio_sample_no: 0,
start: Instant::now(),
reader,
})
}
pub unsafe fn next_pkt(&mut self) -> Result<()> {
let stream_time = Duration::from_secs_f64(self.frame_no as f64 / VIDEO_FPS as f64);
let real_time = Instant::now().duration_since(self.start);
let wait_time = if stream_time > real_time {
stream_time - real_time
} else {
Duration::new(0, 0)
};
if !wait_time.is_zero() && wait_time.as_secs_f32() > 1f32 / VIDEO_FPS {
std::thread::sleep(wait_time);
self.gen.begin()?;
self.gen.copy_frame_data(self.background.data())?;
self.gen
.write_text(&format!("frame={}", self.gen.frame_no()), 40.0, 5.0, 5.0)?;
let mut frame = self.gen.next()?;
if frame.is_null() {
return Ok(());
}
let mut src_frame = unsafe {
let src_frame = av_frame_alloc();
(*src_frame).width = 1280;
(*src_frame).height = 720;
(*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
(*src_frame).key_frame = 1;
(*src_frame).colorspace = AVCOL_SPC_RGB;
(*src_frame).format = AV_PIX_FMT_RGBA as _;
(*src_frame).pts = self.frame_no as i64;
(*src_frame).duration = 1;
av_frame_get_buffer(src_frame, 0);
self.background
.data()
.as_ptr()
.copy_to((*src_frame).data[0] as *mut _, 1280 * 720 * 4);
src_frame
};
let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
layout.clear();
layout.append(
&self.font,
&TextStyle::new(&format!("frame={}", self.frame_no), 40.0, 0),
);
for g in layout.glyphs() {
let (metrics, bitmap) = self.font[0].rasterize_config_subpixel(g.key);
for y in 0..metrics.height {
for x in 0..metrics.width {
let dst_x = x + g.x as usize;
let dst_y = y + g.y as usize;
let offset_src = (x + y * metrics.width) * 3;
unsafe {
let offset_dst = 4 * dst_x + dst_y * (*src_frame).linesize[0] as usize;
let pixel_dst = (*src_frame).data[0].add(offset_dst);
*pixel_dst.offset(0) = bitmap[offset_src];
*pixel_dst.offset(1) = bitmap[offset_src + 1];
*pixel_dst.offset(2) = bitmap[offset_src + 2];
}
}
}
}
// scale/encode video
let mut frame = self
.scaler
.process_frame(src_frame, 1280, 720, AV_PIX_FMT_YUV420P)?;
for mut pkt in self.video_encoder.encode_frame(frame)? {
self.muxer.write_packet(pkt)?;
av_packet_free(&mut pkt);
}
av_frame_free(&mut frame);
av_frame_free(&mut src_frame);
// Generate and encode audio (sine wave)
self.generate_audio_frame()?;
self.frame_no += 1;
Ok(())
}
/// Generate audio to stay synchronized with video frames
unsafe fn generate_audio_frame(&mut self) -> Result<()> {
const SAMPLE_RATE: f32 = 44100.0;
const FREQUENCY: f32 = 440.0; // A4 note
const SAMPLES_PER_FRAME: usize = 1024; // Fixed AAC frame size
// Calculate how many audio samples we should have by now
// At 30fps, each video frame = 1/30 sec = 1470 audio samples at 44.1kHz
let audio_samples_per_video_frame = (SAMPLE_RATE / VIDEO_FPS) as u64; // ~1470 samples
let target_audio_samples = self.frame_no * audio_samples_per_video_frame;
// Generate audio frames to catch up to the target
while self.audio_sample_no < target_audio_samples {
let mut audio_frame = av_frame_alloc();
(*audio_frame).format = AV_SAMPLE_FMT_FLTP as _;
(*audio_frame).nb_samples = SAMPLES_PER_FRAME as _;
(*audio_frame).ch_layout.nb_channels = 1;
(*audio_frame).sample_rate = SAMPLE_RATE as _;
(*audio_frame).pts = self.audio_sample_no as i64;
(*audio_frame).duration = 1;
(*audio_frame).time_base = AVRational {
num: 1,
den: SAMPLE_RATE as _,
};
av_frame_get_buffer(audio_frame, 0);
// Generate sine wave samples
let data = (*audio_frame).data[0] as *mut f32;
for i in 0..SAMPLES_PER_FRAME {
let sample_time = (self.audio_sample_no + i as u64) as f32 / SAMPLE_RATE;
let sample_value =
(2.0 * std::f32::consts::PI * FREQUENCY * sample_time).sin() * 0.5;
*data.add(i) = sample_value;
}
// Encode audio frame
for mut pkt in self.audio_encoder.encode_frame(audio_frame)? {
// if sample_rate is set this frame is audio
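// (FrameGenerator::next() yields pending audio frames ahead of the video
// frame to keep audio in sync, so a single next_pkt() call may encode
// either kind)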
if (*frame).sample_rate > 0 {
for mut pkt in self.audio_encoder.encode_frame(frame)? {
self.muxer.write_packet(pkt)?;
av_packet_free(&mut pkt);
}
} else {
for mut pkt in self.video_encoder.encode_frame(frame)? {
self.muxer.write_packet(pkt)?;
av_packet_free(&mut pkt);
}
self.audio_sample_no += SAMPLES_PER_FRAME as u64;
av_frame_free(&mut audio_frame);
}
av_frame_free(&mut frame);
Ok(())
}
}

View File

@@ -5,3 +5,4 @@ pub mod overseer;
pub mod pipeline;
pub mod variant;
pub mod viewer;
mod generator;

View File

@@ -8,12 +8,6 @@ use std::cmp::PartialEq;
use std::path::PathBuf;
use uuid::Uuid;
#[cfg(feature = "local-overseer")]
mod local;
#[cfg(feature = "webhook-overseer")]
mod webhook;
/// A copy of [ffmpeg_rs_raw::DemuxerInfo] without internal ptr
#[derive(PartialEq, Clone)]
pub struct IngressInfo {
@@ -32,6 +26,7 @@ pub struct IngressStream {
pub height: usize,
pub fps: f32,
pub sample_rate: usize,
pub channels: u8,
pub language: String,
}

View File

@ -7,7 +7,6 @@ use serde::{Deserialize, Serialize};
use uuid::Uuid;
pub mod runner;
pub mod placeholder;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum EgressType {
@@ -41,7 +40,7 @@ impl Display for EgressType {
}
}
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
#[derive(Clone)]
pub struct PipelineConfig {
pub id: Uuid,
/// Transcoded/Copied stream config
@@ -49,7 +48,11 @@ pub struct PipelineConfig {
/// Output muxers
pub egress: Vec<EgressType>,
/// Source stream information for placeholder generation
pub ingress_info: Option<IngressInfo>,
pub ingress_info: IngressInfo,
/// Primary source video stream
pub video_src: usize,
/// Primary audio source stream
pub audio_src: Option<usize>,
}
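// ingress_info is now required (previously Option<IngressInfo>) so idle mode
// can always rebuild a FrameGenerator from the source stream's properties;
// video_src / audio_src index into ingress_info.streams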
impl Display for PipelineConfig {

View File

@@ -1,188 +0,0 @@
use anyhow::{bail, Result};
use crate::variant::video::VideoVariant;
use crate::variant::audio::AudioVariant;
use crate::overseer::{IngressStream, IngressStreamType};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
av_frame_alloc, av_frame_get_buffer, av_frame_free, av_get_sample_fmt, AVFrame,
AVPixelFormat, AVSampleFormat
};
use std::ffi::CString;
/// Placeholder frame generator for idle mode when stream disconnects
pub struct PlaceholderGenerator;
impl PlaceholderGenerator {
/// Generate a placeholder video frame based on ingress stream info
pub unsafe fn generate_video_frame_from_stream(
stream: &IngressStream,
stream_time_base: (i32, i32),
frame_index: u64
) -> Result<*mut AVFrame> {
let frame = av_frame_alloc();
if frame.is_null() {
bail!("Failed to allocate placeholder video frame");
}
(*frame).format = AVPixelFormat::AV_PIX_FMT_YUV420P as i32;
(*frame).width = stream.width as i32;
(*frame).height = stream.height as i32;
(*frame).time_base.num = stream_time_base.0;
(*frame).time_base.den = stream_time_base.1;
// Set PTS based on frame rate and total frame index
let fps = if stream.fps > 0.0 { stream.fps } else { 30.0 };
let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
(*frame).pts = (frame_index as f64 / fps / time_base_f64) as i64;
if av_frame_get_buffer(frame, 0) < 0 {
av_frame_free(&mut frame);
bail!("Failed to allocate buffer for placeholder video frame");
}
// Fill with black (Y=16, U=V=128 for limited range YUV420P)
let y_size = ((*frame).width * (*frame).height) as usize;
let uv_size = y_size / 4;
if !(*frame).data[0].is_null() {
std::ptr::write_bytes((*frame).data[0], 16, y_size);
}
if !(*frame).data[1].is_null() {
std::ptr::write_bytes((*frame).data[1], 128, uv_size);
}
if !(*frame).data[2].is_null() {
std::ptr::write_bytes((*frame).data[2], 128, uv_size);
}
Ok(frame)
}
/// Generate a placeholder audio frame based on ingress stream info
pub unsafe fn generate_audio_frame_from_stream(
stream: &IngressStream,
stream_time_base: (i32, i32),
frame_index: u64,
sample_fmt: &str,
channels: u32
) -> Result<*mut AVFrame> {
let frame = av_frame_alloc();
if frame.is_null() {
bail!("Failed to allocate placeholder audio frame");
}
// Use the provided sample format
let sample_fmt_cstr = CString::new(sample_fmt)
.map_err(|_| anyhow::anyhow!("Invalid sample format string"))?;
let sample_fmt_int = av_get_sample_fmt(sample_fmt_cstr.as_ptr());
(*frame).format = sample_fmt_int;
(*frame).channels = channels as i32;
(*frame).sample_rate = stream.sample_rate as i32;
(*frame).nb_samples = 1024; // Standard audio frame size
(*frame).time_base.num = stream_time_base.0;
(*frame).time_base.den = stream_time_base.1;
// Set PTS based on sample rate and frame index
let samples_per_second = stream.sample_rate as f64;
let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
(*frame).pts = ((frame_index * 1024) as f64 / samples_per_second / time_base_f64) as i64;
if av_frame_get_buffer(frame, 0) < 0 {
av_frame_free(&mut frame);
bail!("Failed to allocate buffer for placeholder audio frame");
}
// Fill with silence (zeros)
for i in 0..8 {
if !(*frame).data[i].is_null() && (*frame).linesize[i] > 0 {
std::ptr::write_bytes((*frame).data[i], 0, (*frame).linesize[i] as usize);
}
}
Ok(frame)
}
/// Generate a placeholder black video frame
pub unsafe fn generate_video_frame(
variant: &VideoVariant,
stream_time_base: (i32, i32),
frame_index: u64
) -> Result<*mut AVFrame> {
let frame = av_frame_alloc();
if frame.is_null() {
bail!("Failed to allocate placeholder video frame");
}
(*frame).format = AVPixelFormat::AV_PIX_FMT_YUV420P as i32;
(*frame).width = variant.width as i32;
(*frame).height = variant.height as i32;
(*frame).time_base.num = stream_time_base.0;
(*frame).time_base.den = stream_time_base.1;
// Set PTS based on frame rate and total frame index
let fps = if variant.fps > 0.0 { variant.fps } else { 30.0 };
let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
(*frame).pts = (frame_index as f64 / fps / time_base_f64) as i64;
if av_frame_get_buffer(frame, 0) < 0 {
av_frame_free(&mut frame);
bail!("Failed to allocate buffer for placeholder video frame");
}
// Fill with black (Y=16, U=V=128 for limited range YUV420P)
let y_size = ((*frame).width * (*frame).height) as usize;
let uv_size = y_size / 4;
if !(*frame).data[0].is_null() {
std::ptr::write_bytes((*frame).data[0], 16, y_size);
}
if !(*frame).data[1].is_null() {
std::ptr::write_bytes((*frame).data[1], 128, uv_size);
}
if !(*frame).data[2].is_null() {
std::ptr::write_bytes((*frame).data[2], 128, uv_size);
}
Ok(frame)
}
/// Generate a placeholder silent audio frame
pub unsafe fn generate_audio_frame(
variant: &AudioVariant,
stream_time_base: (i32, i32),
frame_index: u64
) -> Result<*mut AVFrame> {
let frame = av_frame_alloc();
if frame.is_null() {
bail!("Failed to allocate placeholder audio frame");
}
// Use the sample format from the variant configuration
let sample_fmt_cstr = CString::new(variant.sample_fmt.as_str())
.map_err(|_| anyhow::anyhow!("Invalid sample format string"))?;
let sample_fmt_int = av_get_sample_fmt(sample_fmt_cstr.as_ptr());
(*frame).format = sample_fmt_int;
(*frame).channels = variant.channels as i32;
(*frame).sample_rate = variant.sample_rate as i32;
(*frame).nb_samples = 1024; // Standard audio frame size
(*frame).time_base.num = stream_time_base.0;
(*frame).time_base.den = stream_time_base.1;
// Set PTS based on sample rate and frame index
let samples_per_second = variant.sample_rate as f64;
let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
(*frame).pts = ((frame_index * 1024) as f64 / samples_per_second / time_base_f64) as i64;
if av_frame_get_buffer(frame, 0) < 0 {
av_frame_free(&mut frame);
bail!("Failed to allocate buffer for placeholder audio frame");
}
// Fill with silence (zeros)
for i in 0..8 {
if !(*frame).data[i].is_null() && (*frame).linesize[i] > 0 {
std::ptr::write_bytes((*frame).data[i], 0, (*frame).linesize[i] as usize);
}
}
Ok(frame)
}
}

View File

@@ -10,18 +10,19 @@ use std::time::{Duration, Instant};
use crate::egress::hls::HlsEgress;
use crate::egress::recorder::RecorderEgress;
use crate::egress::{Egress, EgressResult};
use crate::generator::FrameGenerator;
use crate::ingress::ConnectionInfo;
use crate::mux::SegmentType;
use crate::overseer::{IngressInfo, IngressStream, IngressStreamType, Overseer};
use crate::pipeline::{EgressType, PipelineConfig};
use crate::variant::{StreamMapping, VariantStream};
use crate::pipeline::placeholder::PlaceholderGenerator;
use anyhow::{bail, Result};
use anyhow::{bail, Context, Result};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVCodecID::AV_CODEC_ID_WEBP;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
av_frame_free, av_get_sample_fmt, av_packet_free, av_q2d, av_rescale_q, AVMediaType,
av_frame_free, av_get_sample_fmt, av_packet_free, av_q2d, av_rescale_q, AVFrame, AVMediaType,
AVStream,
};
use ffmpeg_rs_raw::{
cstr, get_frame_from_hw, AudioFifo, Decoder, Demuxer, DemuxerInfo, Encoder, Resample, Scaler,
@@ -32,15 +33,14 @@ use tokio::runtime::Handle;
use uuid::Uuid;
/// Runner state for handling normal vs idle modes
#[derive(Debug, Clone)]
pub enum RunnerState {
/// Normal operation - processing live stream
Normal,
/// Idle mode - generating placeholder content after disconnection
Idle {
start_time: Instant,
variant_index: usize,
last_frame_time: Option<Instant>,
gen: FrameGenerator,
},
}
@@ -129,142 +129,131 @@ impl PipelineRunner {
})
}
/// Process a single idle frame - generates one source frame and processes it through all variants
unsafe fn process_single_idle_frame(&mut self, config: &PipelineConfig) -> Result<()> {
use std::time::{Duration, Instant};
if config.variants.is_empty() {
return Ok(());
}
/// Process a single frame through the pipeline
unsafe fn process_frame(
&mut self,
config: &PipelineConfig,
stream: *mut AVStream,
frame: *mut AVFrame,
) -> Result<Vec<EgressResult>> {
// Copy frame from GPU if using hwaccel decoding
let mut frame = get_frame_from_hw(frame)?;
(*frame).time_base = (*stream).time_base;
// Extract timing info from current state
let (mut last_frame_time, variant_index) = match &mut self.state {
RunnerState::Idle { last_frame_time, variant_index, .. } => (last_frame_time, variant_index),
_ => return Ok(()), // Only process in idle state
};
let p = (*stream).codecpar;
if (*p).codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO {
// Conditionally generate thumbnails based on interval (0 = disabled)
if self.thumb_interval > 0 && (self.frame_ctr % self.thumb_interval) == 0 {
let thumb_start = Instant::now();
let dst_pic = PathBuf::from(&self.out_dir)
.join(config.id.to_string())
.join("thumb.webp");
{
let mut sw = Scaler::new();
let mut scaled_frame = sw.process_frame(
frame,
(*frame).width as _,
(*frame).height as _,
AV_PIX_FMT_YUV420P,
)?;
// Time-based frame rate calculation
let now = Instant::now();
if let Some(last_time) = *last_frame_time {
// Calculate target frame interval (assume 30fps for now)
let target_interval = Duration::from_millis(33); // ~30fps
let elapsed = now.duration_since(last_time);
if elapsed < target_interval {
// Not time for next frame yet
std::thread::sleep(target_interval - elapsed);
let encoder = Encoder::new(AV_CODEC_ID_WEBP)?
.with_height((*scaled_frame).height)
.with_width((*scaled_frame).width)
.with_pix_fmt(transmute((*scaled_frame).format))
.open(None)?;
encoder.save_picture(scaled_frame, dst_pic.to_str().unwrap())?;
av_frame_free(&mut scaled_frame);
}
let thumb_duration = thumb_start.elapsed();
info!(
"Saved thumb ({:.2}ms) to: {}",
thumb_duration.as_secs_f32() * 1000.0,
dst_pic.display(),
);
}
}
*last_frame_time = Some(Instant::now());
// Get source video stream info from stored ingress info
let video_stream = config.ingress_info.as_ref()
.and_then(|info| info.streams.iter().find(|s| matches!(s.stream_type, crate::overseer::IngressStreamType::Video)));
let mut egress_results = vec![];
// Generate one source frame and process it through all relevant variants
if let Some(stream) = video_stream {
// Generate a single source placeholder video frame based on original stream properties
let fps = if stream.fps > 0.0 { stream.fps } else { 30.0 };
let time_base = (1, fps as i32);
let mut source_frame = PlaceholderGenerator::generate_video_frame_from_stream(stream, time_base, self.frame_ctr)?;
// Set the frame time_base
(*source_frame).time_base.num = time_base.0;
(*source_frame).time_base.den = time_base.1;
// Increment frame counter for all video processing
self.frame_ctr += 1;
// Process this single frame through all video variants (like normal pipeline)
for variant in &config.variants {
if let VariantStream::Video(v) = variant {
// Scale/encode the source frame for this variant
if let Some(enc) = self.encoders.get_mut(&v.id()) {
// Use scaler if needed for different resolutions
let frame_to_encode = if v.width as i32 == (*source_frame).width &&
v.height as i32 == (*source_frame).height {
// Same resolution, use source frame directly
source_frame
} else {
// Different resolution, need to scale
if let Some(scaler) = self.scalers.get_mut(&v.id()) {
scaler.process_frame(source_frame, v.width, v.height, AV_PIX_FMT_YUV420P)?
} else {
source_frame // Fallback to source frame
}
};
let packets = enc.encode_frame(frame_to_encode)?;
for mut pkt in packets {
for eg in self.egress.iter_mut() {
let er = eg.process_pkt(pkt, &v.id())?;
egress_results.push(er);
}
av_packet_free(&mut pkt);
}
}
}
}
av_frame_free(&mut source_frame);
}
// Generate and process audio frames separately (audio doesn't share like video)
let audio_stream = config.ingress_info.as_ref()
.and_then(|info| info.streams.iter().find(|s| matches!(s.stream_type, crate::overseer::IngressStreamType::Audio)));
for variant in &config.variants {
if let VariantStream::Audio(a) = variant {
let time_base = (1, a.sample_rate as i32);
let mut frame = if let Some(stream) = audio_stream {
// Use original stream properties for placeholder generation
PlaceholderGenerator::generate_audio_frame_from_stream(stream, time_base, self.frame_ctr, &a.sample_fmt, a.channels)?
} else {
// Fallback to variant properties if no stream info available
PlaceholderGenerator::generate_audio_frame(a, time_base, self.frame_ctr)?
};
// Set the frame time_base
(*frame).time_base.num = time_base.0;
(*frame).time_base.den = time_base.1;
// Process through the encoding pipeline
if let Some(enc) = self.encoders.get_mut(&a.id()) {
let packets = enc.encode_frame(frame)?;
for mut pkt in packets {
for eg in self.egress.iter_mut() {
let er = eg.process_pkt(pkt, &a.id())?;
egress_results.push(er);
}
av_packet_free(&mut pkt);
let mut egress_results = Vec::new();
// Get the variants which want this pkt
let pkt_vars = config
.variants
.iter()
.filter(|v| v.src_index() == (*stream).index as usize);
for var in pkt_vars {
let enc = if let Some(enc) = self.encoders.get_mut(&var.id()) {
enc
} else {
//warn!("Frame had nowhere to go in {} :/", var.id());
continue;
};
// scaling / resampling
let mut new_frame = false;
let mut frame = match var {
VariantStream::Video(v) => {
if let Some(s) = self.scalers.get_mut(&v.id()) {
new_frame = true;
s.process_frame(frame, v.width, v.height, transmute(v.pixel_format))?
} else {
frame
}
}
VariantStream::Audio(a) => {
if let Some((r, f)) = self.resampler.get_mut(&a.id()) {
let frame_size = (*enc.codec_context()).frame_size;
new_frame = true;
let mut resampled_frame = r.process_frame(frame)?;
if let Some(ret) = f.buffer_frame(resampled_frame, frame_size as usize)? {
// Set correct timebase for audio (1/sample_rate)
(*ret).time_base.num = 1;
(*ret).time_base.den = a.sample_rate as i32;
av_frame_free(&mut resampled_frame);
ret
} else {
av_frame_free(&mut resampled_frame);
continue;
}
} else {
frame
}
}
_ => frame,
};
// before encoding frame, rescale timestamps
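// (av_rescale_q(v, from, to) computes v * from / to with rounding, moving
// pts/dts/duration from the frame's time_base to the encoder's time_base)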
if !frame.is_null() {
let enc_ctx = enc.codec_context();
(*frame).pict_type = AV_PICTURE_TYPE_NONE;
(*frame).pts = av_rescale_q((*frame).pts, (*frame).time_base, (*enc_ctx).time_base);
(*frame).pkt_dts =
av_rescale_q((*frame).pkt_dts, (*frame).time_base, (*enc_ctx).time_base);
(*frame).duration =
av_rescale_q((*frame).duration, (*frame).time_base, (*enc_ctx).time_base);
(*frame).time_base = (*enc_ctx).time_base;
}
let packets = enc.encode_frame(frame)?;
// pass new packets to egress
for mut pkt in packets {
for eg in self.egress.iter_mut() {
let er = eg.process_pkt(pkt, &var.id())?;
egress_results.push(er);
}
av_packet_free(&mut pkt);
}
if new_frame {
av_frame_free(&mut frame);
}
}
// Handle egress results (same as normal processing)
if !egress_results.is_empty() {
self.handle.block_on(async {
for er in egress_results {
if let EgressResult::Segments { created, deleted } = er {
if let Err(e) = self
.overseer
.on_segments(&config.id, &created, &deleted)
.await
{
bail!("Failed to process segment {}", e.to_string());
}
}
}
Ok(())
})?;
}
Ok(())
av_frame_free(&mut frame);
Ok(egress_results)
}
/// EOF, cleanup
@@ -297,23 +286,36 @@ impl PipelineRunner {
self.setup()?;
let config = if let Some(config) = &self.config {
config
config.clone()
} else {
bail!("Pipeline not configured, cannot run")
};
// run transcoder pipeline
let (mut pkt, stream_info) = self.demuxer.get_packet()?;
let (mut pkt, _) = self.demuxer.get_packet()?;
let src_video_stream = config
.ingress_info
.streams
.iter()
.find(|s| s.index == config.video_src)
.context("video source stream not found in ingress info")?;
let src_audio_stream = config
.ingress_info
.streams
.iter()
.find(|s| Some(s.index) == config.audio_src);
// Handle state transitions based on packet availability
match (&self.state, pkt.is_null()) {
(RunnerState::Normal, true) => {
// First time entering idle mode
info!("Stream input disconnected, entering idle mode with placeholder content");
info!("Stream input disconnected, entering idle mode");
self.state = RunnerState::Idle {
start_time: Instant::now(),
variant_index: 0,
last_frame_time: None,
gen: FrameGenerator::from_stream(src_video_stream, src_audio_stream)?,
};
}
(RunnerState::Idle { start_time, .. }, true) => {
@@ -332,27 +334,23 @@ impl PipelineRunner {
// Normal operation continues
}
}
// Process based on current state
match &self.state {
RunnerState::Idle { .. } => {
// Process a single idle frame (rotating through variants)
self.process_single_idle_frame(config)?;
// Free the null packet if needed
if !pkt.is_null() {
av_packet_free(&mut pkt);
}
return Ok(true); // Continue processing
let result = match &mut self.state {
RunnerState::Idle { gen, .. } => {
let frame = gen.next()?;
let stream = if (*frame).sample_rate > 0 {
self.demuxer.get_stream(
src_audio_stream
.context("frame generator created an audio frame with no src stream")?
.index,
)?
} else {
self.demuxer.get_stream(src_video_stream.index)?
};
self.process_frame(&config, stream, frame)?
}
RunnerState::Normal => {
// Normal packet processing
if pkt.is_null() {
// This shouldn't happen in Normal state but handle gracefully
return Ok(true);
}
// TODO: For copy streams, skip decoder
let frames = match self.decoder.decode_pkt(pkt) {
Ok(f) => f,
@@ -364,133 +362,19 @@ impl PipelineRunner {
let mut egress_results = vec![];
for (frame, stream) in frames {
// Copy frame from GPU if using hwaccel decoding
let mut frame = get_frame_from_hw(frame)?;
(*frame).time_base = (*stream).time_base;
let p = (*stream).codecpar;
if (*p).codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO {
// Conditionally generate thumbnails based on interval (0 = disabled)
if self.thumb_interval > 0 && (self.frame_ctr % self.thumb_interval) == 0 {
let thumb_start = Instant::now();
let dst_pic = PathBuf::from(&self.out_dir)
.join(config.id.to_string())
.join("thumb.webp");
{
let mut sw = Scaler::new();
let mut scaled_frame = sw.process_frame(
frame,
(*frame).width as _,
(*frame).height as _,
AV_PIX_FMT_YUV420P,
)?;
let mut encoder = Encoder::new(AV_CODEC_ID_WEBP)?
.with_height((*scaled_frame).height)
.with_width((*scaled_frame).width)
.with_pix_fmt(transmute((*scaled_frame).format))
.open(None)?;
encoder.save_picture(scaled_frame, dst_pic.to_str().unwrap())?;
av_frame_free(&mut scaled_frame);
}
let thumb_duration = thumb_start.elapsed();
info!(
"Saved thumb ({:.2}ms) to: {}",
thumb_duration.as_millis() as f32 / 1000.0,
dst_pic.display(),
);
let results = self.process_frame(&config, stream, frame)?;
egress_results.extend(results);
}
self.frame_ctr += 1;
av_packet_free(&mut pkt);
egress_results
}
// Get the variants which want this pkt
let pkt_vars = config
.variants
.iter()
.filter(|v| v.src_index() == (*stream).index as usize);
for var in pkt_vars {
let enc = if let Some(enc) = self.encoders.get_mut(&var.id()) {
enc
} else {
//warn!("Frame had nowhere to go in {} :/", var.id());
continue;
};
// scaling / resampling
let mut new_frame = false;
let mut frame = match var {
VariantStream::Video(v) => {
if let Some(s) = self.scalers.get_mut(&v.id()) {
new_frame = true;
s.process_frame(frame, v.width, v.height, transmute(v.pixel_format))?
} else {
frame
}
}
VariantStream::Audio(a) => {
if let Some((r, f)) = self.resampler.get_mut(&a.id()) {
let frame_size = (*enc.codec_context()).frame_size;
new_frame = true;
let mut resampled_frame = r.process_frame(frame)?;
if let Some(ret) =
f.buffer_frame(resampled_frame, frame_size as usize)?
{
// Set correct timebase for audio (1/sample_rate)
(*ret).time_base.num = 1;
(*ret).time_base.den = a.sample_rate as i32;
av_frame_free(&mut resampled_frame);
ret
} else {
av_frame_free(&mut resampled_frame);
continue;
}
} else {
frame
}
}
_ => frame,
};
// before encoding frame, rescale timestamps
if !frame.is_null() {
let enc_ctx = enc.codec_context();
(*frame).pict_type = AV_PICTURE_TYPE_NONE;
(*frame).pts =
av_rescale_q((*frame).pts, (*frame).time_base, (*enc_ctx).time_base);
(*frame).pkt_dts =
av_rescale_q((*frame).pkt_dts, (*frame).time_base, (*enc_ctx).time_base);
(*frame).duration =
av_rescale_q((*frame).duration, (*frame).time_base, (*enc_ctx).time_base);
(*frame).time_base = (*enc_ctx).time_base;
}
let packets = enc.encode_frame(frame)?;
// pass new packets to egress
for mut pkt in packets {
for eg in self.egress.iter_mut() {
let er = eg.process_pkt(pkt, &var.id())?;
egress_results.push(er);
}
av_packet_free(&mut pkt);
}
if new_frame {
av_frame_free(&mut frame);
}
}
av_frame_free(&mut frame);
}
av_packet_free(&mut pkt);
};
// egress results - process async operations without blocking if possible
if !egress_results.is_empty() {
if !result.is_empty() {
self.handle.block_on(async {
for er in egress_results {
for er in result {
if let EgressResult::Segments { created, deleted } = er {
if let Err(e) = self
.overseer
@@ -510,8 +394,6 @@ impl PipelineRunner {
info!("Average fps: {:.2}", n_frames as f32 / elapsed);
self.fps_counter_start = Instant::now();
self.fps_last_frame_ctr = self.frame_ctr;
}
} // Close the RunnerState::Normal match arm
}
Ok(true)
}
@@ -542,18 +424,16 @@ impl PipelineRunner {
height: s.height,
fps: s.fps,
sample_rate: s.sample_rate,
channels: s.channels,
language: s.language.clone(),
})
.collect(),
};
let mut cfg = self
let cfg = self
.handle
.block_on(async { self.overseer.start_stream(&self.connection, &i_info).await })?;
// Store ingress info in config for placeholder generation
cfg.ingress_info = Some(i_info.clone());
self.config = Some(cfg);
self.info = Some(i_info);
@@ -649,7 +529,10 @@ impl Drop for PipelineRunner {
self.copy_stream.clear();
self.egress.clear();
info!("PipelineRunner cleaned up resources for stream: {}", self.connection.key);
info!(
"PipelineRunner cleaned up resources for stream: {}",
self.connection.key
);
}
}
}