Refactor pipeline
All checks were successful
continuous-integration/drone/push Build is passing

This commit is contained in:
kieran 2024-09-03 14:09:30 +01:00
parent 65d8964632
commit 2c7d2dc9d1
Signed by: Kieran
GPG Key ID: DE71CEB3925BE941
27 changed files with 1465 additions and 1349 deletions

View File

@ -1,11 +1,14 @@
use std::collections::HashMap;
use std::ffi::CStr;
use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{av_frame_alloc, AVCodec, avcodec_alloc_context3, avcodec_find_decoder, avcodec_free_context, avcodec_open2, avcodec_parameters_to_context, avcodec_receive_frame, avcodec_send_packet, AVCodecContext, AVERROR, AVERROR_EOF, AVPacket};
use ffmpeg_sys_next::AVPictureType::AV_PICTURE_TYPE_NONE;
use tokio::sync::broadcast;
use tokio::sync::mpsc::UnboundedReceiver;
use ffmpeg_sys_next::{
av_frame_alloc, avcodec_alloc_context3, avcodec_find_decoder, avcodec_free_context,
avcodec_get_name, avcodec_open2, avcodec_parameters_to_context, avcodec_receive_frame,
avcodec_send_packet, AVCodec, AVCodecContext, AVPacket, AVERROR, AVERROR_EOF,
};
use crate::pipeline::{AVFrameSource, AVPacketSource, PipelinePayload};
@ -25,8 +28,6 @@ impl Drop for CodecContext {
}
pub struct Decoder {
chan_in: UnboundedReceiver<PipelinePayload>,
chan_out: broadcast::Sender<PipelinePayload>,
codecs: HashMap<i32, CodecContext>,
pts: i64,
}
@ -36,13 +37,8 @@ unsafe impl Send for Decoder {}
unsafe impl Sync for Decoder {}
impl Decoder {
pub fn new(
chan_in: UnboundedReceiver<PipelinePayload>,
chan_out: broadcast::Sender<PipelinePayload>,
) -> Self {
pub fn new() -> Self {
Self {
chan_in,
chan_out,
codecs: HashMap::new(),
pts: 0,
}
@ -52,7 +48,7 @@ impl Decoder {
&mut self,
pkt: *mut AVPacket,
src: &AVPacketSource,
) -> Result<usize, Error> {
) -> Result<Vec<PipelinePayload>, Error> {
let stream_index = (*pkt).stream_index;
let stream = match src {
AVPacketSource::Demuxer(s) => *s,
@ -75,7 +71,11 @@ impl Decoder {
if let std::collections::hash_map::Entry::Vacant(e) = self.codecs.entry(stream_index) {
let codec = avcodec_find_decoder((*codec_par).codec_id);
if codec.is_null() {
return Err(Error::msg("Failed to find codec"));
return Err(Error::msg(format!(
"Failed to find codec: {}",
CStr::from_ptr(avcodec_get_name((*codec_par).codec_id))
.to_str()?
)));
}
let context = avcodec_alloc_context3(ptr::null());
if context.is_null() {
@ -96,7 +96,7 @@ impl Decoder {
return Err(Error::msg(format!("Failed to decode packet {}", ret)));
}
let mut frames = 0;
let mut pkgs = Vec::new();
while ret >= 0 {
let frame = av_frame_alloc();
ret = avcodec_receive_frame(ctx.context, frame);
@ -108,28 +108,22 @@ impl Decoder {
}
(*frame).pict_type = AV_PICTURE_TYPE_NONE; // encoder prints warnings
self.chan_out.send(PipelinePayload::AvFrame(
pkgs.push(PipelinePayload::AvFrame(
frame,
AVFrameSource::Decoder(stream),
))?;
frames += 1;
));
}
return Ok(frames);
Ok(pkgs)
} else {
Ok(vec![])
}
Ok(0)
}
pub fn process(&mut self) -> Result<usize, Error> {
if let Ok(pkg) = self.chan_in.try_recv() {
return if let PipelinePayload::AvPacket(pkt, ref src) = pkg {
unsafe {
let frames = self.decode_pkt(pkt, src)?;
Ok(frames)
}
} else {
Err(Error::msg("Payload not supported"))
};
pub fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error> {
if let PipelinePayload::AvPacket(pkt, ref src) = pkg {
unsafe { self.decode_pkt(pkt, src) }
} else {
Err(Error::msg("Payload not supported"))
}
Ok(0)
}
}

View File

@ -1,13 +1,17 @@
use crate::fraction::Fraction;
use ffmpeg_sys_next::AVFormatContext;
use std::fmt::{Display, Formatter};
use crate::fraction::Fraction;
#[derive(Clone, Debug, PartialEq)]
pub struct DemuxStreamInfo {
pub struct DemuxerInfo {
pub channels: Vec<StreamInfoChannel>,
pub ctx: *const AVFormatContext,
}
impl Display for DemuxStreamInfo {
unsafe impl Send for DemuxerInfo {}
unsafe impl Sync for DemuxerInfo {}
impl Display for DemuxerInfo {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "Demuxer Info:")?;
for c in &self.channels {
@ -43,6 +47,7 @@ pub struct StreamInfoChannel {
pub width: usize,
pub height: usize,
pub fps: f32,
pub format: usize,
}
impl TryInto<Fraction> for StreamInfoChannel {

View File

@ -3,32 +3,24 @@ use std::time::Duration;
use anyhow::Error;
use bytes::{BufMut, Bytes};
use ffmpeg_sys_next::*;
use ffmpeg_sys_next::AVMediaType::{AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO};
use tokio::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use ffmpeg_sys_next::*;
use tokio::sync::mpsc::error::TryRecvError;
use tokio::sync::mpsc::UnboundedReceiver;
use tokio::time::Instant;
use crate::demux::info::{DemuxStreamInfo, StreamChannelType, StreamInfoChannel};
use crate::demux::info::{DemuxerInfo, StreamChannelType, StreamInfoChannel};
use crate::pipeline::{AVPacketSource, PipelinePayload};
use crate::return_ffmpeg_error;
use crate::utils::get_ffmpeg_error_msg;
pub mod info;
///
/// Demuxer supports demuxing and decoding
///
/// | Type | Value |
/// | ------ | ----------------------------- |
/// | Video | H264, H265, VP8, VP9, AV1 |
/// | Audio | AAC, Opus |
/// | Format | MPEG-TS |
///
pub(crate) struct Demuxer {
ctx: *mut AVFormatContext,
chan_out: UnboundedSender<PipelinePayload>,
started: Instant,
state: DemuxerBuffer,
info: Option<DemuxerInfo>,
}
unsafe impl Send for Demuxer {}
@ -75,27 +67,24 @@ unsafe extern "C" fn read_data(
}
impl Demuxer {
pub fn new(
chan_in: UnboundedReceiver<Bytes>,
chan_out: UnboundedSender<PipelinePayload>,
) -> Self {
pub fn new(chan_in: UnboundedReceiver<Bytes>) -> Self {
unsafe {
let ps = avformat_alloc_context();
(*ps).flags |= AVFMT_FLAG_CUSTOM_IO;
Self {
ctx: ps,
chan_out,
state: DemuxerBuffer {
chan_in,
buffer: bytes::BytesMut::new(),
},
info: None,
started: Instant::now(),
}
}
}
unsafe fn probe_input(&mut self) -> Result<DemuxStreamInfo, Error> {
unsafe fn probe_input(&mut self) -> Result<DemuxerInfo, Error> {
const BUFFER_SIZE: usize = 4096;
let buf_ptr = ptr::from_mut(&mut self.state) as *mut libc::c_void;
let pb = avio_alloc_context(
@ -115,10 +104,8 @@ impl Demuxer {
ptr::null_mut(),
ptr::null_mut(),
);
if ret < 0 {
let msg = get_ffmpeg_error_msg(ret);
return Err(Error::msg(msg));
}
return_ffmpeg_error!(ret);
if avformat_find_stream_info(self.ctx, ptr::null_mut()) < 0 {
return Err(Error::msg("Could not find stream info"));
}
@ -135,6 +122,7 @@ impl Demuxer {
width: (*(*video_stream).codecpar).width as usize,
height: (*(*video_stream).codecpar).height as usize,
fps: av_q2d((*video_stream).avg_frame_rate) as f32,
format: (*(*video_stream).codecpar).format as usize
});
}
@ -153,21 +141,22 @@ impl Demuxer {
width: (*(*audio_stream).codecpar).width as usize,
height: (*(*audio_stream).codecpar).height as usize,
fps: 0.0,
format: (*(*audio_stream).codecpar).format as usize
});
}
let info = DemuxStreamInfo {
let info = DemuxerInfo {
channels: channel_infos,
ctx: self.ctx,
};
Ok(info)
}
unsafe fn get_packet(&mut self) -> Result<(), Error> {
pub unsafe fn get_packet(&mut self) -> Result<PipelinePayload, Error> {
let pkt: *mut AVPacket = av_packet_alloc();
let ret = av_read_frame(self.ctx, pkt);
if ret == AVERROR_EOF {
self.chan_out.send(PipelinePayload::Flush)?;
return Ok(());
return Ok(PipelinePayload::Flush);
}
if ret < 0 {
let msg = get_ffmpeg_error_msg(ret);
@ -175,21 +164,22 @@ impl Demuxer {
}
let stream = *(*self.ctx).streams.add((*pkt).stream_index as usize);
let pkg = PipelinePayload::AvPacket(pkt, AVPacketSource::Demuxer(stream));
self.chan_out.send(pkg)?;
Ok(())
Ok(pkg)
}
pub fn process(&mut self) -> Result<Option<DemuxStreamInfo>, Error> {
unsafe {
let score = (*self.ctx).probe_score;
if score < 30 {
/// Try probe input stream
pub fn try_probe(&mut self) -> Result<Option<DemuxerInfo>, Error> {
match &self.info {
None => {
if (Instant::now() - self.started) > Duration::from_millis(500) {
return Ok(Some(self.probe_input()?));
let inf = unsafe { self.probe_input()? };
self.info = Some(inf.clone());
Ok(Some(inf))
} else {
Ok(None)
}
return Ok(None);
}
self.get_packet()?;
Ok(None)
Some(i) => Ok(Some(i.clone())),
}
}
}

View File

@ -1,27 +1,27 @@
use std::collections::{HashMap, HashSet, VecDeque};
use std::collections::{HashSet, VecDeque};
use std::fmt::Display;
use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{
av_dump_format, av_interleaved_write_frame, av_opt_set, av_packet_clone, av_packet_copy_props,
avcodec_parameters_from_context, avformat_alloc_output_context2, avformat_free_context,
avformat_write_header, AVFormatContext, AVPacket,
av_dump_format, av_interleaved_write_frame, av_opt_set, avcodec_parameters_copy,
avcodec_parameters_from_context, avformat_alloc_output_context2, avformat_free_context, avformat_write_header, AVFormatContext, AVPacket, AVStream,
};
use itertools::Itertools;
use log::info;
use tokio::sync::mpsc::UnboundedReceiver;
use uuid::Uuid;
use crate::egress::{EgressConfig, map_variants_to_streams};
use crate::egress::{map_variants_to_streams, EgressConfig};
use crate::pipeline::{AVPacketSource, PipelinePayload, PipelineProcessor};
use crate::return_ffmpeg_error;
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::{VariantStream, VariantStreamType};
use crate::variant::{find_stream, StreamMapping, VariantStream};
pub struct HlsEgress {
id: Uuid,
config: EgressConfig,
variants: Vec<VariantStream>,
ctx: *mut AVFormatContext,
chan_in: UnboundedReceiver<PipelinePayload>,
stream_init: HashSet<Uuid>,
init: bool,
packet_buffer: VecDeque<PipelinePayload>,
@ -40,212 +40,279 @@ impl Drop for HlsEgress {
}
}
enum HlsMapEntry {
Video(usize),
Audio(usize),
Subtitle(usize),
}
impl Display for HlsMapEntry {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HlsMapEntry::Video(i) => write!(f, "v:{}", i),
HlsMapEntry::Audio(i) => write!(f, "a:{}", i),
HlsMapEntry::Subtitle(i) => write!(f, "s:{}", i),
}
}
}
struct HlsStream {
name: String,
entries: Vec<HlsMapEntry>,
}
impl Display for HlsStream {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{},name:{}", self.entries.iter().join(","), self.name)
}
}
impl HlsEgress {
pub fn new(
chan_in: UnboundedReceiver<PipelinePayload>,
id: Uuid,
config: EgressConfig,
) -> Self {
pub fn new(id: Uuid, config: EgressConfig, variants: Vec<VariantStream>) -> Self {
let filtered_vars: Vec<VariantStream> = config
.variants
.iter()
.filter_map(|x| variants.iter().find(|y| y.id() == *x))
.cloned()
.collect();
Self {
id,
config,
variants: filtered_vars,
ctx: ptr::null_mut(),
chan_in,
init: false,
stream_init: HashSet::new(),
packet_buffer: VecDeque::new(),
}
}
unsafe fn setup_muxer(&mut self) -> Result<(), Error> {
let mut ctx = ptr::null_mut();
pub(crate) fn setup_muxer(&mut self) -> Result<(), Error> {
unsafe {
let mut ctx = ptr::null_mut();
let base = format!("{}/{}", self.config.out_dir, self.id);
let base = format!("{}/{}", self.config.out_dir, self.id);
let ret = avformat_alloc_output_context2(
&mut ctx,
ptr::null(),
"hls\0".as_ptr() as *const libc::c_char,
format!("{}/%v/live.m3u8\0", base).as_ptr() as *const libc::c_char,
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
av_opt_set(
(*ctx).priv_data,
"hls_segment_filename\0".as_ptr() as *const libc::c_char,
format!("{}/%v/%05d.ts\0", base).as_ptr() as *const libc::c_char,
0,
);
av_opt_set(
(*ctx).priv_data,
"master_pl_name\0".as_ptr() as *const libc::c_char,
"live.m3u8\0".as_ptr() as *const libc::c_char,
0,
);
av_opt_set(
(*ctx).priv_data,
"master_pl_publish_rate\0".as_ptr() as *const libc::c_char,
"10\0".as_ptr() as *const libc::c_char,
0,
);
if let Some(first_video_track) = self.config.variants.iter().find_map(|v| {
if let VariantStream::Video(vv) = v {
Some(vv)
} else {
None
}
}) {
let ret = avformat_alloc_output_context2(
&mut ctx,
ptr::null(),
"hls\0".as_ptr() as *const libc::c_char,
format!("{}/%v/live.m3u8\0", base).as_ptr() as *const libc::c_char,
);
return_ffmpeg_error!(ret);
av_opt_set(
(*ctx).priv_data,
"hls_time\0".as_ptr() as *const libc::c_char,
format!("{}\0", first_video_track.keyframe_interval).as_ptr()
as *const libc::c_char,
"hls_segment_filename\0".as_ptr() as *const libc::c_char,
format!("{}/%v/%05d.ts\0", base).as_ptr() as *const libc::c_char,
0,
);
}
av_opt_set(
(*ctx).priv_data,
"hls_flags\0".as_ptr() as *const libc::c_char,
"delete_segments\0".as_ptr() as *const libc::c_char,
0,
);
av_opt_set(
(*ctx).priv_data,
"master_pl_name\0".as_ptr() as *const libc::c_char,
"live.m3u8\0".as_ptr() as *const libc::c_char,
0,
);
av_opt_set(
(*ctx).priv_data,
"master_pl_publish_rate\0".as_ptr() as *const libc::c_char,
"10\0".as_ptr() as *const libc::c_char,
0,
);
if let Some(first_video_track) = self.variants.iter().find_map(|v| {
if let VariantStream::Video(vv) = v {
Some(vv)
} else {
None
}
}) {
av_opt_set(
(*ctx).priv_data,
"hls_time\0".as_ptr() as *const libc::c_char,
format!(
"{}\0",
first_video_track.keyframe_interval / first_video_track.fps
)
.as_ptr() as *const libc::c_char,
0,
);
}
av_opt_set(
(*ctx).priv_data,
"hls_flags\0".as_ptr() as *const libc::c_char,
"delete_segments\0".as_ptr() as *const libc::c_char,
0,
);
map_variants_to_streams(ctx, &self.variants)?;
self.ctx = ctx;
Ok(())
}
}
unsafe fn setup_hls_mapping(&mut self) -> Result<(), Error> {
if self.ctx.is_null() {
return Err(Error::msg("Context not setup"));
}
// configure mapping
let mut stream_map: HashMap<usize, Vec<String>> = HashMap::new();
for var in &self.config.variants {
let cfg = match var {
VariantStream::Video(vx) => format!("v:{}", vx.dst_index),
VariantStream::Audio(ax) => format!("a:{}", ax.dst_index),
let mut stream_map = Vec::new();
for (g, vars) in &self
.variants
.iter()
.sorted_by(|a, b| a.group_id().cmp(&b.group_id()))
.group_by(|x| x.group_id())
{
let mut group = HlsStream {
name: format!("stream_{}", g),
entries: Vec::new(),
};
if let Some(out_stream) = stream_map.get_mut(&var.dst_index()) {
out_stream.push(cfg);
} else {
stream_map.insert(var.dst_index(), vec![cfg]);
for var in vars {
let n = Self::get_as_nth_stream_type(self.ctx, var);
match var {
VariantStream::Video(_) => group.entries.push(HlsMapEntry::Video(n)),
VariantStream::Audio(_) => group.entries.push(HlsMapEntry::Audio(n)),
VariantStream::CopyVideo(_) => group.entries.push(HlsMapEntry::Video(n)),
VariantStream::CopyAudio(_) => group.entries.push(HlsMapEntry::Audio(n)),
};
}
stream_map.push(group);
}
let stream_map = stream_map.values().map(|v| v.join(",")).join(" ");
let stream_map = stream_map.iter().join(" ");
info!("map_str={}", stream_map);
av_opt_set(
(*ctx).priv_data,
(*self.ctx).priv_data,
"var_stream_map\0".as_ptr() as *const libc::c_char,
format!("{}\0", stream_map).as_ptr() as *const libc::c_char,
0,
);
map_variants_to_streams(ctx, &mut self.config.variants)?;
self.ctx = ctx;
av_dump_format(self.ctx, 0, ptr::null(), 1);
Ok(())
}
unsafe fn process_pkt_internal(
unsafe fn process_av_packet_internal(
&mut self,
pkt: *mut AVPacket,
src: &AVPacketSource,
) -> Result<(), Error> {
let variant = match src {
AVPacketSource::Encoder(v) => self
.config
AVPacketSource::Encoder(v) => find_stream(&self.variants, v)?,
AVPacketSource::Demuxer(v) => self
.variants
.iter()
.find(|x| x.id() == *v)
.ok_or(Error::msg("Variant does not exist"))?,
AVPacketSource::Muxer(v) => self
.config
.variants
.iter()
.find(|x| x.id() == *v)
.ok_or(Error::msg("Variant does not exist"))?,
_ => return Err(Error::msg(format!("Cannot mux packet from {:?}", src))),
.find(|x| x.src_index() == (*(*v)).index as usize)
.ok_or(Error::msg("Demuxer packet didn't match any variant"))?,
};
(*pkt).stream_index = variant.dst_index() as libc::c_int;
//dump_pkt_info(pkt);
let ret = av_interleaved_write_frame(self.ctx, pkt);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
return_ffmpeg_error!(ret);
Ok(())
}
fn process_payload_internal(&mut self, pkg: PipelinePayload) -> Result<(), Error> {
if let PipelinePayload::AvPacket(p, ref s) = pkg {
unsafe {
self.process_av_packet_internal(p, s)?;
}
}
Ok(())
}
unsafe fn process_pkt(
&mut self,
pkt: *mut AVPacket,
src: &AVPacketSource,
) -> Result<(), Error> {
let variant = match &src {
AVPacketSource::Encoder(v) => v,
AVPacketSource::Muxer(v) => v,
_ => return Err(Error::msg(format!("Cannot mux packet from {:?}", src))),
};
if !self.init {
let pkt_clone = av_packet_clone(pkt);
av_packet_copy_props(pkt_clone, pkt);
self.packet_buffer.push_back(PipelinePayload::AvPacket(
pkt_clone,
AVPacketSource::Muxer(*variant),
));
}
unsafe fn process_payload(&mut self, pkg: PipelinePayload) -> Result<(), Error> {
if !self.init && self.stream_init.len() == self.config.variants.len() {
let ret = avformat_write_header(self.ctx, ptr::null_mut());
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
self.setup_hls_mapping()?;
let ret = avformat_write_header(self.ctx, ptr::null_mut());
return_ffmpeg_error!(ret);
av_dump_format(self.ctx, 0, ptr::null(), 1);
self.init = true;
// push in pkts from buffer
// dequeue buffer
while let Some(pkt) = self.packet_buffer.pop_front() {
match pkt {
PipelinePayload::AvPacket(pkt, ref src) => {
self.process_pkt_internal(pkt, src)?;
}
_ => return Err(Error::msg("")),
}
self.process_payload_internal(pkt)?;
}
return Ok(());
} else if !self.init {
self.packet_buffer.push_back(pkg);
return Ok(());
}
self.process_pkt_internal(pkt, src)
self.process_payload_internal(pkg)
}
unsafe fn get_dst_stream(ctx: *const AVFormatContext, idx: usize) -> *mut AVStream {
for x in 0..(*ctx).nb_streams {
let stream = *(*ctx).streams.add(x as usize);
if (*stream).index as usize == idx {
return stream;
}
}
panic!("Stream index not found in output")
}
unsafe fn get_as_nth_stream_type(ctx: *const AVFormatContext, var: &VariantStream) -> usize {
let stream = Self::get_dst_stream(ctx, var.dst_index());
let mut ctr = 0;
for x in 0..(*ctx).nb_streams {
let stream_x = *(*ctx).streams.add(x as usize);
if (*(*stream).codecpar).codec_type == (*(*stream_x).codecpar).codec_type {
if (*stream_x).index == (*stream).index {
break;
}
ctr += 1;
}
}
ctr
}
}
impl PipelineProcessor for HlsEgress {
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv() {
match pkg {
PipelinePayload::AvPacket(pkt, ref src) => unsafe {
self.process_pkt(pkt, src)?;
},
PipelinePayload::EncoderInfo(ref var, ctx) => unsafe {
if self.ctx.is_null() {
self.setup_muxer()?;
fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error> {
match pkg {
PipelinePayload::AvPacket(_, _) => unsafe {
self.process_payload(pkg)?;
},
PipelinePayload::SourceInfo(ref d) => unsafe {
for var in &self.variants {
match var {
VariantStream::CopyVideo(cv) => {
let src = *(*d.ctx).streams.add(cv.src_index);
let dst = Self::get_dst_stream(self.ctx, cv.dst_index);
let ret = avcodec_parameters_copy((*dst).codecpar, (*src).codecpar);
return_ffmpeg_error!(ret);
self.stream_init.insert(var.id());
}
VariantStream::CopyAudio(ca) => {
let src = *(*d.ctx).streams.add(ca.src_index);
let dst = Self::get_dst_stream(self.ctx, ca.dst_index);
let ret = avcodec_parameters_copy((*dst).codecpar, (*src).codecpar);
return_ffmpeg_error!(ret);
self.stream_init.insert(var.id());
}
_ => {}
}
}
},
PipelinePayload::EncoderInfo(ref var, ctx) => unsafe {
if let Some(my_var) = self.variants.iter().find(|x| x.id() == *var) {
if !self.stream_init.contains(var) {
let variant = self
.config
.variants
.iter()
.find(|x| x.id() == *var)
.ok_or(Error::msg("Variant does not exist"))?;
let out_stream = *(*self.ctx).streams.add(variant.dst_index());
let out_stream = Self::get_dst_stream(self.ctx, my_var.dst_index());
avcodec_parameters_from_context((*out_stream).codecpar, ctx);
self.stream_init.insert(*var);
}
},
_ => return Err(Error::msg(format!("Payload not supported: {:?}", pkg))),
}
}
},
_ => return Err(Error::msg(format!("Payload not supported: {:?}", pkg))),
}
Ok(())
// Muxer never returns anything
Ok(vec![])
}
}

View File

@ -1,11 +1,11 @@
use std::fmt::{Display, Formatter};
use std::ptr;
use crate::variant::{StreamMapping, VariantStream};
use anyhow::Error;
use ffmpeg_sys_next::{avformat_new_stream, AVFormatContext};
use serde::{Deserialize, Serialize};
use crate::variant::{VariantStream, VariantStreamType};
use uuid::Uuid;
pub mod hls;
pub mod http;
@ -15,7 +15,8 @@ pub mod recorder;
pub struct EgressConfig {
pub name: String,
pub out_dir: String,
pub variants: Vec<VariantStream>,
/// Which variants will be used in this muxer
pub variants: Vec<Uuid>,
}
impl Display for EgressConfig {
@ -33,31 +34,18 @@ impl Display for EgressConfig {
pub unsafe fn map_variants_to_streams(
ctx: *mut AVFormatContext,
variants: &mut Vec<VariantStream>,
variants: &Vec<VariantStream>,
) -> Result<(), Error> {
for var in variants {
match var {
VariantStream::Video(vs) => {
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// overwrite dst_index to match output stream
vs.dst_index = (*stream).index as usize;
vs.to_stream(stream);
}
VariantStream::Audio(va) => {
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// overwrite dst_index to match output stream
va.dst_index = (*stream).index as usize;
va.to_stream(stream);
}
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// replace stream index value with variant dst_index
(*stream).index = var.dst_index() as libc::c_int;
var.to_stream(stream);
}
Ok(())
}
}

View File

@ -3,27 +3,27 @@ use std::{fs, ptr};
use anyhow::Error;
use ffmpeg_sys_next::{
av_dump_format, av_interleaved_write_frame, av_opt_set, avformat_alloc_output_context2, avformat_free_context,
avio_open2, AVFormatContext, AVPacket, AVIO_FLAG_WRITE,
av_dump_format, av_interleaved_write_frame, av_opt_set, avformat_alloc_output_context2,
avformat_free_context, avio_open2, AVFormatContext, AVPacket, AVIO_FLAG_WRITE,
};
use ffmpeg_sys_next::{
avcodec_parameters_from_context, avformat_write_header, AVFMT_GLOBALHEADER,
AV_CODEC_FLAG_GLOBAL_HEADER,
};
use log::info;
use tokio::sync::mpsc::UnboundedReceiver;
use uuid::Uuid;
use crate::egress::{map_variants_to_streams, EgressConfig};
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::return_ffmpeg_error;
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::VariantStreamType;
use crate::variant::{find_stream, StreamMapping, VariantStream};
pub struct RecorderEgress {
id: Uuid,
config: EgressConfig,
variants: Vec<VariantStream>,
ctx: *mut AVFormatContext,
chan_in: UnboundedReceiver<PipelinePayload>,
stream_init: HashSet<Uuid>,
init: bool,
packet_buffer: VecDeque<PipelinePayload>,
@ -43,16 +43,18 @@ impl Drop for RecorderEgress {
}
impl RecorderEgress {
pub fn new(
chan_in: UnboundedReceiver<PipelinePayload>,
id: Uuid,
config: EgressConfig,
) -> Self {
pub fn new(id: Uuid, config: EgressConfig, variants: Vec<VariantStream>) -> Self {
let filtered_vars: Vec<VariantStream> = config
.variants
.iter()
.filter_map(|x| variants.iter().find(|y| y.id() == *x))
.cloned()
.collect();
Self {
id,
config,
variants: filtered_vars,
ctx: ptr::null_mut(),
chan_in,
stream_init: HashSet::new(),
init: false,
packet_buffer: VecDeque::new(),
@ -72,10 +74,8 @@ impl RecorderEgress {
ptr::null_mut(),
out_file.as_ptr() as *const libc::c_char,
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
map_variants_to_streams(ctx, &mut self.config.variants)?;
return_ffmpeg_error!(ret);
map_variants_to_streams(ctx, &self.variants)?;
if (*(*ctx).oformat).flags & AVFMT_GLOBALHEADER != 0 {
(*ctx).flags |= AV_CODEC_FLAG_GLOBAL_HEADER as libc::c_int;
@ -99,15 +99,12 @@ impl RecorderEgress {
ptr::null_mut(),
ptr::null_mut(),
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
return_ffmpeg_error!(ret);
av_dump_format(self.ctx, 0, ptr::null(), 1);
let ret = avformat_write_header(self.ctx, ptr::null_mut());
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
return_ffmpeg_error!(ret);
self.init = true;
Ok(true)
} else {
@ -118,54 +115,46 @@ impl RecorderEgress {
unsafe fn process_pkt(&mut self, pkt: *mut AVPacket) -> Result<(), Error> {
//dump_pkt_info(pkt);
let ret = av_interleaved_write_frame(self.ctx, pkt);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
return_ffmpeg_error!(ret);
Ok(())
}
}
impl PipelineProcessor for RecorderEgress {
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv() {
match pkg {
PipelinePayload::AvPacket(pkt, ref src) => unsafe {
if self.open_muxer()? {
while let Some(pkt) = self.packet_buffer.pop_front() {
match pkt {
PipelinePayload::AvPacket(pkt, ref src) => {
self.process_pkt(pkt)?;
}
_ => return Err(Error::msg("")),
fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error> {
match pkg {
PipelinePayload::AvPacket(pkt, ref src) => unsafe {
if self.open_muxer()? {
while let Some(pkt) = self.packet_buffer.pop_front() {
match pkt {
PipelinePayload::AvPacket(pkt, ref src) => {
self.process_pkt(pkt)?;
}
_ => return Err(Error::msg("")),
}
self.process_pkt(pkt)?;
} else {
self.packet_buffer.push_back(pkg);
}
},
PipelinePayload::EncoderInfo(ref var, ctx) => unsafe {
if self.ctx.is_null() {
self.setup_muxer()?;
}
if !self.stream_init.contains(var) {
let my_var = self
.config
.variants
.iter()
.find(|x| x.id() == *var)
.ok_or(Error::msg("Variant does not exist"))?;
let out_stream = *(*self.ctx).streams.add(my_var.dst_index());
avcodec_parameters_from_context((*out_stream).codecpar, ctx);
(*(*out_stream).codecpar).codec_tag = 0;
self.process_pkt(pkt)?;
} else {
self.packet_buffer.push_back(pkg);
}
},
PipelinePayload::EncoderInfo(ref var, ctx) => unsafe {
if self.ctx.is_null() {
self.setup_muxer()?;
}
if !self.stream_init.contains(var) {
let my_var = find_stream(&self.variants, var)?;
let out_stream = *(*self.ctx).streams.add(my_var.dst_index());
avcodec_parameters_from_context((*out_stream).codecpar, ctx);
(*(*out_stream).codecpar).codec_tag = 0;
self.stream_init.insert(*var);
info!("Setup encoder info: {}", my_var);
}
},
_ => return Err(Error::msg("Payload not supported")),
}
self.stream_init.insert(*var);
info!("Setup encoder info: {}", my_var);
}
},
_ => return Err(Error::msg("Payload not supported")),
}
Ok(())
// Muxer never returns anything
Ok(vec![])
}
}

View File

@ -3,32 +3,38 @@ use std::mem::transmute;
use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{av_audio_fifo_alloc, av_audio_fifo_free, av_audio_fifo_read, av_audio_fifo_size, av_audio_fifo_write, av_channel_layout_copy, av_frame_alloc, av_frame_free, av_get_sample_fmt_name, av_packet_alloc, av_packet_free, av_packet_rescale_ts, av_samples_alloc_array_and_samples, AVAudioFifo, AVCodec, avcodec_alloc_context3, avcodec_free_context, avcodec_open2, avcodec_receive_packet, avcodec_send_frame, AVCodecContext, AVERROR, AVFrame, AVRational, swr_alloc_set_opts2, swr_convert_frame, swr_free, swr_init, SwrContext};
use ffmpeg_sys_next::{
av_audio_fifo_alloc, av_audio_fifo_free, av_audio_fifo_read, av_audio_fifo_size,
av_audio_fifo_write, av_channel_layout_copy, av_frame_alloc, av_frame_free,
av_get_sample_fmt_name, av_packet_alloc, av_packet_free, av_packet_rescale_ts,
av_samples_alloc_array_and_samples, AVAudioFifo, AVCodec,
avcodec_alloc_context3, avcodec_free_context, avcodec_open2, avcodec_receive_packet,
avcodec_send_frame, AVCodecContext, AVERROR, AVFrame, AVRational, swr_alloc_set_opts2, swr_convert_frame,
swr_free, swr_init, SwrContext,
};
use libc::EAGAIN;
use log::info;
use tokio::sync::mpsc::UnboundedSender;
use crate::ipc::Rx;
use crate::pipeline::{AVFrameSource, AVPacketSource, PipelinePayload, PipelineProcessor};
use crate::return_ffmpeg_error;
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::{AudioVariant, VariantStreamType};
use crate::variant::{EncodedStream, StreamMapping};
use crate::variant::audio::AudioVariant;
pub struct AudioEncoder<T> {
pub struct AudioEncoder {
variant: AudioVariant,
ctx: *mut AVCodecContext,
codec: *const AVCodec,
swr_ctx: *mut SwrContext,
fifo: *mut AVAudioFifo,
chan_in: T,
chan_out: UnboundedSender<PipelinePayload>,
pts: i64,
}
unsafe impl<T> Send for AudioEncoder<T> {}
unsafe impl Send for AudioEncoder {}
unsafe impl<T> Sync for AudioEncoder<T> {}
unsafe impl Sync for AudioEncoder {}
impl<T> Drop for AudioEncoder<T> {
impl Drop for AudioEncoder {
fn drop(&mut self) {
unsafe {
swr_free(&mut self.swr_ctx);
@ -38,113 +44,88 @@ impl<T> Drop for AudioEncoder<T> {
}
}
impl<TRecv> AudioEncoder<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
pub fn new(
chan_in: TRecv,
chan_out: UnboundedSender<PipelinePayload>,
variant: AudioVariant,
) -> Self {
impl AudioEncoder {
pub fn new(variant: AudioVariant) -> Self {
Self {
ctx: ptr::null_mut(),
codec: ptr::null(),
swr_ctx: ptr::null_mut(),
fifo: ptr::null_mut(),
variant,
chan_in,
chan_out,
pts: 0,
}
}
unsafe fn setup_encoder(&mut self, frame: *mut AVFrame) -> Result<(), Error> {
if self.ctx.is_null() {
let encoder = self.variant.get_codec();
if encoder.is_null() {
return Err(Error::msg("Encoder not found"));
}
let ctx = avcodec_alloc_context3(encoder);
if ctx.is_null() {
return Err(Error::msg("Failed to allocate encoder context"));
}
self.variant.to_codec_context(ctx);
// setup re-sampler if output format does not match input format
if (*ctx).sample_fmt != transmute((*frame).format)
|| (*ctx).sample_rate != (*frame).sample_rate
|| (*ctx).ch_layout.nb_channels != (*frame).ch_layout.nb_channels
{
info!(
"Setup audio resampler: {}.{}@{} -> {}.{}@{}",
(*frame).ch_layout.nb_channels,
CStr::from_ptr(av_get_sample_fmt_name(transmute((*frame).format)))
.to_str()
.unwrap(),
(*frame).sample_rate,
(*ctx).ch_layout.nb_channels,
CStr::from_ptr(av_get_sample_fmt_name((*ctx).sample_fmt))
.to_str()
.unwrap(),
(*ctx).sample_rate
);
let mut swr_ctx = ptr::null_mut();
let ret = swr_alloc_set_opts2(
&mut swr_ctx,
&(*ctx).ch_layout,
(*ctx).sample_fmt,
(*ctx).sample_rate,
&(*frame).ch_layout,
transmute((*frame).format),
(*frame).sample_rate,
0,
ptr::null_mut(),
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
let ret = swr_init(swr_ctx);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
self.swr_ctx = swr_ctx;
let fifo = av_audio_fifo_alloc((*ctx).sample_fmt, (*ctx).ch_layout.nb_channels, 1);
if fifo.is_null() {
return Err(Error::msg("Failed to allocate audio FIFO"));
}
self.fifo = fifo;
}
let ret = avcodec_open2(ctx, encoder, ptr::null_mut());
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
// copy channel layout from codec
let mut px = (*encoder).ch_layouts;
while !px.is_null() {
if (*px).nb_channels as u16 == self.variant.channels {
av_channel_layout_copy(&mut (*ctx).ch_layout, px);
break;
}
px = px.add(1);
}
// let downstream steps know about the encoder
self.chan_out
.send(PipelinePayload::EncoderInfo(self.variant.id(), ctx))?;
self.ctx = ctx;
self.codec = encoder;
unsafe fn setup_encoder(
&mut self,
frame: *mut AVFrame,
) -> Result<Option<PipelinePayload>, Error> {
if !self.ctx.is_null() {
return Ok(None);
}
Ok(())
let encoder = self.variant.get_codec();
if encoder.is_null() {
return Err(Error::msg("Encoder not found"));
}
let ctx = avcodec_alloc_context3(encoder);
if ctx.is_null() {
return Err(Error::msg("Failed to allocate encoder context"));
}
self.variant.to_codec_context(ctx);
// setup re-sampler if output format does not match input format
if (*ctx).sample_fmt != transmute((*frame).format)
|| (*ctx).sample_rate != (*frame).sample_rate
|| (*ctx).ch_layout.nb_channels != (*frame).ch_layout.nb_channels
{
info!(
"Setup audio resampler: {}.{}@{} -> {}.{}@{}",
(*frame).ch_layout.nb_channels,
CStr::from_ptr(av_get_sample_fmt_name(transmute((*frame).format)))
.to_str()?,
(*frame).sample_rate,
(*ctx).ch_layout.nb_channels,
CStr::from_ptr(av_get_sample_fmt_name((*ctx).sample_fmt))
.to_str()?,
(*ctx).sample_rate
);
let mut swr_ctx = ptr::null_mut();
let ret = swr_alloc_set_opts2(
&mut swr_ctx,
&(*ctx).ch_layout,
(*ctx).sample_fmt,
(*ctx).sample_rate,
&(*frame).ch_layout,
transmute((*frame).format),
(*frame).sample_rate,
0,
ptr::null_mut(),
);
return_ffmpeg_error!(ret);
let ret = swr_init(swr_ctx);
return_ffmpeg_error!(ret);
self.swr_ctx = swr_ctx;
let fifo = av_audio_fifo_alloc((*ctx).sample_fmt, (*ctx).ch_layout.nb_channels, 1);
if fifo.is_null() {
return Err(Error::msg("Failed to allocate audio FIFO"));
}
self.fifo = fifo;
}
let ret = avcodec_open2(ctx, encoder, ptr::null_mut());
return_ffmpeg_error!(ret);
self.ctx = ctx;
self.codec = encoder;
Ok(Some(PipelinePayload::EncoderInfo(self.variant.id(), ctx)))
}
/// Returns true if we should process audio frame from FIFO
@ -165,8 +146,6 @@ where
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
// skip fifo
return Ok(Some(out_frame));
let ret = av_audio_fifo_write(
self.fifo,
(*out_frame).extended_data as *const *mut libc::c_void,
@ -244,11 +223,14 @@ where
&mut self,
frame: *mut AVFrame,
in_tb: &AVRational,
) -> Result<(), Error> {
self.setup_encoder(frame)?;
) -> Result<Vec<PipelinePayload>, Error> {
let mut pkgs = Vec::new();
if let Some(di) = self.setup_encoder(frame)? {
pkgs.push(di);
}
let frame = self.process_audio_frame(frame)?;
if frame.is_none() {
return Ok(());
return Ok(pkgs);
}
let mut frame = frame.unwrap();
@ -266,51 +248,45 @@ where
let mut pkt = av_packet_alloc();
ret = avcodec_receive_packet(self.ctx, pkt);
if ret < 0 {
av_frame_free(&mut frame);
av_packet_free(&mut pkt);
if ret == AVERROR(EAGAIN) {
return Ok(());
break;
}
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
//set_encoded_pkt_timing(self.ctx, pkt, in_tb, &mut self.pts, &self.variant);
av_packet_rescale_ts(pkt, *in_tb, self.variant.time_base());
self.chan_out.send(PipelinePayload::AvPacket(
pkgs.push(PipelinePayload::AvPacket(
pkt,
AVPacketSource::Encoder(self.variant.id()),
))?;
));
}
av_frame_free(&mut frame);
Ok(())
Ok(pkgs)
}
}
impl<TRecv> PipelineProcessor for AudioEncoder<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv_next() {
match pkg {
PipelinePayload::AvFrame(frm, ref src) => unsafe {
let in_stream = match src {
AVFrameSource::Decoder(s) => *s,
_ => {
return Err(Error::msg(format!("Cannot process frame from: {:?}", src)))
}
};
if self.variant.src_index == (*in_stream).index as usize {
self.process_frame(frm, &(*in_stream).time_base)?;
}
},
PipelinePayload::Flush => unsafe {
self.process_frame(ptr::null_mut(), &AVRational { num: 0, den: 1 })?;
},
_ => return Err(Error::msg("Payload not supported")),
}
impl PipelineProcessor for AudioEncoder {
fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error> {
match pkg {
PipelinePayload::AvFrame(frm, ref src) => unsafe {
let in_stream = match src {
AVFrameSource::Decoder(s) => *s,
_ => return Err(Error::msg(format!("Cannot process frame from: {:?}", src))),
};
if self.variant.src_index() == (*in_stream).index as usize {
self.process_frame(frm, &(*in_stream).time_base)
} else {
// stream didnt match, skipping
Ok(vec![])
}
},
PipelinePayload::Flush => unsafe {
Ok(self.process_frame(ptr::null_mut(), &AVRational { num: 0, den: 1 })?)
},
_ => Err(Error::msg("Payload not supported")),
}
Ok(())
}
}

View File

@ -1,10 +1,10 @@
use ffmpeg_sys_next::{
AV_NOPTS_VALUE, av_packet_rescale_ts, AV_PKT_FLAG_KEY, AVCodecContext, AVPacket, AVRational,
};
use ffmpeg_sys_next::AVMediaType::AVMEDIA_TYPE_VIDEO;
use ffmpeg_sys_next::{
av_packet_rescale_ts, AVCodecContext, AVPacket, AVRational, AV_NOPTS_VALUE, AV_PKT_FLAG_KEY,
};
use log::info;
use crate::variant::VariantStreamType;
use crate::variant::{EncodedStream, StreamMapping};
pub mod audio;
pub mod video;
@ -17,7 +17,7 @@ pub unsafe fn set_encoded_pkt_timing<TVar>(
pts: &mut i64,
var: &TVar,
) where
TVar: VariantStreamType,
TVar: EncodedStream + StreamMapping,
{
let out_tb = (*ctx).time_base;

View File

@ -3,85 +3,76 @@ use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{
av_packet_alloc, av_packet_free, av_packet_rescale_ts, AVCodec,
avcodec_alloc_context3, avcodec_find_encoder, avcodec_open2, avcodec_receive_packet, avcodec_send_frame,
AVCodecContext, AVERROR, AVFrame, AVRational,
av_packet_alloc, av_packet_free, av_packet_rescale_ts, avcodec_alloc_context3,
avcodec_find_encoder, avcodec_open2, avcodec_receive_packet, avcodec_send_frame, AVCodec,
AVCodecContext, AVFrame, AVRational, AVERROR,
};
use libc::EAGAIN;
use tokio::sync::mpsc::UnboundedSender;
use crate::ipc::Rx;
use crate::pipeline::{AVFrameSource, AVPacketSource, PipelinePayload, PipelineProcessor};
use crate::return_ffmpeg_error;
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::{VariantStreamType, VideoVariant};
use crate::variant::video::VideoVariant;
use crate::variant::{EncodedStream, StreamMapping};
pub struct VideoEncoder<T> {
pub struct VideoEncoder {
variant: VideoVariant,
ctx: *mut AVCodecContext,
codec: *const AVCodec,
chan_in: T,
chan_out: UnboundedSender<PipelinePayload>,
pts: i64,
}
unsafe impl<T> Send for VideoEncoder<T> {}
unsafe impl Send for VideoEncoder {}
unsafe impl<T> Sync for VideoEncoder<T> {}
unsafe impl Sync for VideoEncoder {}
impl<TRecv> VideoEncoder<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
pub fn new(
chan_in: TRecv,
chan_out: UnboundedSender<PipelinePayload>,
variant: VideoVariant,
) -> Self {
impl VideoEncoder {
pub fn new(variant: VideoVariant) -> Self {
Self {
ctx: ptr::null_mut(),
codec: ptr::null(),
variant,
chan_in,
chan_out,
pts: 0,
}
}
unsafe fn setup_encoder(&mut self) -> Result<(), Error> {
if self.ctx.is_null() {
let codec = self.variant.codec;
let encoder = avcodec_find_encoder(transmute(codec as i32));
if encoder.is_null() {
return Err(Error::msg("Encoder not found"));
}
let ctx = avcodec_alloc_context3(encoder);
if ctx.is_null() {
return Err(Error::msg("Failed to allocate encoder context"));
}
self.variant.to_codec_context(ctx);
let ret = avcodec_open2(ctx, encoder, ptr::null_mut());
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
// let downstream steps know about the encoder
self.chan_out
.send(PipelinePayload::EncoderInfo(self.variant.id(), ctx))?;
self.ctx = ctx;
self.codec = encoder;
unsafe fn setup_encoder(&mut self) -> Result<Option<PipelinePayload>, Error> {
if !self.ctx.is_null() {
return Ok(None);
}
Ok(())
let codec = self.variant.codec;
let encoder = avcodec_find_encoder(transmute(codec as i32));
if encoder.is_null() {
return Err(Error::msg("Encoder not found"));
}
let ctx = avcodec_alloc_context3(encoder);
if ctx.is_null() {
return Err(Error::msg("Failed to allocate encoder context"));
}
self.variant.to_codec_context(ctx);
let ret = avcodec_open2(ctx, encoder, ptr::null_mut());
return_ffmpeg_error!(ret);
self.ctx = ctx;
self.codec = encoder;
Ok(Some(PipelinePayload::EncoderInfo(self.variant.id(), ctx)))
}
unsafe fn process_frame(
&mut self,
frame: *mut AVFrame,
in_tb: &AVRational,
) -> Result<(), Error> {
) -> Result<Vec<PipelinePayload>, Error> {
let mut pkgs = Vec::new();
if let Some(ei) = self.setup_encoder()? {
pkgs.push(ei);
}
(*frame).pts = self.pts;
self.pts += (*frame).duration;
@ -96,7 +87,7 @@ where
if ret != 0 {
av_packet_free(&mut pkt);
if ret == AVERROR(EAGAIN) {
return Ok(());
break;
}
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
@ -104,49 +95,40 @@ where
//set_encoded_pkt_timing(self.ctx, pkt, in_tb, &mut self.pts, &self.variant);
av_packet_rescale_ts(pkt, *in_tb, self.variant.time_base());
//dump_pkt_info(pkt);
self.chan_out.send(PipelinePayload::AvPacket(
pkgs.push(PipelinePayload::AvPacket(
pkt,
AVPacketSource::Encoder(self.variant.id()),
))?;
));
}
Ok(())
Ok(pkgs)
}
}
impl<TRecv> PipelineProcessor for VideoEncoder<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
fn process(&mut self) -> Result<(), Error> {
unsafe {
self.setup_encoder()?;
}
while let Ok(pkg) = self.chan_in.try_recv_next() {
match pkg {
PipelinePayload::AvFrame(frm, ref src) => unsafe {
let (in_stream, idx) = match src {
AVFrameSource::Decoder(s) => (*s, (*(*s)).index as usize),
AVFrameSource::None(s) => (ptr::null_mut(), *s),
_ => {
return Err(Error::msg(format!("Cannot process frame from: {:?}", src)))
}
impl PipelineProcessor for VideoEncoder {
fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error> {
match pkg {
PipelinePayload::AvFrame(frm, ref src) => unsafe {
let (in_stream, idx) = match src {
AVFrameSource::Decoder(s) => (*s, (*(*s)).index as usize),
AVFrameSource::None(s) => (ptr::null_mut(), *s),
_ => return Err(Error::msg(format!("Cannot process frame from: {:?}", src))),
};
if self.variant.src_index() == idx {
let tb = if in_stream.is_null() {
self.variant.time_base()
} else {
(*in_stream).time_base
};
if self.variant.src_index == idx {
let tb = if in_stream.is_null() {
self.variant.time_base()
} else {
(*in_stream).time_base
};
self.process_frame(frm, &tb)?;
}
},
PipelinePayload::Flush => unsafe {
self.process_frame(ptr::null_mut(), &AVRational { num: 0, den: 1 })?;
},
_ => return Err(Error::msg("Payload not supported")),
}
self.process_frame(frm, &tb)
} else {
Ok(vec![])
}
},
PipelinePayload::Flush => unsafe {
self.process_frame(ptr::null_mut(), &AVRational { num: 0, den: 1 })
},
_ => Err(Error::msg("Payload not supported")),
}
Ok(())
}
}

View File

@ -1,9 +1,9 @@
use serde::{Deserialize, Serialize};
pub mod file;
pub mod srt;
pub mod tcp;
pub mod test;
pub mod file;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ConnectionInfo {
@ -12,4 +12,4 @@ pub struct ConnectionInfo {
/// IP address of the connection
pub ip_addr: String,
}
}

View File

@ -1,184 +1,185 @@
use std::slice;
use std::ops::Add;
use std::slice;
use std::time::{Duration, Instant};
use ffmpeg_sys_next::{
av_frame_alloc, av_frame_get_buffer, AV_PROFILE_H264_MAIN,
};
use crate::encode::video::VideoEncoder;
use crate::ingress::ConnectionInfo;
use crate::pipeline::builder::PipelineBuilder;
use crate::pipeline::{AVFrameSource, PipelinePayload, PipelineProcessor};
use crate::scale::Scaler;
use crate::variant::mapping::VariantMapping;
use crate::variant::video::VideoVariant;
use ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_H264;
use ffmpeg_sys_next::AVColorSpace::AVCOL_SPC_RGB;
use ffmpeg_sys_next::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_sys_next::AVPixelFormat::{AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV420P};
use ffmpeg_sys_next::{
av_frame_alloc, av_frame_get_buffer, AV_PROFILE_H264_MAIN,
};
use fontdue::layout::{CoordinateSystem, Layout, TextStyle};
use libc::memcpy;
use log::{error, info, warn};
use tokio::runtime::Runtime;
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::mpsc::{unbounded_channel, UnboundedSender};
use uuid::Uuid;
use crate::encode::video::VideoEncoder;
use crate::ingress::ConnectionInfo;
use crate::pipeline::{AVFrameSource, PipelinePayload, PipelineProcessor};
use crate::pipeline::builder::PipelineBuilder;
use crate::scale::Scaler;
use crate::variant::VideoVariant;
const WIDTH: libc::c_int = 1920;
const HEIGHT: libc::c_int = 1080;
const FPS: libc::c_int = 25;
pub async fn listen(builder: PipelineBuilder) -> Result<(), anyhow::Error> {
info!("Test pattern enabled");
const WIDTH: libc::c_int = 1920;
const HEIGHT: libc::c_int = 1080;
const FPS: libc::c_int = 25;
let (tx, rx) = unbounded_channel();
let info = ConnectionInfo {
ip_addr: "".to_owned(),
endpoint: "test-pattern".to_owned(),
};
std::thread::spawn(move || {
let (tx, rx) = unbounded_channel();
let info = ConnectionInfo {
ip_addr: "".to_owned(),
endpoint: "test-pattern".to_owned(),
if let Ok(mut pl) = builder.build_for(info, rx).await {
let pipeline = std::thread::spawn(move || loop {
if let Err(e) = pl.run() {
error!("Pipeline error: {}\n{}", e, e.backtrace());
break;
}
});
let encoder = std::thread::spawn(move || {
run_encoder(tx);
});
if encoder.join().is_err() {
error!("Encoder thread error");
}
if pipeline.join().is_err() {
error!("Pipeline thread error");
}
}
Ok(())
}
fn run_encoder(tx: UnboundedSender<bytes::Bytes>) {
let var = VideoVariant {
mapping: VariantMapping {
id: Uuid::new_v4(),
src_index: 0,
dst_index: 0,
group_id: 0,
},
width: WIDTH as u16,
height: HEIGHT as u16,
fps: FPS as u16,
bitrate: 1_000_000,
codec: AV_CODEC_ID_H264 as usize,
profile: AV_PROFILE_H264_MAIN as usize,
level: 51,
keyframe_interval: FPS as u16,
pixel_format: AV_PIX_FMT_YUV420P as u32,
};
let mut sws = Scaler::new(var.clone());
let mut enc = VideoEncoder::new(var.clone());
let svg_data = std::fs::read("./test.svg").unwrap();
let tree = usvg::Tree::from_data(&svg_data, &Default::default()).unwrap();
let mut pixmap = tiny_skia::Pixmap::new(WIDTH as u32, HEIGHT as u32).unwrap();
let render_ts = tiny_skia::Transform::from_scale(1f32, 1f32);
resvg::render(&tree, render_ts, &mut pixmap.as_mut());
let font = include_bytes!("../../SourceCodePro-Regular.ttf") as &[u8];
let scp = fontdue::Font::from_bytes(font, Default::default()).unwrap();
let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
let fonts = &[&scp];
let start = Instant::now();
let mut frame_number: u64 = 0;
loop {
let stream_time = Duration::from_secs_f64(frame_number as f64 / FPS as f64);
let real_time = Instant::now().duration_since(start);
let wait_time = if stream_time > real_time {
stream_time - real_time
} else {
Duration::new(0, 0)
};
if !wait_time.is_zero() {
std::thread::sleep(wait_time);
}
let rt = Runtime::new().unwrap();
if let Ok(mut pl) = rt.block_on(builder.build_for(info, rx)) {
let pipeline = std::thread::spawn(move || loop {
if let Err(e) = pl.run() {
error!("Pipeline error: {}\n{}", e, e.backtrace());
break;
}
});
let (frame_in, frames_in_rx) = unbounded_channel();
let (sws_tx, sws_rx) = unbounded_channel();
let (frames_out_tx, mut frames_out) = unbounded_channel();
let var = VideoVariant {
id: Uuid::new_v4(),
src_index: 0,
dst_index: 0,
width: WIDTH as u16,
height: HEIGHT as u16,
fps: FPS as u16,
bitrate: 2_000_000,
codec: AV_CODEC_ID_H264 as usize,
profile: AV_PROFILE_H264_MAIN as usize,
level: 40,
keyframe_interval: 2,
pixel_format: AV_PIX_FMT_YUV420P as u32,
};
let mut sws = Scaler::new(frames_in_rx, sws_tx, var.clone());
let mut enc = VideoEncoder::new(sws_rx, frames_out_tx, var.clone());
frame_number += 1;
let svg_data = std::fs::read("./test.svg").unwrap();
let tree = usvg::Tree::from_data(&svg_data, &Default::default()).unwrap();
let mut pixmap = tiny_skia::Pixmap::new(WIDTH as u32, HEIGHT as u32).unwrap();
let render_ts = tiny_skia::Transform::from_scale(1f32, 1f32);
resvg::render(&tree, render_ts, &mut pixmap.as_mut());
let src_frame = unsafe {
let src_frame = av_frame_alloc();
let font = include_bytes!("../../SourceCodePro-Regular.ttf") as &[u8];
let scp = fontdue::Font::from_bytes(font, Default::default()).unwrap();
let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
let fonts = &[&scp];
(*src_frame).width = WIDTH;
(*src_frame).height = HEIGHT;
(*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
(*src_frame).key_frame = 1;
(*src_frame).colorspace = AVCOL_SPC_RGB;
(*src_frame).format = AV_PIX_FMT_RGBA as libc::c_int;
(*src_frame).pts = frame_number as i64;
(*src_frame).duration = 1;
av_frame_get_buffer(src_frame, 0);
let start = Instant::now();
let mut frame_number: u64 = 0;
loop {
let stream_time = Duration::from_secs_f64(frame_number as f64 / FPS as f64);
let real_time = Instant::now().duration_since(start);
let wait_time = if stream_time > real_time {
stream_time - real_time
} else {
Duration::new(0, 0)
};
if !wait_time.is_zero() {
std::thread::sleep(wait_time);
}
frame_number += 1;
let src_frame = unsafe {
let src_frame = av_frame_alloc();
(*src_frame).width = WIDTH;
(*src_frame).height = HEIGHT;
(*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
(*src_frame).key_frame = 1;
(*src_frame).colorspace = AVCOL_SPC_RGB;
(*src_frame).format = AV_PIX_FMT_RGBA as libc::c_int;
(*src_frame).pts = frame_number as i64;
(*src_frame).duration = 1;
av_frame_get_buffer(src_frame, 0);
memcpy(
(*src_frame).data[0] as *mut libc::c_void,
pixmap.data().as_ptr() as *const libc::c_void,
(WIDTH * HEIGHT * 4) as libc::size_t,
);
src_frame
};
layout.clear();
layout.append(
fonts,
&TextStyle::new(&format!("frame={}", frame_number), 40.0, 0),
);
for g in layout.glyphs() {
let (metrics, bitmap) = scp.rasterize_config_subpixel(g.key);
for y in 0..metrics.height {
for x in 0..metrics.width {
let dst_x = x + g.x as usize;
let dst_y = y + g.y as usize;
let offset_src = (x + y * metrics.width) * 3;
unsafe {
let offset_dst =
4 * dst_x + dst_y * (*src_frame).linesize[0] as usize;
let pixel_dst = (*src_frame).data[0].add(offset_dst);
*pixel_dst.offset(0) = bitmap[offset_src];
*pixel_dst.offset(1) = bitmap[offset_src + 1];
*pixel_dst.offset(2) = bitmap[offset_src + 2];
}
}
}
}
// scale/encode
if let Err(e) =
frame_in.send(PipelinePayload::AvFrame(src_frame, AVFrameSource::None(0)))
{
error!("Failed to send frames to encoder: {}", e);
pipeline.join().unwrap();
return;
}
if let Err(e) = sws.process() {
error!("Failed to scale frame: {}", e);
pipeline.join().unwrap();
return;
}
match enc.process() {
Ok(_) => {
while let Ok(p) = frames_out.try_recv() {
match p {
PipelinePayload::AvPacket(pkt, _) => unsafe {
let buf = bytes::Bytes::from(slice::from_raw_parts(
(*pkt).data,
(*pkt).size as usize,
));
if let Err(e) = tx.send(buf) {
error!("Failed to send test pkt: {}", e);
pipeline.join().unwrap();
return;
}
},
_ => {
warn!("Unknown payload from encoder: {:?}", p);
}
}
}
}
Err(e) => {
error!("Failed to encode: {}", e);
pipeline.join().unwrap();
return;
memcpy(
(*src_frame).data[0] as *mut libc::c_void,
pixmap.data().as_ptr() as *const libc::c_void,
(WIDTH * HEIGHT * 4) as libc::size_t,
);
src_frame
};
layout.clear();
layout.append(
fonts,
&TextStyle::new(&format!("frame={}", frame_number), 40.0, 0),
);
for g in layout.glyphs() {
let (metrics, bitmap) = scp.rasterize_config_subpixel(g.key);
for y in 0..metrics.height {
for x in 0..metrics.width {
let dst_x = x + g.x as usize;
let dst_y = y + g.y as usize;
let offset_src = (x + y * metrics.width) * 3;
unsafe {
let offset_dst = 4 * dst_x + dst_y * (*src_frame).linesize[0] as usize;
let pixel_dst = (*src_frame).data[0].add(offset_dst);
*pixel_dst.offset(0) = bitmap[offset_src];
*pixel_dst.offset(1) = bitmap[offset_src + 1];
*pixel_dst.offset(2) = bitmap[offset_src + 2];
}
}
}
}
});
Ok(())
// scale/encode
let pkgs = match sws.process(PipelinePayload::AvFrame(src_frame, AVFrameSource::None(0))) {
Ok(p) => p,
Err(e) => {
error!("Failed to scale frame: {}", e);
return;
}
};
for pkg in pkgs {
match enc.process(pkg) {
Ok(pkgs) => {
for pkg in pkgs {
match pkg {
PipelinePayload::AvPacket(pkt, _) => unsafe {
let buf = bytes::Bytes::from(slice::from_raw_parts(
(*pkt).data,
(*pkt).size as usize,
));
if let Err(e) = tx.send(buf) {
error!("Failed to send test pkt: {}", e);
return;
}
},
_ => {
warn!("Unknown payload from encoder: {:?}", pkg);
}
}
}
}
Err(e) => {
error!("Failed to encode: {}", e);
return;
}
}
}
}
}

View File

@ -9,8 +9,8 @@ pub trait Rx<T> {
#[async_trait]
impl<T> Rx<T> for tokio::sync::mpsc::UnboundedReceiver<T>
where
T: Send + Sync,
where
T: Send + Sync,
{
async fn recv(&mut self) -> Result<T, Error> {
self.recv().await.ok_or(Error::msg("recv error"))
@ -23,8 +23,8 @@ impl<T> Rx<T> for tokio::sync::mpsc::UnboundedReceiver<T>
#[async_trait]
impl<T> Rx<T> for tokio::sync::broadcast::Receiver<T>
where
T: Send + Sync + Clone,
where
T: Send + Sync + Clone,
{
async fn recv(&mut self) -> Result<T, Error> {
Ok(self.recv().await?)

View File

@ -20,7 +20,6 @@ mod ipc;
mod pipeline;
mod scale;
mod settings;
mod tag_frame;
mod utils;
mod variant;
mod webhook;
@ -81,7 +80,7 @@ async fn main() -> anyhow::Result<()> {
if let Some(p) = args.file {
listeners.push(tokio::spawn(ingress::file::listen(
p.parse().unwrap(),
p.parse()?,
builder.clone(),
)));
}
@ -97,3 +96,12 @@ async fn main() -> anyhow::Result<()> {
info!("Server closed");
Ok(())
}
#[macro_export]
macro_rules! return_ffmpeg_error {
($x:expr) => {
if $x < 0 {
return Err(Error::msg(get_ffmpeg_error_msg($x)));
}
};
}

View File

@ -19,9 +19,9 @@ impl PipelineBuilder {
info: ConnectionInfo,
recv: UnboundedReceiver<bytes::Bytes>,
) -> Result<PipelineRunner, anyhow::Error> {
self.webhook.start(info).await?;
Ok(PipelineRunner::new(
Default::default(),
info,
self.webhook.clone(),
recv,
))

View File

@ -8,7 +8,7 @@ use ffmpeg_sys_next::{
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::demux::info::DemuxStreamInfo;
use crate::demux::info::DemuxerInfo;
use crate::egress::EgressConfig;
use crate::variant::VariantStream;
@ -17,11 +17,14 @@ pub mod runner;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum EgressType {
/// HLS output egress
HLS(EgressConfig),
DASH,
WHEP,
MPEGTS(EgressConfig),
/// Record streams to local disk
Recorder(EgressConfig),
/// Forward streams to another RTMP server
RTMPForwarder(EgressConfig),
}
impl Display for EgressType {
@ -31,10 +34,8 @@ impl Display for EgressType {
"{}",
match self {
EgressType::HLS(c) => format!("{}", c),
EgressType::DASH => "DASH".to_owned(),
EgressType::WHEP => "WHEP".to_owned(),
EgressType::MPEGTS(c) => format!("{}", c),
EgressType::Recorder(c) => format!("{}", c),
EgressType::RTMPForwarder(c) => format!("{}", c),
}
)
}
@ -42,19 +43,21 @@ impl Display for EgressType {
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct PipelineConfig {
pub id: uuid::Uuid,
pub recording: Vec<VariantStream>,
pub id: Uuid,
/// Transcoded/Copied stream config
pub variants: Vec<VariantStream>,
/// Output muxers
pub egress: Vec<EgressType>,
}
impl Display for PipelineConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "\nPipeline Config ID={}", self.id)?;
if !self.recording.is_empty() {
write!(f, "\nRecording:")?;
for r in &self.recording {
write!(f, "\n\t{}", r)?;
}
write!(f, "\nVariants:")?;
for v in &self.variants {
write!(f, "\n\t{}", v)?;
}
if !self.egress.is_empty() {
write!(f, "\nEgress:")?;
@ -72,8 +75,6 @@ pub enum AVPacketSource {
Demuxer(*mut AVStream),
/// AVPacket from an encoder
Encoder(Uuid),
/// AVPacket from muxer
Muxer(Uuid),
}
#[derive(Debug, PartialEq, Clone)]
@ -98,10 +99,10 @@ pub enum PipelinePayload {
AvPacket(*mut AVPacket, AVPacketSource),
/// FFMpeg AVFrame
AvFrame(*mut AVFrame, AVFrameSource),
/// Information about the input stream
SourceInfo(DemuxStreamInfo),
/// Information about an encoder in this pipeline
EncoderInfo(Uuid, *const AVCodecContext),
/// Source stream information provided by the demuxer
SourceInfo(DemuxerInfo),
/// Flush pipeline
Flush,
}
@ -113,8 +114,6 @@ unsafe impl Sync for PipelinePayload {}
impl Clone for PipelinePayload {
fn clone(&self) -> Self {
match self {
PipelinePayload::Empty => PipelinePayload::Empty,
PipelinePayload::Bytes(b) => PipelinePayload::Bytes(b.clone()),
PipelinePayload::AvPacket(p, v) => unsafe {
assert!(!(**p).data.is_null(), "Cannot clone empty packet");
let new_pkt = av_packet_clone(*p);
@ -127,9 +126,11 @@ impl Clone for PipelinePayload {
av_frame_copy_props(new_frame, *p);
PipelinePayload::AvFrame(new_frame, v.clone())
},
PipelinePayload::SourceInfo(i) => PipelinePayload::SourceInfo(i.clone()),
PipelinePayload::EncoderInfo(v, s) => PipelinePayload::EncoderInfo(*v, *s),
PipelinePayload::Flush => PipelinePayload::Flush,
PipelinePayload::Empty => PipelinePayload::Empty,
PipelinePayload::Bytes(b) => PipelinePayload::Bytes(b.clone()),
PipelinePayload::EncoderInfo(a, b) => PipelinePayload::EncoderInfo(*a, *b),
PipelinePayload::SourceInfo(a) => PipelinePayload::SourceInfo(a.clone()),
PipelinePayload::Flush => PipelinePayload::Flush
}
}
}
@ -149,5 +150,5 @@ impl Drop for PipelinePayload {
}
pub trait PipelineProcessor {
fn process(&mut self) -> Result<(), Error>;
fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error>;
}

View File

@ -3,56 +3,69 @@ use std::time::Instant;
use anyhow::Error;
use log::info;
use tokio::sync::broadcast;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::mpsc::UnboundedReceiver;
use uuid::Uuid;
use crate::decode::Decoder;
use crate::demux::info::DemuxerInfo;
use crate::demux::Demuxer;
use crate::demux::info::DemuxStreamInfo;
use crate::egress::EgressConfig;
use crate::egress::hls::HlsEgress;
use crate::egress::recorder::RecorderEgress;
use crate::encode::audio::AudioEncoder;
use crate::encode::video::VideoEncoder;
use crate::pipeline::{EgressType, PipelineConfig, PipelinePayload, PipelineProcessor};
use crate::ingress::ConnectionInfo;
use crate::pipeline::{
AVPacketSource, EgressType, PipelineConfig, PipelinePayload, PipelineProcessor,
};
use crate::scale::Scaler;
use crate::tag_frame::TagFrame;
use crate::variant::VariantStream;
use crate::variant::{StreamMapping, VariantStream};
use crate::webhook::Webhook;
struct PipelineChain {
pub first: Box<dyn PipelineProcessor + Sync + Send>,
pub second: Box<dyn PipelineProcessor + Sync + Send>,
type BoxedProcessor = Box<dyn PipelineProcessor + Sync + Send>;
/// Resample/Encode
struct Transcoder {
pub variant: Uuid,
/// A resampler which can take decoded sames (Audio or Video)
pub sampler: Option<BoxedProcessor>,
/// The encoder which will encode the resampled frames
pub encoder: BoxedProcessor,
}
///
/// |----------------------------------------------------|
/// | Demuxer
pub struct PipelineRunner {
config: PipelineConfig,
info: ConnectionInfo,
demuxer: Demuxer,
decoder: Decoder,
decoder_output: broadcast::Receiver<PipelinePayload>,
encoders: Vec<PipelineChain>,
egress: Vec<Box<dyn PipelineProcessor + Sync + Send>>,
transcoders: Vec<Transcoder>,
muxers: Vec<BoxedProcessor>,
started: Instant,
frame_no: u64,
stream_info: Option<DemuxStreamInfo>,
stream_info: Option<DemuxerInfo>,
webhook: Webhook,
}
impl PipelineRunner {
pub fn new(
config: PipelineConfig,
info: ConnectionInfo,
webhook: Webhook,
recv: UnboundedReceiver<bytes::Bytes>,
) -> Self {
let (demux_out, demux_in) = unbounded_channel();
let (dec_tx, dec_rx) = broadcast::channel::<PipelinePayload>(32);
Self {
config,
demuxer: Demuxer::new(recv, demux_out),
decoder: Decoder::new(demux_in, dec_tx),
decoder_output: dec_rx,
encoders: vec![],
egress: vec![],
info,
demuxer: Demuxer::new(recv),
decoder: Decoder::new(),
transcoders: vec![],
muxers: vec![],
started: Instant::now(),
frame_no: 0,
stream_info: None,
@ -61,21 +74,70 @@ impl PipelineRunner {
}
pub fn run(&mut self) -> Result<(), Error> {
if let Some(cfg) = self.demuxer.process()? {
self.configure_pipeline(cfg)?;
}
let frames = self.decoder.process()?;
self.frame_no += frames as u64;
// (scalar)-encoder chains
for sw in &mut self.encoders {
sw.first.process()?;
sw.second.process()?;
if self.stream_info.is_none() {
if let Some(cfg) = self.demuxer.try_probe()? {
self.configure_pipeline(&cfg)?;
for mux in &mut self.muxers {
mux.process(PipelinePayload::SourceInfo(cfg.clone()))?;
}
self.stream_info = Some(cfg);
} else {
return Ok(());
}
}
// egress outputs
for eg in &mut self.egress {
eg.process()?;
let demux_pkg = unsafe { self.demuxer.get_packet() }?;
let src_index = if let PipelinePayload::AvPacket(_, s) = &demux_pkg {
if let AVPacketSource::Demuxer(s) = s {
unsafe { (*(*s)).index }
} else {
-1
}
} else {
-1
};
let pkg_variant = self.config.variants.iter().find(|v| match v {
VariantStream::Video(vx) => vx.src_index() as i32 == src_index,
VariantStream::Audio(ax) => ax.src_index() as i32 == src_index,
_ => false,
});
let transcoded_pkgs = if let Some(var) = pkg_variant {
let frames = self.decoder.process(demux_pkg.clone())?;
if let VariantStream::Video(_) = var {
self.frame_no += frames.len() as u64;
//TODO: Account for multiple video streams in
}
let mut pkgs = Vec::new();
for frame in &frames {
for tran in &mut self.transcoders {
let frames = if let Some(ref mut smp) = tran.sampler {
smp.process(frame.clone())?
} else {
vec![frame.clone()]
};
for frame in frames {
for pkg in tran.encoder.process(frame)? {
pkgs.push(pkg);
}
}
}
}
pkgs
} else {
vec![]
};
// mux
for pkg in transcoded_pkgs {
for ref mut mux in &mut self.muxers {
mux.process(pkg.clone())?;
}
}
for ref mut mux in &mut self.muxers {
mux.process(demux_pkg.clone())?;
}
let elapsed = Instant::now().sub(self.started).as_secs_f32();
@ -87,87 +149,74 @@ impl PipelineRunner {
Ok(())
}
fn configure_pipeline(&mut self, info: DemuxStreamInfo) -> Result<(), Error> {
if self.stream_info.is_some() {
return Err(Error::msg("Pipeline already configured!"));
}
self.stream_info = Some(info.clone());
/// Setup pipeline based on the demuxer info
fn configure_pipeline(&mut self, info: &DemuxerInfo) -> Result<(), Error> {
// re-configure with demuxer info
self.config = self.webhook.configure(&info);
self.config = self.webhook.start(info);
info!("Configuring pipeline {}", self.config);
info!(
"Livestream url: http://localhost:8080/{}/live.m3u8",
self.config.id
);
if self.config.egress.iter().any(|x| match x {
EgressType::HLS(_) => true,
_ => false,
}) {
info!(
"Livestream url: http://localhost:8080/{}/live.m3u8",
self.config.id
);
}
for eg in &self.config.egress {
match eg {
EgressType::HLS(cfg) => {
let (egress_tx, egress_rx) = unbounded_channel();
self.egress.push(Box::new(HlsEgress::new(
egress_rx,
self.config.id,
cfg.clone(),
)));
for x in self.add_egress_variants(cfg, egress_tx) {
self.encoders.push(x);
}
// configure transcoders
for var in &self.config.variants {
match var {
VariantStream::Video(v) => {
let scaler = Scaler::new(v.clone());
let encoder = VideoEncoder::new(v.clone());
self.transcoders.push(Transcoder {
variant: v.id(),
sampler: Some(Box::new(scaler)),
encoder: Box::new(encoder),
});
}
EgressType::Recorder(cfg) => {
let (egress_tx, egress_rx) = unbounded_channel();
self.egress.push(Box::new(RecorderEgress::new(
egress_rx,
self.config.id,
cfg.clone(),
)));
for x in self.add_egress_variants(cfg, egress_tx) {
self.encoders.push(x);
}
VariantStream::Audio(a) => {
let encoder = AudioEncoder::new(a.clone());
self.transcoders.push(Transcoder {
variant: a.id(),
sampler: None,
encoder: Box::new(encoder),
});
}
_ => {
//ignored
}
_ => return Err(Error::msg("Egress config not supported")),
}
}
if self.egress.is_empty() {
// configure muxers
for mux in &self.config.egress {
match mux {
EgressType::HLS(c) => {
let mut hls =
HlsEgress::new(Uuid::new_v4(), c.clone(), self.config.variants.clone());
hls.setup_muxer()?;
self.muxers.push(Box::new(hls));
}
EgressType::Recorder(c) => {
let recorder = RecorderEgress::new(
Uuid::new_v4(),
c.clone(),
self.config.variants.clone(),
);
self.muxers.push(Box::new(recorder));
}
EgressType::RTMPForwarder(c) => {
todo!("Implement this")
}
}
}
if self.muxers.is_empty() {
Err(Error::msg("No egress config, pipeline misconfigured!"))
} else {
Ok(())
}
}
fn add_egress_variants(
&self,
cfg: &EgressConfig,
egress_tx: UnboundedSender<PipelinePayload>,
) -> Vec<PipelineChain> {
let mut ret = vec![];
for v in &cfg.variants {
match v {
VariantStream::Video(vs) => {
let (sw_tx, sw_rx) = unbounded_channel();
ret.push(PipelineChain {
first: Box::new(Scaler::new(
self.decoder_output.resubscribe(),
sw_tx.clone(),
vs.clone(),
)),
second: Box::new(VideoEncoder::new(sw_rx, egress_tx.clone(), vs.clone())),
});
}
VariantStream::Audio(va) => {
let (tag_tx, tag_rx) = unbounded_channel();
ret.push(PipelineChain {
first: Box::new(TagFrame::new(
v.clone(),
self.decoder_output.resubscribe(),
tag_tx,
)),
second: Box::new(AudioEncoder::new(tag_rx, egress_tx.clone(), va.clone())),
});
}
}
}
ret
}
}

View File

@ -4,29 +4,27 @@ use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{
av_frame_alloc, av_frame_copy_props, av_get_pix_fmt_name, AVFrame,
SWS_BILINEAR, sws_freeContext, sws_getContext, sws_scale_frame, SwsContext,
av_frame_alloc, av_frame_copy_props, av_get_pix_fmt_name, AVFrame, SWS_BILINEAR,
sws_freeContext, sws_getContext, sws_scale_frame, SwsContext,
};
use log::info;
use tokio::sync::mpsc::UnboundedSender;
use crate::ipc::Rx;
use crate::pipeline::{AVFrameSource, PipelinePayload, PipelineProcessor};
use crate::return_ffmpeg_error;
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::VideoVariant;
use crate::variant::StreamMapping;
use crate::variant::video::VideoVariant;
pub struct Scaler<T> {
pub struct Scaler {
variant: VideoVariant,
ctx: *mut SwsContext,
chan_in: T,
chan_out: UnboundedSender<PipelinePayload>,
}
unsafe impl<TRecv> Send for Scaler<TRecv> {}
unsafe impl Send for Scaler {}
unsafe impl<TRecv> Sync for Scaler<TRecv> {}
unsafe impl Sync for Scaler {}
impl<TRecv> Drop for Scaler<TRecv> {
impl Drop for Scaler {
fn drop(&mut self) {
unsafe {
sws_freeContext(self.ctx);
@ -35,103 +33,87 @@ impl<TRecv> Drop for Scaler<TRecv> {
}
}
impl<TRecv> Scaler<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
pub fn new(
chan_in: TRecv,
chan_out: UnboundedSender<PipelinePayload>,
variant: VideoVariant,
) -> Self {
impl Scaler {
pub fn new(variant: VideoVariant) -> Self {
Self {
chan_in,
chan_out,
variant,
ctx: ptr::null_mut(),
}
}
unsafe fn setup_scaler(&mut self, frame: *const AVFrame) -> Result<(), Error> {
if !self.ctx.is_null() {
return Ok(());
}
let ctx = sws_getContext(
(*frame).width,
(*frame).height,
transmute((*frame).format),
self.variant.width as libc::c_int,
self.variant.height as libc::c_int,
transmute(self.variant.pixel_format),
SWS_BILINEAR,
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut(),
);
if ctx.is_null() {
return Err(Error::msg("Failed to create scalar context"));
}
info!(
"Scalar config: {}x{}@{} => {}x{}@{}",
(*frame).width,
(*frame).height,
CStr::from_ptr(av_get_pix_fmt_name(transmute((*frame).format)))
.to_str()?,
self.variant.width,
self.variant.height,
CStr::from_ptr(av_get_pix_fmt_name(transmute(self.variant.pixel_format)))
.to_str()?
);
self.ctx = ctx;
Ok(())
}
unsafe fn process_frame(
&mut self,
frame: *mut AVFrame,
src: &AVFrameSource,
) -> Result<(), Error> {
if self.ctx.is_null() {
let ctx = sws_getContext(
(*frame).width,
(*frame).height,
transmute((*frame).format),
self.variant.width as libc::c_int,
self.variant.height as libc::c_int,
transmute(self.variant.pixel_format),
SWS_BILINEAR,
ptr::null_mut(),
ptr::null_mut(),
ptr::null_mut(),
);
if ctx.is_null() {
return Err(Error::msg("Failed to create scalar context"));
}
info!(
"Scalar config: {}x{}@{} => {}x{}@{}",
(*frame).width,
(*frame).height,
CStr::from_ptr(av_get_pix_fmt_name(transmute((*frame).format)))
.to_str()
.unwrap(),
self.variant.width,
self.variant.height,
CStr::from_ptr(av_get_pix_fmt_name(transmute(self.variant.pixel_format)))
.to_str()
.unwrap()
);
self.ctx = ctx;
}
) -> Result<Vec<PipelinePayload>, Error> {
self.setup_scaler(frame)?;
let dst_frame = av_frame_alloc();
let ret = av_frame_copy_props(dst_frame, frame);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
return_ffmpeg_error!(ret);
let ret = sws_scale_frame(self.ctx, dst_frame, frame);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
return_ffmpeg_error!(ret);
self.chan_out
.send(PipelinePayload::AvFrame(dst_frame, src.clone()))?;
Ok(())
Ok(vec![PipelinePayload::AvFrame(dst_frame, src.clone())])
}
}
impl<TRecv> PipelineProcessor for Scaler<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv_next() {
match pkg {
PipelinePayload::AvFrame(frm, ref src) => unsafe {
let idx = match src {
AVFrameSource::Decoder(s) => (**s).index as usize,
AVFrameSource::None(s) => *s,
_ => {
return Err(Error::msg(format!("Cannot process frame from: {:?}", src)))
}
};
if self.variant.src_index == idx {
self.process_frame(frm, src)?;
}
},
PipelinePayload::Flush => {
// pass flush to next step
self.chan_out.send(PipelinePayload::Flush)?;
impl PipelineProcessor for Scaler {
fn process(&mut self, pkg: PipelinePayload) -> Result<Vec<PipelinePayload>, Error> {
match pkg {
PipelinePayload::AvFrame(frm, ref src) => unsafe {
let idx = match src {
AVFrameSource::Decoder(s) => (**s).index as usize,
AVFrameSource::None(s) => *s,
_ => return Err(Error::msg(format!("Cannot process frame from: {:?}", src))),
};
if self.variant.src_index() == idx {
self.process_frame(frm, src)
} else {
Ok(vec![])
}
_ => return Err(Error::msg("Payload not supported payload")),
},
PipelinePayload::Flush => {
// pass flush to next step
Ok(vec![pkg])
}
_ => Err(Error::msg("Payload not supported payload")),
}
Ok(())
}
}

View File

@ -14,4 +14,4 @@ pub struct Settings {
/// Webhook configuration URL
pub webhook_url: String,
}
}

View File

@ -1,45 +0,0 @@
use anyhow::Error;
use tokio::sync::mpsc::UnboundedSender;
use crate::ipc::Rx;
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::variant::VariantStream;
pub struct TagFrame<TRecv> {
variant: VariantStream,
chan_in: TRecv,
chan_out: UnboundedSender<PipelinePayload>,
}
unsafe impl<T> Send for TagFrame<T> {}
unsafe impl<T> Sync for TagFrame<T> {}
impl<TRecv> TagFrame<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
pub fn new(
var: VariantStream,
chan_in: TRecv,
chan_out: UnboundedSender<PipelinePayload>,
) -> Self {
Self {
variant: var,
chan_in,
chan_out,
}
}
}
impl<TRecv> PipelineProcessor for TagFrame<TRecv>
where
TRecv: Rx<PipelinePayload>,
{
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv_next() {
self.chan_out.send(pkg)?;
}
Ok(())
}
}

View File

@ -2,7 +2,6 @@ use std::ffi::CStr;
use ffmpeg_sys_next::av_make_error_string;
pub fn get_ffmpeg_error_msg(ret: libc::c_int) -> String {
unsafe {
const BUF_SIZE: usize = 512;
@ -10,4 +9,4 @@ pub fn get_ffmpeg_error_msg(ret: libc::c_int) -> String {
av_make_error_string(buf.as_mut_ptr(), BUF_SIZE, ret);
String::from(CStr::from_ptr(buf.as_ptr()).to_str().unwrap())
}
}
}

View File

@ -1,371 +0,0 @@
use std::ffi::CStr;
use std::fmt::{Display, Formatter};
use std::mem::transmute;
use std::ptr;
use ffmpeg_sys_next::{
AV_CH_LAYOUT_STEREO, av_get_sample_fmt, av_opt_set, AVChannelLayout,
AVChannelLayout__bindgen_ty_1, AVCodec, avcodec_find_encoder, avcodec_find_encoder_by_name, avcodec_get_name,
AVCodecContext, AVCodecParameters, AVRational, AVStream,
};
use ffmpeg_sys_next::AVChannelOrder::AV_CHANNEL_ORDER_NATIVE;
use ffmpeg_sys_next::AVCodecID::{AV_CODEC_ID_AAC, AV_CODEC_ID_H264};
use ffmpeg_sys_next::AVColorSpace::AVCOL_SPC_BT709;
use ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VariantStream {
/// Video stream mapping
Video(VideoVariant),
/// Audio stream mapping
Audio(AudioVariant),
}
impl Display for VariantStream {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
VariantStream::Video(v) => write!(f, "{}", v),
VariantStream::Audio(a) => write!(f, "{}", a),
}
}
}
impl VariantStreamType for VariantStream {
fn id(&self) -> Uuid {
match self {
VariantStream::Video(v) => v.id,
VariantStream::Audio(v) => v.id,
}
}
fn src_index(&self) -> usize {
match self {
VariantStream::Video(v) => v.src_index,
VariantStream::Audio(v) => v.src_index,
}
}
fn dst_index(&self) -> usize {
match self {
VariantStream::Video(v) => v.dst_index,
VariantStream::Audio(v) => v.dst_index,
}
}
fn time_base(&self) -> AVRational {
match self {
VariantStream::Video(v) => v.time_base(),
VariantStream::Audio(v) => v.time_base(),
}
}
unsafe fn get_codec(&self) -> *const AVCodec {
match self {
VariantStream::Video(v) => v.get_codec(),
VariantStream::Audio(v) => v.get_codec(),
}
}
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
match self {
VariantStream::Video(v) => v.to_codec_context(ctx),
VariantStream::Audio(v) => v.to_codec_context(ctx),
}
}
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
match self {
VariantStream::Video(v) => v.to_codec_params(params),
VariantStream::Audio(v) => v.to_codec_params(params),
}
}
unsafe fn to_stream(&self, stream: *mut AVStream) {
match self {
VariantStream::Video(v) => v.to_stream(stream),
VariantStream::Audio(v) => v.to_stream(stream),
}
}
}
/// Information related to variant streams for a given egress
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VideoVariant {
/// Unique ID of this variant
pub id: Uuid,
/// Source video stream to use for this variant
pub src_index: usize,
/// Index of this variant in the output
pub dst_index: usize,
/// Width of this video stream
pub width: u16,
/// Height of this video stream
pub height: u16,
/// FPS for this stream
pub fps: u16,
/// Bitrate of this stream
pub bitrate: u64,
/// AVCodecID
pub codec: usize,
/// Codec profile
pub profile: usize,
/// Codec level
pub level: usize,
/// Keyframe interval in seconds
pub keyframe_interval: u16,
/// Pixel Format
pub pixel_format: u32,
}
impl Display for VideoVariant {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Video #{}->{}: {}, {}x{}, {}fps, {}kbps",
self.src_index,
self.dst_index,
unsafe {
CStr::from_ptr(avcodec_get_name(transmute(self.codec as i32)))
.to_str()
.unwrap()
},
self.width,
self.height,
self.fps,
self.bitrate / 1000
)
}
}
/// Information related to variant streams for a given egress
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AudioVariant {
/// Unique ID of this variant
pub id: Uuid,
/// Source video stream to use for this variant
pub src_index: usize,
/// Index of this variant in the output
pub dst_index: usize,
/// Bitrate of this stream
pub bitrate: u64,
/// AVCodecID
pub codec: usize,
/// Number of channels
pub channels: u16,
/// Sample rate
pub sample_rate: usize,
/// Sample format as ffmpeg sample format string
pub sample_fmt: String,
}
impl Display for AudioVariant {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Audio #{}->{}: {}, {}kbps",
self.src_index,
self.dst_index,
unsafe {
CStr::from_ptr(avcodec_get_name(transmute(self.codec as i32)))
.to_str()
.unwrap()
},
self.bitrate / 1000
)
}
}
pub trait VariantStreamType {
fn id(&self) -> Uuid;
fn src_index(&self) -> usize;
fn dst_index(&self) -> usize;
fn time_base(&self) -> AVRational;
unsafe fn get_codec(&self) -> *const AVCodec;
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext);
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters);
unsafe fn to_stream(&self, stream: *mut AVStream);
}
impl VariantStreamType for VideoVariant {
fn id(&self) -> Uuid {
self.id
}
fn src_index(&self) -> usize {
self.src_index
}
fn dst_index(&self) -> usize {
self.dst_index
}
fn time_base(&self) -> AVRational {
AVRational {
num: 1,
den: 90_000,
}
}
unsafe fn get_codec(&self) -> *const AVCodec {
avcodec_find_encoder(transmute(self.codec as u32))
}
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
let codec = self.get_codec();
(*ctx).codec_id = (*codec).id;
(*ctx).codec_type = (*codec).type_;
(*ctx).time_base = self.time_base();
(*ctx).bit_rate = self.bitrate as i64;
(*ctx).width = self.width as libc::c_int;
(*ctx).height = self.height as libc::c_int;
(*ctx).level = self.level as libc::c_int;
(*ctx).profile = self.profile as libc::c_int;
(*ctx).framerate = AVRational {
num: self.fps as libc::c_int,
den: 1,
};
let key_frames = self.fps * self.keyframe_interval;
(*ctx).gop_size = key_frames as libc::c_int;
(*ctx).keyint_min = key_frames as libc::c_int;
(*ctx).max_b_frames = 3;
(*ctx).pix_fmt = AV_PIX_FMT_YUV420P;
(*ctx).colorspace = AVCOL_SPC_BT709;
if (*codec).id == AV_CODEC_ID_H264 {
av_opt_set(
(*ctx).priv_data,
"preset\0".as_ptr() as *const libc::c_char,
"fast\0".as_ptr() as *const libc::c_char,
0,
);
av_opt_set(
(*ctx).priv_data,
"tune\0".as_ptr() as *const libc::c_char,
"zerolatency\0".as_ptr() as *const libc::c_char,
0,
);
}
}
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
let codec = self.get_codec();
(*params).codec_id = (*codec).id;
(*params).codec_type = (*codec).type_;
(*params).height = self.height as libc::c_int;
(*params).width = self.width as libc::c_int;
(*params).format = AV_PIX_FMT_YUV420P as i32;
(*params).framerate = AVRational {
num: self.fps as libc::c_int,
den: 1,
};
(*params).bit_rate = self.bitrate as i64;
(*params).color_space = AVCOL_SPC_BT709;
(*params).level = self.level as libc::c_int;
(*params).profile = self.profile as libc::c_int;
}
unsafe fn to_stream(&self, stream: *mut AVStream) {
(*stream).time_base = self.time_base();
(*stream).avg_frame_rate = AVRational {
num: self.fps as libc::c_int,
den: 1,
};
(*stream).r_frame_rate = AVRational {
num: self.fps as libc::c_int,
den: 1,
};
self.to_codec_params((*stream).codecpar);
}
}
impl VariantStreamType for AudioVariant {
fn id(&self) -> Uuid {
self.id
}
fn src_index(&self) -> usize {
self.src_index
}
fn dst_index(&self) -> usize {
self.dst_index
}
fn time_base(&self) -> AVRational {
AVRational {
num: 1,
den: self.sample_rate as libc::c_int,
}
}
unsafe fn get_codec(&self) -> *const AVCodec {
if self.codec == AV_CODEC_ID_AAC as usize {
avcodec_find_encoder_by_name("libfdk_aac\0".as_ptr() as *const libc::c_char)
} else {
avcodec_find_encoder(transmute(self.codec as u32))
}
}
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
let codec = self.get_codec();
(*ctx).codec_id = (*codec).id;
(*ctx).codec_type = (*codec).type_;
(*ctx).time_base = self.time_base();
(*ctx).sample_fmt =
av_get_sample_fmt(format!("{}\0", self.sample_fmt).as_ptr() as *const libc::c_char);
(*ctx).bit_rate = self.bitrate as i64;
(*ctx).sample_rate = self.sample_rate as libc::c_int;
(*ctx).ch_layout = self.channel_layout();
}
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
let codec = self.get_codec();
(*params).codec_id = (*codec).id;
(*params).codec_type = (*codec).type_;
(*params).format =
av_get_sample_fmt(format!("{}\0", self.sample_fmt).as_ptr() as *const libc::c_char)
as libc::c_int;
(*params).bit_rate = self.bitrate as i64;
(*params).sample_rate = self.sample_rate as libc::c_int;
(*params).ch_layout = self.channel_layout();
(*params).frame_size = 1024; //TODO: fix this
}
unsafe fn to_stream(&self, stream: *mut AVStream) {
(*stream).time_base = self.time_base();
self.to_codec_params((*stream).codecpar);
}
}
impl AudioVariant {
fn channel_layout(&self) -> AVChannelLayout {
AVChannelLayout {
order: AV_CHANNEL_ORDER_NATIVE,
nb_channels: 2,
u: AVChannelLayout__bindgen_ty_1 {
mask: AV_CH_LAYOUT_STEREO,
},
opaque: ptr::null_mut(),
}
}
}

136
src/variant/audio.rs Normal file
View File

@ -0,0 +1,136 @@
use std::ffi::CStr;
use std::fmt::{Display, Formatter};
use std::intrinsics::transmute;
use std::ptr;
use ffmpeg_sys_next::AVChannelOrder::AV_CHANNEL_ORDER_NATIVE;
use ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_AAC;
use ffmpeg_sys_next::{
av_get_sample_fmt, avcodec_find_encoder, avcodec_find_encoder_by_name, avcodec_get_name,
AVChannelLayout, AVChannelLayout__bindgen_ty_1, AVCodec, AVCodecContext, AVCodecParameters,
AVRational, AVStream, AV_CH_LAYOUT_STEREO,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::variant::{EncodedStream, StreamMapping, VariantMapping};
/// Information related to variant streams for a given egress.
///
/// Describes one re-encoded audio output: where it reads from / writes to
/// (`mapping`) and the encoder settings to use.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AudioVariant {
    /// Identity and src/dst routing for this variant (Id, Src, Dst)
    pub mapping: VariantMapping,
    /// Bitrate of this stream in bits per second
    pub bitrate: u64,
    /// AVCodecID numeric value (stored as usize, transmuted back for ffmpeg)
    pub codec: usize,
    /// Number of channels
    /// NOTE(review): `channel_layout()` currently always returns stereo and
    /// does not consult this field — confirm intended behavior.
    pub channels: u16,
    /// Sample rate in Hz
    pub sample_rate: usize,
    /// Sample format as ffmpeg sample format string
    pub sample_fmt: String,
}
impl Display for AudioVariant {
    /// Formats a short human-readable summary of this audio variant:
    /// source/destination indices, codec name and bitrate in kbps.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Resolve the printable codec name through ffmpeg before formatting.
        let codec_name = unsafe {
            CStr::from_ptr(avcodec_get_name(transmute(self.codec as i32)))
                .to_str()
                .unwrap()
        };
        write!(
            f,
            "Audio #{}->{}: {}, {}kbps",
            self.mapping.src_index,
            self.mapping.dst_index,
            codec_name,
            self.bitrate / 1000
        )
    }
}
/// Mapping accessors all delegate to the embedded `VariantMapping`.
impl StreamMapping for AudioVariant {
    fn id(&self) -> Uuid {
        self.mapping.id
    }
    fn src_index(&self) -> usize {
        self.mapping.src_index
    }
    fn dst_index(&self) -> usize {
        self.mapping.dst_index
    }
    fn set_dst_index(&mut self, dst: usize) {
        self.mapping.dst_index = dst;
    }
    fn group_id(&self) -> usize {
        self.mapping.group_id
    }
    /// Copy this variant's time base and codec parameters onto the output
    /// `AVStream`.
    unsafe fn to_stream(&self, stream: *mut AVStream) {
        (*stream).time_base = self.time_base();
        self.to_codec_params((*stream).codecpar);
    }
}
impl EncodedStream for AudioVariant {
    /// Time base of 1/sample_rate, i.e. timestamps count individual samples.
    fn time_base(&self) -> AVRational {
        AVRational {
            num: 1,
            den: self.sample_rate as libc::c_int,
        }
    }
    /// Resolve the encoder for this variant's codec id.
    ///
    /// AAC is special-cased to the external libfdk_aac encoder instead of
    /// whatever `avcodec_find_encoder` would pick by id.
    unsafe fn get_codec(&self) -> *const AVCodec {
        if self.codec == AV_CODEC_ID_AAC as usize {
            avcodec_find_encoder_by_name("libfdk_aac\0".as_ptr() as *const libc::c_char)
        } else {
            avcodec_find_encoder(transmute(self.codec as u32))
        }
    }
    /// Apply this variant's settings to an encoder context (before open).
    unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
        let codec = self.get_codec();
        (*ctx).codec_id = (*codec).id;
        (*ctx).codec_type = (*codec).type_;
        (*ctx).time_base = self.time_base();
        // `sample_fmt` is stored as ffmpeg's string name; NUL-terminate it
        // before handing it to the C API.
        (*ctx).sample_fmt =
            av_get_sample_fmt(format!("{}\0", self.sample_fmt).as_ptr() as *const libc::c_char);
        (*ctx).bit_rate = self.bitrate as i64;
        (*ctx).sample_rate = self.sample_rate as libc::c_int;
        (*ctx).ch_layout = self.channel_layout();
        // NOTE(review): hard-coded frame size; matches the value used in
        // `to_codec_params` below — confirm it suits non-AAC codecs.
        (*ctx).frame_size = 1024;
    }
    /// Apply this variant's settings to stream codec parameters (for muxing).
    unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
        let codec = self.get_codec();
        (*params).codec_id = (*codec).id;
        (*params).codec_type = (*codec).type_;
        (*params).format =
            av_get_sample_fmt(format!("{}\0", self.sample_fmt).as_ptr() as *const libc::c_char)
                as libc::c_int;
        (*params).bit_rate = self.bitrate as i64;
        (*params).sample_rate = self.sample_rate as libc::c_int;
        (*params).ch_layout = self.channel_layout();
        (*params).frame_size = 1024; //TODO: fix this
    }
}
impl AudioVariant {
    /// Build the channel layout for this variant.
    ///
    /// NOTE(review): always returns a native-order 2-channel stereo layout;
    /// the `channels` field of this struct is ignored here — confirm whether
    /// mono/multichannel output should be supported.
    fn channel_layout(&self) -> AVChannelLayout {
        AVChannelLayout {
            order: AV_CHANNEL_ORDER_NATIVE,
            nb_channels: 2,
            u: AVChannelLayout__bindgen_ty_1 {
                mask: AV_CH_LAYOUT_STEREO,
            },
            opaque: ptr::null_mut(),
        }
    }
}

54
src/variant/mapping.rs Normal file
View File

@ -0,0 +1,54 @@
use std::fmt::{Display, Formatter};
use ffmpeg_sys_next::AVStream;
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::variant::StreamMapping;
/// Identity and routing shared by every variant stream: which source stream
/// it reads and which output stream it writes.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VariantMapping {
    /// Unique ID of this variant
    pub id: Uuid,
    /// Index of the source stream to read from (audio or video)
    pub src_index: usize,
    /// Index of this variant stream in the output
    pub dst_index: usize,
    /// Stream group, groups one or more streams into a variant
    pub group_id: usize,
}
impl Display for VariantMapping {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "Copy #{}->{}", self.src_index, self.dst_index)
}
}
impl StreamMapping for VariantMapping {
    fn id(&self) -> Uuid {
        self.id
    }
    fn src_index(&self) -> usize {
        self.src_index
    }
    fn dst_index(&self) -> usize {
        self.dst_index
    }
    fn set_dst_index(&mut self, dst: usize) {
        self.dst_index = dst;
    }
    fn group_id(&self) -> usize {
        self.group_id
    }
    /// Intentionally a no-op: a plain mapping carries no encoder settings to
    /// apply to the output stream.
    unsafe fn to_stream(&self, stream: *mut AVStream) {
        // do nothing
    }
}

125
src/variant/mod.rs Normal file
View File

@ -0,0 +1,125 @@
use std::fmt::{Display, Formatter};
use anyhow::Error;
use ffmpeg_sys_next::{AVCodec, AVCodecContext, AVCodecParameters, AVRational, AVStream};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::variant::audio::AudioVariant;
use crate::variant::mapping::VariantMapping;
use crate::variant::video::VideoVariant;
pub mod audio;
pub mod mapping;
pub mod video;
/// All supported output stream flavours for an egress: re-encoded video and
/// audio variants, plus direct stream copies.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VariantStream {
    /// Video stream mapping (re-encoded via `VideoVariant`)
    Video(VideoVariant),
    /// Audio stream mapping (re-encoded via `AudioVariant`)
    Audio(AudioVariant),
    /// Copy video stream src<>dst stream
    CopyVideo(VariantMapping),
    /// Copy audio stream src<>dst stream
    CopyAudio(VariantMapping),
}
/// `VariantStream` implements the mapping interface by dispatching every
/// call to the concrete variant it wraps.
impl StreamMapping for VariantStream {
    fn id(&self) -> Uuid {
        match self {
            VariantStream::Video(v) => v.id(),
            VariantStream::Audio(v) => v.id(),
            VariantStream::CopyAudio(v) => v.id(),
            VariantStream::CopyVideo(v) => v.id(),
        }
    }
    fn src_index(&self) -> usize {
        match self {
            VariantStream::Video(v) => v.src_index(),
            VariantStream::Audio(v) => v.src_index(),
            VariantStream::CopyAudio(v) => v.src_index(),
            VariantStream::CopyVideo(v) => v.src_index(),
        }
    }
    fn dst_index(&self) -> usize {
        match self {
            VariantStream::Video(v) => v.dst_index(),
            VariantStream::Audio(v) => v.dst_index(),
            VariantStream::CopyAudio(v) => v.dst_index(),
            VariantStream::CopyVideo(v) => v.dst_index(),
        }
    }
    fn set_dst_index(&mut self, dst: usize) {
        match self {
            VariantStream::Video(v) => v.set_dst_index(dst),
            VariantStream::Audio(v) => v.set_dst_index(dst),
            VariantStream::CopyAudio(v) => v.set_dst_index(dst),
            VariantStream::CopyVideo(v) => v.set_dst_index(dst),
        }
    }
    fn group_id(&self) -> usize {
        match self {
            VariantStream::Video(v) => v.group_id(),
            VariantStream::Audio(v) => v.group_id(),
            VariantStream::CopyAudio(v) => v.group_id(),
            VariantStream::CopyVideo(v) => v.group_id(),
        }
    }
    unsafe fn to_stream(&self, stream: *mut AVStream) {
        match self {
            VariantStream::Video(v) => v.to_stream(stream),
            VariantStream::Audio(v) => v.to_stream(stream),
            VariantStream::CopyAudio(v) => v.to_stream(stream),
            VariantStream::CopyVideo(v) => v.to_stream(stream),
        }
    }
}
impl Display for VariantStream {
    /// Delegates formatting to the concrete variant's own `Display` impl.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            VariantStream::Video(v) => v.fmt(f),
            VariantStream::Audio(a) => a.fmt(f),
            VariantStream::CopyVideo(c) => c.fmt(f),
            VariantStream::CopyAudio(c) => c.fmt(f),
        }
    }
}
/// Common interface for anything that maps a source stream onto an output
/// stream — re-encoded variants and plain copies alike.
pub trait StreamMapping {
    /// Unique id of this mapping.
    fn id(&self) -> Uuid;
    /// Index of the source (demuxed) stream.
    fn src_index(&self) -> usize;
    /// Index of this stream in the output.
    fn dst_index(&self) -> usize;
    /// Re-assign the output stream index.
    fn set_dst_index(&mut self, dst: usize);
    /// Stream group; groups one or more streams into a variant.
    fn group_id(&self) -> usize;
    /// Apply this mapping's parameters to the given output stream.
    unsafe fn to_stream(&self, stream: *mut AVStream);
}
/// Additional interface for streams that are (re-)encoded rather than copied.
pub trait EncodedStream {
    /// Time base used for this stream's timestamps.
    fn time_base(&self) -> AVRational;
    /// Resolve the ffmpeg encoder for this stream's codec.
    unsafe fn get_codec(&self) -> *const AVCodec;
    /// Apply the stream's settings to an encoder context.
    unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext);
    /// Apply the stream's settings to codec parameters (for muxing).
    unsafe fn to_codec_params(&self, params: *mut AVCodecParameters);
}
/// Find a stream by ID in a vec of streams
pub fn find_stream<'a>(
config: &'a Vec<VariantStream>,
id: &Uuid,
) -> Result<&'a VariantStream, Error> {
config
.iter()
.find(|x| match x {
VariantStream::Video(v) => v.id() == *id,
VariantStream::Audio(a) => a.id() == *id,
VariantStream::CopyVideo(c) => c.id() == *id,
VariantStream::CopyAudio(c) => c.id() == *id,
})
.ok_or(Error::msg("Variant does not exist"))
}

169
src/variant/video.rs Normal file
View File

@ -0,0 +1,169 @@
use std::ffi::CStr;
use std::fmt::{Display, Formatter};
use std::intrinsics::transmute;
use ffmpeg_sys_next::AVCodecID::AV_CODEC_ID_H264;
use ffmpeg_sys_next::AVColorSpace::AVCOL_SPC_BT709;
use ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_sys_next::{
av_opt_set, avcodec_find_encoder, avcodec_get_name, AVCodec, AVCodecContext, AVCodecParameters,
AVRational, AVStream,
};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::variant::{EncodedStream, StreamMapping, VariantMapping};
/// Information related to variant streams for a given egress.
///
/// Describes one re-encoded video output: where it reads from / writes to
/// (`mapping`) and the encoder settings to use.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VideoVariant {
    /// Identity and src/dst routing for this variant (Id, Src, Dst)
    pub mapping: VariantMapping,
    /// Width of this video stream in pixels
    pub width: u16,
    /// Height of this video stream in pixels
    pub height: u16,
    /// FPS for this stream
    pub fps: u16,
    /// Bitrate of this stream in bits per second
    pub bitrate: u64,
    /// AVCodecID numeric value (stored as usize, transmuted back for ffmpeg)
    pub codec: usize,
    /// Codec profile
    pub profile: usize,
    /// Codec level
    pub level: usize,
    /// Keyframe interval in frames
    pub keyframe_interval: u16,
    /// Pixel Format (AVPixelFormat numeric value)
    pub pixel_format: u32,
}
impl Display for VideoVariant {
    /// Formats a short human-readable summary of this video variant:
    /// indices, codec name, resolution, frame rate and bitrate in kbps.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Resolve the printable codec name through ffmpeg before formatting.
        let codec_name = unsafe {
            CStr::from_ptr(avcodec_get_name(transmute(self.codec as i32)))
                .to_str()
                .unwrap()
        };
        write!(
            f,
            "Video #{}->{}: {}, {}x{}, {}fps, {}kbps",
            self.mapping.src_index,
            self.mapping.dst_index,
            codec_name,
            self.width,
            self.height,
            self.fps,
            self.bitrate / 1000
        )
    }
}
/// Mapping accessors all delegate to the embedded `VariantMapping`.
impl StreamMapping for VideoVariant {
    fn id(&self) -> Uuid {
        self.mapping.id
    }
    fn src_index(&self) -> usize {
        self.mapping.src_index
    }
    fn dst_index(&self) -> usize {
        self.mapping.dst_index
    }
    fn set_dst_index(&mut self, dst: usize) {
        self.mapping.dst_index = dst;
    }
    fn group_id(&self) -> usize {
        self.mapping.group_id
    }
    /// Copy time base, frame rate and codec parameters onto the output
    /// `AVStream`.
    unsafe fn to_stream(&self, stream: *mut AVStream) {
        // Both the average and the real frame rate advertise the configured
        // fps; build the rational once (`AVRational` is `Copy`).
        let frame_rate = AVRational {
            num: self.fps as libc::c_int,
            den: 1,
        };
        (*stream).time_base = self.time_base();
        (*stream).avg_frame_rate = frame_rate;
        (*stream).r_frame_rate = frame_rate;
        self.to_codec_params((*stream).codecpar);
    }
}
impl EncodedStream for VideoVariant {
    /// Fixed 1/90000 time base for video timestamps.
    fn time_base(&self) -> AVRational {
        AVRational {
            num: 1,
            den: 90_000,
        }
    }
    /// Resolve the encoder for this variant's codec id.
    unsafe fn get_codec(&self) -> *const AVCodec {
        avcodec_find_encoder(transmute(self.codec as u32))
    }
    /// Apply this variant's settings to an encoder context (before open).
    unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
        let codec = self.get_codec();
        (*ctx).codec_id = (*codec).id;
        (*ctx).codec_type = (*codec).type_;
        (*ctx).time_base = self.time_base();
        (*ctx).bit_rate = self.bitrate as i64;
        (*ctx).width = self.width as libc::c_int;
        (*ctx).height = self.height as libc::c_int;
        (*ctx).level = self.level as libc::c_int;
        (*ctx).profile = self.profile as libc::c_int;
        (*ctx).framerate = AVRational {
            num: self.fps as libc::c_int,
            den: 1,
        };
        // GOP size and minimum keyframe distance are both set to the
        // configured interval, i.e. a keyframe every `keyframe_interval`
        // frames.
        (*ctx).gop_size = self.keyframe_interval as libc::c_int;
        (*ctx).keyint_min = self.keyframe_interval as libc::c_int;
        (*ctx).max_b_frames = 3;
        (*ctx).pix_fmt = AV_PIX_FMT_YUV420P;
        (*ctx).colorspace = AVCOL_SPC_BT709;
        // Encoder-private options applied only for H.264 (x264 "preset" and
        // "tune" strings; NUL-terminated for the C API).
        if (*codec).id == AV_CODEC_ID_H264 {
            av_opt_set(
                (*ctx).priv_data,
                "preset\0".as_ptr() as *const libc::c_char,
                "fast\0".as_ptr() as *const libc::c_char,
                0,
            );
            av_opt_set(
                (*ctx).priv_data,
                "tune\0".as_ptr() as *const libc::c_char,
                "zerolatency\0".as_ptr() as *const libc::c_char,
                0,
            );
        }
    }
    /// Apply this variant's settings to stream codec parameters (for muxing).
    unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
        let codec = self.get_codec();
        (*params).codec_id = (*codec).id;
        (*params).codec_type = (*codec).type_;
        (*params).height = self.height as libc::c_int;
        (*params).width = self.width as libc::c_int;
        // NOTE(review): always advertises YUV420P here, independent of the
        // `pixel_format` field — confirm this is intentional.
        (*params).format = AV_PIX_FMT_YUV420P as i32;
        (*params).framerate = AVRational {
            num: self.fps as libc::c_int,
            den: 1,
        };
        (*params).bit_rate = self.bitrate as i64;
        (*params).color_space = AVCOL_SPC_BT709;
        (*params).level = self.level as libc::c_int;
        (*params).profile = self.profile as libc::c_int;
    }
}

View File

@ -1,12 +1,14 @@
use ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P;
use uuid::Uuid;
use crate::demux::info::{DemuxStreamInfo, StreamChannelType};
use crate::demux::info::{DemuxerInfo, StreamChannelType};
use crate::egress::EgressConfig;
use crate::ingress::ConnectionInfo;
use crate::pipeline::{EgressType, PipelineConfig};
use crate::settings::Settings;
use crate::variant::{AudioVariant, VariantStream, VideoVariant};
use crate::variant::audio::AudioVariant;
use crate::variant::mapping::VariantMapping;
use crate::variant::video::VideoVariant;
use crate::variant::{StreamMapping, VariantStream};
#[derive(Clone)]
pub struct Webhook {
@ -18,21 +20,26 @@ impl Webhook {
Self { config }
}
pub async fn start(&self, _connection_info: ConnectionInfo) -> Result<(), anyhow::Error> {
Ok(())
}
pub fn configure(&self, stream_info: &DemuxStreamInfo) -> PipelineConfig {
pub fn start(&self, stream_info: &DemuxerInfo) -> PipelineConfig {
let mut vars: Vec<VariantStream> = vec![];
if let Some(video_src) = stream_info
.channels
.iter()
.find(|c| c.channel_type == StreamChannelType::Video)
{
vars.push(VariantStream::Video(VideoVariant {
vars.push(VariantStream::CopyVideo(VariantMapping {
id: Uuid::new_v4(),
src_index: video_src.index,
dst_index: 0,
group_id: 0,
}));
vars.push(VariantStream::Video(VideoVariant {
mapping: VariantMapping {
id: Uuid::new_v4(),
src_index: video_src.index,
dst_index: 1,
group_id: 1
},
width: 1280,
height: 720,
fps: video_src.fps as u16,
@ -40,8 +47,8 @@ impl Webhook {
codec: 27,
profile: 100,
level: 51,
keyframe_interval: 2,
pixel_format: AV_PIX_FMT_YUV420P as u32
keyframe_interval: video_src.fps as u16 * 2,
pixel_format: AV_PIX_FMT_YUV420P as u32,
}));
}
@ -50,11 +57,20 @@ impl Webhook {
.iter()
.find(|c| c.channel_type == StreamChannelType::Audio)
{
vars.push(VariantStream::Audio(AudioVariant {
vars.push(VariantStream::CopyAudio(VariantMapping {
id: Uuid::new_v4(),
src_index: audio_src.index,
dst_index: 0,
bitrate: 320_000,
dst_index: 2,
group_id: 0
}));
vars.push(VariantStream::Audio(AudioVariant {
mapping: VariantMapping {
id: Uuid::new_v4(),
src_index: audio_src.index,
dst_index: 3,
group_id: 1
},
bitrate: 192_000,
codec: 86018,
channels: 2,
sample_rate: 48_000,
@ -62,9 +78,10 @@ impl Webhook {
}));
}
let var_ids = vars.iter().map(|v| v.id()).collect();
PipelineConfig {
id: Uuid::new_v4(),
recording: vec![],
variants: vars,
egress: vec![
/*EgressType::Recorder(EgressConfig {
name: "REC".to_owned(),
@ -74,7 +91,7 @@ impl Webhook {
EgressType::HLS(EgressConfig {
name: "HLS".to_owned(),
out_dir: self.config.output_dir.clone(),
variants: vars.clone(),
variants: var_ids,
}),
],
}

File diff suppressed because one or more lines are too long

Before

Width:  |  Height:  |  Size: 78 KiB

After

Width:  |  Height:  |  Size: 118 KiB