something something something
All checks were successful
continuous-integration/drone/push Build is passing

kieran 2024-04-02 17:14:52 +01:00
parent 9c4969cf95
commit 8ed71bd48b
Signed by: Kieran
GPG Key ID: DE71CEB3925BE941
14 changed files with 700 additions and 352 deletions

View File

@ -3,7 +3,7 @@ use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{
av_frame_alloc, av_packet_unref, AVCodec, avcodec_alloc_context3,
av_frame_alloc, AVCodec, avcodec_alloc_context3,
avcodec_find_decoder, avcodec_free_context, avcodec_open2, avcodec_parameters_to_context,
avcodec_receive_frame, avcodec_send_packet, AVCodecContext, AVERROR, AVERROR_EOF, AVPacket, AVStream,
};
@ -85,7 +85,6 @@ impl Decoder {
}
if let Some(ctx) = self.codecs.get_mut(&stream_index) {
let mut ret = avcodec_send_packet(ctx.context, pkt);
av_packet_unref(pkt);
if ret < 0 {
return Err(Error::msg(format!("Failed to decode packet {}", ret)));
}
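
Side note on this change: av_packet_unref is no longer called inside the decode loop, so ownership of the packet stays with the caller. A rough caller-side sketch of the resulting pattern — illustrative only, decode_one and its error strings are not part of this commit:

use anyhow::Error;
use ffmpeg_sys_next::{
    av_frame_alloc, av_frame_free, av_packet_unref, avcodec_receive_frame, avcodec_send_packet,
    AVCodecContext, AVPacket, AVERROR, AVERROR_EOF,
};
use libc::EAGAIN;

// Hypothetical caller: the decoder only sends/receives; whoever owns `pkt`
// unrefs it after every interested decoder has consumed it.
unsafe fn decode_one(ctx: *mut AVCodecContext, pkt: *mut AVPacket) -> Result<(), Error> {
    let ret = avcodec_send_packet(ctx, pkt);
    if ret < 0 {
        return Err(Error::msg(format!("Failed to decode packet {}", ret)));
    }
    loop {
        let mut frame = av_frame_alloc();
        let ret = avcodec_receive_frame(ctx, frame);
        if ret == AVERROR(EAGAIN) || ret == AVERROR_EOF {
            av_frame_free(&mut frame);
            break;
        }
        if ret < 0 {
            av_frame_free(&mut frame);
            return Err(Error::msg(format!("Decode error {}", ret)));
        }
        // ... hand the decoded frame to the rest of the pipeline here ...
        av_frame_free(&mut frame);
    }
    av_packet_unref(pkt); // caller-owned cleanup, moved out of the decoder
    Ok(())
}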

View File

@ -42,16 +42,15 @@ unsafe extern "C" fn read_data(
let chan = opaque as *mut UnboundedReceiver<Bytes>;
if let Some(data) = (*chan).blocking_recv() {
let buff_len = data.len();
let len = size.min(buff_len as libc::c_int);
if len > 0 {
assert!(size as usize >= buff_len);
if buff_len > 0 {
memcpy(
buffer as *mut libc::c_void,
data.as_ptr() as *const libc::c_void,
len as libc::c_ulonglong,
buff_len as libc::c_ulonglong,
);
}
len
buff_len as libc::c_int
} else {
AVERROR_EOF
}
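
The callback now assumes every chunk pulled off the channel fits into the buffer FFmpeg hands it, rather than copying a partial chunk and dropping the remainder. That assumption is set up where the custom AVIO context is created; a rough sketch of that wiring, assuming a 4 KiB AVIO buffer (the demuxer's real buffer size is not shown in this diff, and make_io is a hypothetical name):

use bytes::Bytes;
use ffmpeg_sys_next::{av_malloc, avio_alloc_context, AVIOContext};
use tokio::sync::mpsc::UnboundedReceiver;

type ReadFn = unsafe extern "C" fn(*mut libc::c_void, *mut u8, libc::c_int) -> libc::c_int;

// Hypothetical wiring for a read-only AVIO context: the internal buffer must
// be at least as large as the biggest Bytes chunk the ingress can push onto
// the channel, otherwise the assert in read_data above will fire.
unsafe fn make_io(chan: *mut UnboundedReceiver<Bytes>, read_cb: ReadFn) -> *mut AVIOContext {
    const BUF_SIZE: usize = 4096; // assumption, not taken from this commit
    let buf = av_malloc(BUF_SIZE as _) as *mut u8;
    avio_alloc_context(
        buf,
        BUF_SIZE as libc::c_int,
        0, // read-only
        chan as *mut libc::c_void,
        Some(read_cb),
        None,
        None,
    )
}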
@ -155,6 +154,9 @@ impl Demuxer {
if (*pkt).time_base.num == 0 {
(*pkt).time_base = (*stream).time_base;
}
if (*stream).start_time > 0 && (*pkt).pts != AV_NOPTS_VALUE {
(*pkt).pts -= (*stream).start_time;
}
(*pkt).opaque = stream as *mut libc::c_void;
let pkg = PipelinePayload::AvPacket("Demuxer packet".to_owned(), pkt);
@ -166,7 +168,7 @@ impl Demuxer {
unsafe {
let score = (*self.ctx).probe_score;
if score < 30 {
if (Instant::now() - self.started) > Duration::from_secs(1) {
if (Instant::now() - self.started) > Duration::from_millis(500) {
return Ok(Some(self.probe_input()?));
}
return Ok(None);

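Two behavioural tweaks in this file: packets are rebased against the stream's start_time so downstream timestamps start near zero, and a low probe score is retried after 500 ms instead of a full second. A tiny illustration of the rebasing with made-up numbers:

// With a 1/90000 time base and start_time = 900_000 (10 seconds), an input
// pts of 903_000 leaves the demuxer as 3_000.
fn rebase(pts: i64, start_time: i64) -> i64 {
    pts - start_time
}

fn main() {
    assert_eq!(rebase(903_000, 900_000), 3_000);
}
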
View File

@ -4,46 +4,34 @@ use std::mem::transmute;
use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{AV_CH_LAYOUT_STEREO, av_dump_format, av_get_sample_fmt, av_interleaved_write_frame, av_opt_set, AVChannelLayout, AVChannelLayout__bindgen_ty_1, avcodec_find_encoder, avcodec_parameters_from_context, AVCodecContext, avformat_alloc_output_context2, avformat_free_context, avformat_new_stream, avformat_write_header, AVFormatContext, AVPacket, AVRational};
use ffmpeg_sys_next::AVChannelOrder::AV_CHANNEL_ORDER_NATIVE;
use ffmpeg_sys_next::AVColorSpace::AVCOL_SPC_BT709;
use ffmpeg_sys_next::AVMediaType::{AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_VIDEO};
use ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_sys_next::{
av_dump_format, av_get_sample_fmt, av_interleaved_write_frame, av_opt_set,
avcodec_find_encoder, avcodec_parameters_from_context, avformat_alloc_output_context2,
avformat_free_context, avformat_new_stream, avformat_write_header, AVChannelLayout,
AVChannelLayout__bindgen_ty_1, AVCodecContext, AVFormatContext, AVPacket, AVRational,
AV_CH_LAYOUT_STEREO,
};
use itertools::Itertools;
use log::info;
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc::UnboundedReceiver;
use uuid::Uuid;
use crate::pipeline::PipelinePayload;
use crate::egress::{map_variants_to_streams, EgressConfig, update_pkt_for_muxer, get_pkt_variant};
use crate::encode::dump_pkt_info;
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::utils::{get_ffmpeg_error_msg, id_ref_to_uuid};
use crate::variant::VariantStream;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct HLSEgressConfig {
pub out_dir: String,
pub variants: Vec<VariantStream>,
}
impl Display for HLSEgressConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "HLS: out_dir={}", self.out_dir)?;
if !self.variants.is_empty() {
write!(f, "\n\tStreams: ")?;
for v in &self.variants {
write!(f, "\n\t\t{}", v)?;
}
}
Ok(())
}
}
use crate::variant::{VariantStream, VariantStreamType};
pub struct HlsEgress {
id: Uuid,
config: HLSEgressConfig,
config: EgressConfig,
ctx: *mut AVFormatContext,
chan_in: UnboundedReceiver<PipelinePayload>,
stream_init: HashSet<i32>,
}
unsafe impl Send for HlsEgress {}
@ -63,14 +51,13 @@ impl HlsEgress {
pub fn new(
chan_in: UnboundedReceiver<PipelinePayload>,
id: Uuid,
config: HLSEgressConfig,
config: EgressConfig,
) -> Self {
Self {
id,
config,
ctx: ptr::null_mut(),
chan_in,
stream_init: HashSet::new(),
}
}
@ -83,7 +70,7 @@ impl HlsEgress {
&mut ctx,
ptr::null(),
"hls\0".as_ptr() as *const libc::c_char,
format!("{}/stream_%v/live.m3u8\0", base).as_ptr() as *const libc::c_char,
format!("{}/%v/live.m3u8\0", base).as_ptr() as *const libc::c_char,
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
@ -92,7 +79,7 @@ impl HlsEgress {
av_opt_set(
(*ctx).priv_data,
"hls_segment_filename\0".as_ptr() as *const libc::c_char,
format!("{}/stream_%v/seg_%05d.ts\0", base).as_ptr() as *const libc::c_char,
format!("{}/%v/%05d.ts\0", base).as_ptr() as *const libc::c_char,
0,
);
@ -110,6 +97,22 @@ impl HlsEgress {
0,
);
if let Some(first_video_track) = self.config.variants.iter().find_map(|v| {
if let VariantStream::Video(vv) = v {
Some(vv)
} else {
None
}
}) {
av_opt_set(
(*ctx).priv_data,
"hls_time\0".as_ptr() as *const libc::c_char,
format!("{}\0", first_video_track.keyframe_interval).as_ptr()
as *const libc::c_char,
0,
);
}
av_opt_set(
(*ctx).priv_data,
"hls_flags\0".as_ptr() as *const libc::c_char,
@ -130,10 +133,7 @@ impl HlsEgress {
stream_map.insert(var.dst_index(), vec![cfg]);
}
}
let stream_map = stream_map
.values()
.map(|v| v.join(","))
.join(" ");
let stream_map = stream_map.values().map(|v| v.join(",")).join(" ");
info!("map_str={}", stream_map);
@ -144,34 +144,7 @@ impl HlsEgress {
0,
);
for var in &mut self.config.variants {
match var {
VariantStream::Video(vs) => {
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// overwrite dst_index to match output stream
vs.dst_index = (*stream).index as usize;
vs.to_stream(stream);
vs.to_codec_params((*stream).codecpar);
}
VariantStream::Audio(va) => {
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// overwrite dst_index to match output stream
va.dst_index = (*stream).index as usize;
va.to_stream(stream);
va.to_codec_params((*stream).codecpar);
}
}
}
av_dump_format(ctx, 0, ptr::null(), 1);
map_variants_to_streams(ctx, &mut self.config.variants)?;
let ret = avformat_write_header(ctx, ptr::null_mut());
if ret < 0 {
@ -183,24 +156,10 @@ impl HlsEgress {
}
unsafe fn process_pkt(&mut self, pkt: *mut AVPacket) -> Result<(), Error> {
let variant_id = id_ref_to_uuid((*pkt).opaque_ref)?;
let variant = self.config.variants.iter().find(|v| v.id() == variant_id);
if variant.is_none() {
return Err(Error::msg(format!(
"No stream found with id={:?}",
variant_id
)));
}
let stream = *(*self.ctx).streams.add(variant.unwrap().dst_index());
let idx = (*stream).index;
(*pkt).stream_index = idx;
if !self.stream_init.contains(&idx) {
let encoder = (*pkt).opaque as *mut AVCodecContext;
avcodec_parameters_from_context((*stream).codecpar, encoder);
self.stream_init.insert(idx);
}
let variant = get_pkt_variant(&self.config.variants, pkt)?;
update_pkt_for_muxer(self.ctx, pkt, &variant);
//dump_pkt_info(pkt);
let ret = av_interleaved_write_frame(self.ctx, pkt);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
@ -208,8 +167,10 @@ impl HlsEgress {
Ok(())
}
}
pub fn process(&mut self) -> Result<(), Error> {
impl PipelineProcessor for HlsEgress {
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv() {
match pkg {
PipelinePayload::AvPacket(_, pkt) => unsafe {

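The HLS muxer is configured entirely through av_opt_set on its priv_data, and hls_time is now derived from the first video variant's keyframe interval so segment length follows the keyframe cadence. A hypothetical helper (not part of this commit) that wraps the repeated null-terminated-string plumbing:

use std::ffi::CString;
use ffmpeg_sys_next::{av_opt_set, AVFormatContext};

// Hypothetical convenience wrapper around av_opt_set for muxer options.
unsafe fn set_muxer_opt(ctx: *mut AVFormatContext, key: &str, value: &str) -> libc::c_int {
    let key = CString::new(key).expect("valid option key");
    let value = CString::new(value).expect("valid option value");
    av_opt_set((*ctx).priv_data, key.as_ptr(), value.as_ptr(), 0)
}

// e.g. set_muxer_opt(ctx, "hls_time", &first_video_track.keyframe_interval.to_string());
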
View File

@ -1,2 +1,94 @@
use std::fmt::{Display, Formatter};
use std::ptr;
use anyhow::Error;
use ffmpeg_sys_next::{av_dump_format, avformat_new_stream, AVFormatContext, AVPacket};
use log::info;
use serde::{Deserialize, Serialize};
use crate::utils::id_ref_to_uuid;
use crate::variant::{VariantStream, VariantStreamType};
pub mod hls;
pub mod http;
pub mod mpegts;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EgressConfig {
pub name: String,
pub out_dir: String,
pub variants: Vec<VariantStream>,
}
impl Display for EgressConfig {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}: out_dir={}", self.name, self.out_dir)?;
if !self.variants.is_empty() {
write!(f, "\n\tStreams: ")?;
for v in &self.variants {
write!(f, "\n\t\t{}", v)?;
}
}
Ok(())
}
}
pub unsafe fn map_variants_to_streams(
ctx: *mut AVFormatContext,
variants: &mut Vec<VariantStream>,
) -> Result<(), Error> {
for var in variants {
match var {
VariantStream::Video(vs) => {
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// overwrite dst_index to match output stream
vs.dst_index = (*stream).index as usize;
vs.to_stream(stream);
}
VariantStream::Audio(va) => {
let stream = avformat_new_stream(ctx, ptr::null());
if stream.is_null() {
return Err(Error::msg("Failed to add stream to output"));
}
// overwrite dst_index to match output stream
va.dst_index = (*stream).index as usize;
va.to_stream(stream);
}
}
}
av_dump_format(ctx, 0, ptr::null(), 1);
Ok(())
}
pub unsafe fn get_pkt_variant(
vars: &Vec<VariantStream>,
pkt: *mut AVPacket,
) -> Result<&VariantStream, Error> {
let variant_id = id_ref_to_uuid((*pkt).opaque_ref)?;
let variant = vars.iter().find(|v| v.id() == variant_id);
if variant.is_none() {
return Err(Error::msg(format!(
"No stream found with id={:?}",
variant_id
)));
}
Ok(variant.unwrap())
}
pub unsafe fn update_pkt_for_muxer(
ctx: *mut AVFormatContext,
pkt: *mut AVPacket,
var: &VariantStream,
) {
let stream = *(*ctx).streams.add(var.dst_index());
let idx = (*stream).index;
if idx != (*pkt).stream_index {
(*pkt).stream_index = idx;
}
}
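
map_variants_to_streams, get_pkt_variant and update_pkt_for_muxer now carry the stream-mapping logic that the HLS egress previously had inline, so any egress follows the same contract: create one output stream per variant at setup, then per packet resolve the variant from pkt.opaque_ref and point the packet at its destination stream. A sketch of that per-packet path, mirroring the two egress implementations in this commit:

use anyhow::Error;
use ffmpeg_sys_next::{av_interleaved_write_frame, AVFormatContext, AVPacket};
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::VariantStream;

// Per-packet path shared by HLS and MPEG-TS egress.
unsafe fn write_pkt(
    ctx: *mut AVFormatContext,
    variants: &Vec<VariantStream>,
    pkt: *mut AVPacket,
) -> Result<(), Error> {
    let variant = get_pkt_variant(variants, pkt)?;
    update_pkt_for_muxer(ctx, pkt, variant);
    let ret = av_interleaved_write_frame(ctx, pkt);
    if ret < 0 {
        return Err(Error::msg(get_ffmpeg_error_msg(ret)));
    }
    Ok(())
}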

src/egress/mpegts.rs (new file, 118 additions)
View File

@ -0,0 +1,118 @@
use std::{fs, ptr};
use std::collections::HashSet;
use std::fmt::Display;
use anyhow::Error;
use ffmpeg_sys_next::{av_guess_format, av_interleaved_write_frame, av_strdup, avcodec_parameters_from_context, AVCodecContext, avformat_alloc_context, avformat_free_context, avformat_write_header, AVFormatContext, AVIO_FLAG_READ_WRITE, avio_open2, AVPacket};
use itertools::Itertools;
use tokio::sync::mpsc::UnboundedReceiver;
use uuid::Uuid;
use crate::egress::{EgressConfig, get_pkt_variant, map_variants_to_streams, update_pkt_for_muxer};
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::utils::get_ffmpeg_error_msg;
use crate::variant::VariantStreamType;
pub struct MPEGTSEgress {
id: Uuid,
config: EgressConfig,
ctx: *mut AVFormatContext,
chan_in: UnboundedReceiver<PipelinePayload>,
stream_init: HashSet<i32>,
}
unsafe impl Send for MPEGTSEgress {}
unsafe impl Sync for MPEGTSEgress {}
impl Drop for MPEGTSEgress {
fn drop(&mut self) {
unsafe {
avformat_free_context(self.ctx);
self.ctx = ptr::null_mut();
}
}
}
impl MPEGTSEgress {
pub fn new(
chan_in: UnboundedReceiver<PipelinePayload>,
id: Uuid,
config: EgressConfig,
) -> Self {
Self {
id,
config,
ctx: ptr::null_mut(),
chan_in,
stream_init: HashSet::new(),
}
}
unsafe fn setup_muxer(&mut self) -> Result<(), Error> {
let mut ctx = avformat_alloc_context();
if ctx.is_null() {
return Err(Error::msg("Failed to create muxer context"));
}
let base = format!("{}/{}", self.config.out_dir, self.id);
fs::create_dir_all(base.clone())?;
let ret = avio_open2(
&mut (*ctx).pb,
format!("{}/live.ts\0", base).as_ptr() as *const libc::c_char,
AVIO_FLAG_READ_WRITE,
ptr::null(),
ptr::null_mut(),
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
(*ctx).oformat = av_guess_format(
"mpegts\0".as_ptr() as *const libc::c_char,
ptr::null(),
ptr::null(),
);
if (*ctx).oformat.is_null() {
return Err(Error::msg("Output format not found"));
}
(*ctx).url = av_strdup(format!("{}/live.ts\0", base).as_ptr() as *const libc::c_char);
map_variants_to_streams(ctx, &mut self.config.variants)?;
let ret = avformat_write_header(ctx, ptr::null_mut());
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
self.ctx = ctx;
Ok(())
}
unsafe fn process_pkt(&mut self, pkt: *mut AVPacket) -> Result<(), Error> {
let variant = get_pkt_variant(&self.config.variants, pkt)?;
update_pkt_for_muxer(self.ctx, pkt, &variant);
let ret = av_interleaved_write_frame(self.ctx, pkt);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
Ok(())
}
}
impl PipelineProcessor for MPEGTSEgress {
fn process(&mut self) -> Result<(), Error> {
while let Ok(pkg) = self.chan_in.try_recv() {
match pkg {
PipelinePayload::AvPacket(_, pkt) => unsafe {
if self.ctx.is_null() {
self.setup_muxer()?;
}
self.process_pkt(pkt)?;
},
_ => return Err(Error::msg("Payload not supported")),
}
}
Ok(())
}
}

View File

@ -1,3 +1,4 @@
use std::ffi::CStr;
use std::mem::transmute;
use std::ptr;
@ -5,30 +6,34 @@ use anyhow::Error;
use ffmpeg_sys_next::{
av_audio_fifo_alloc, av_audio_fifo_free, av_audio_fifo_read, av_audio_fifo_realloc,
av_audio_fifo_size, av_audio_fifo_write, av_buffer_ref, av_buffer_unref,
av_channel_layout_copy, av_frame_alloc, av_frame_free, av_frame_get_buffer, av_freep,
av_packet_alloc, av_packet_free, av_samples_alloc_array_and_samples, AVAudioFifo,
av_channel_layout_copy, av_frame_alloc, av_frame_clone, av_frame_free, av_frame_get_buffer,
av_frame_unref, av_freep, av_get_sample_fmt_name, av_packet_alloc, av_packet_free,
av_rescale_rnd, av_samples_alloc, av_samples_alloc_array_and_samples, AVAudioFifo,
AVBufferRef, AVCodec, avcodec_alloc_context3, avcodec_free_context,
avcodec_open2, avcodec_receive_packet, avcodec_send_frame, AVCodecContext, AVERROR, AVFrame, swr_alloc_set_opts2,
swr_convert, swr_free, swr_init, SwrContext,
avcodec_open2, avcodec_receive_packet, avcodec_send_frame, AVCodecContext, AVERROR, AVFrame,
swr_alloc_set_opts2, swr_convert, swr_convert_frame, swr_free, swr_get_delay, swr_init, SwrContext,
};
use ffmpeg_sys_next::AVRounding::AV_ROUND_UP;
use libc::EAGAIN;
use log::info;
use tokio::sync::mpsc::UnboundedSender;
use crate::encode::set_encoded_pkt_timing;
use crate::encode::{dump_pkt_info, set_encoded_pkt_timing};
use crate::ipc::Rx;
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::utils::{audio_variant_id_ref, get_ffmpeg_error_msg, id_ref_to_uuid};
use crate::variant::AudioVariant;
use crate::variant::{AudioVariant, VariantStreamType};
pub struct AudioEncoder<T> {
variant: AudioVariant,
ctx: *mut AVCodecContext,
codec: *const AVCodec,
fifo: *mut AVAudioFifo,
swr_ctx: *mut SwrContext,
fifo: *mut AVAudioFifo,
chan_in: T,
chan_out: UnboundedSender<PipelinePayload>,
var_id_ref: *mut AVBufferRef,
pts: i64,
}
unsafe impl<T> Send for AudioEncoder<T> {}
@ -59,12 +64,13 @@ where
Self {
ctx: ptr::null_mut(),
codec: ptr::null(),
fifo: ptr::null_mut(),
swr_ctx: ptr::null_mut(),
fifo: ptr::null_mut(),
variant,
chan_in,
chan_out,
var_id_ref: id_ref,
pts: 0,
}
}
@ -82,31 +88,52 @@ where
self.variant.to_codec_context(ctx);
// setup audio FIFO
let fifo = av_audio_fifo_alloc((*ctx).sample_fmt, 2, 1);
if fifo.is_null() {
return Err(Error::msg("Failed to allocate audio FiFO buffer"));
}
// setup re-sampler if output format does not match input format
if (*ctx).sample_fmt != transmute((*frame).format)
|| (*ctx).sample_rate != (*frame).sample_rate
|| (*ctx).ch_layout.nb_channels != (*frame).ch_layout.nb_channels
{
info!(
"Setup audio resampler: {}@{}->{}@{}",
CStr::from_ptr(av_get_sample_fmt_name(transmute((*frame).format)))
.to_str()
.unwrap(),
(*frame).sample_rate,
CStr::from_ptr(av_get_sample_fmt_name((*ctx).sample_fmt))
.to_str()
.unwrap(),
(*ctx).sample_rate
);
let mut swr_ctx = ptr::null_mut();
let ret = swr_alloc_set_opts2(
&mut swr_ctx,
&(*ctx).ch_layout,
(*ctx).sample_fmt,
(*ctx).sample_rate,
&(*frame).ch_layout,
transmute((*frame).format),
(*frame).sample_rate,
0,
ptr::null_mut(),
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
let mut swr_ctx = ptr::null_mut();
let ret = swr_alloc_set_opts2(
&mut swr_ctx,
&(*ctx).ch_layout,
(*ctx).sample_fmt,
(*ctx).sample_rate,
&(*frame).ch_layout,
transmute((*frame).format),
(*frame).sample_rate,
0,
ptr::null_mut(),
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
let ret = swr_init(swr_ctx);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
let ret = swr_init(swr_ctx);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
self.swr_ctx = swr_ctx;
let fifo = av_audio_fifo_alloc((*ctx).sample_fmt, (*ctx).ch_layout.nb_channels, 1);
if fifo.is_null() {
return Err(Error::msg("Failed to allocate audio FIFO"));
}
self.fifo = fifo;
}
let ret = avcodec_open2(ctx, encoder, ptr::null_mut());
@ -116,87 +143,97 @@ where
self.ctx = ctx;
self.codec = encoder;
self.swr_ctx = swr_ctx;
self.fifo = fifo;
}
Ok(())
}
/// Returns true if we should process audio frame from FIFO
/// false if nothing to process this frame
unsafe fn process_audio_frame(&mut self, frame: *mut AVFrame) -> Result<bool, Error> {
unsafe fn process_audio_frame(
&mut self,
frame: *mut AVFrame,
) -> Result<Option<*mut AVFrame>, Error> {
if self.swr_ctx.is_null() {
// no re-sampler, return input frame
return Ok(Some(frame));
}
let in_samples = (*frame).nb_samples;
let mut dst_samples: *mut *mut u8 = ptr::null_mut();
let out_samples = av_rescale_rnd(
swr_get_delay(self.swr_ctx, (*frame).sample_rate as i64) + in_samples as i64,
(*self.ctx).sample_rate as i64,
(*frame).sample_rate as i64,
AV_ROUND_UP,
) as libc::c_int;
let mut out_frame = self.new_frame();
(*out_frame).nb_samples = out_samples;
let ret = swr_convert_frame(self.swr_ctx, out_frame, frame);
if ret < 0 {
av_frame_free(&mut out_frame);
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
let ret = av_audio_fifo_write(
self.fifo,
(*out_frame).extended_data as *const *mut libc::c_void,
(*out_frame).nb_samples,
);
if ret < 0 {
av_frame_free(&mut out_frame);
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
av_frame_free(&mut out_frame);
let buff = av_audio_fifo_size(self.fifo);
return if buff < (*self.ctx).frame_size {
Ok(None)
} else {
let out_frame = self.read_fifo_frame()?;
(*out_frame).opaque = (*frame).opaque;
Ok(Some(out_frame))
};
}
unsafe fn read_fifo_frame(&mut self) -> Result<*mut AVFrame, Error> {
let mut out_frame = self.new_frame();
let ret = av_samples_alloc_array_and_samples(
&mut dst_samples,
&mut (*out_frame).extended_data,
ptr::null_mut(),
2,
in_samples,
(*self.ctx).sample_fmt,
(*out_frame).ch_layout.nb_channels,
(*out_frame).nb_samples,
transmute((*out_frame).format),
0,
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
// resample audio
let ret = swr_convert(
self.swr_ctx,
dst_samples,
in_samples,
(*frame).extended_data as *const *const u8,
in_samples,
);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
// push resampled audio into fifo
let ret = av_audio_fifo_realloc(self.fifo, av_audio_fifo_size(self.fifo) + in_samples);
if ret < 0 {
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
if av_audio_fifo_write(
self.fifo,
dst_samples as *const *mut libc::c_void,
in_samples,
) < in_samples
{
return Err(Error::msg("Failed to write samples to FIFO"));
}
if !dst_samples.is_null() {
av_freep(dst_samples.add(0) as *mut libc::c_void);
}
let buffered = av_audio_fifo_size(self.fifo);
Ok(buffered >= (*self.ctx).frame_size)
}
unsafe fn get_fifo_frame(&mut self) -> Result<*mut AVFrame, Error> {
let mut frame = av_frame_alloc();
let frame_size = (*self.ctx).frame_size.min(av_audio_fifo_size(self.fifo));
(*frame).nb_samples = frame_size;
av_channel_layout_copy(&mut (*frame).ch_layout, &(*self.ctx).ch_layout);
(*frame).format = (*self.ctx).sample_fmt as libc::c_int;
(*frame).sample_rate = (*self.ctx).sample_rate;
let ret = av_frame_get_buffer(frame, 0);
if ret < 0 {
av_frame_free(&mut out_frame);
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
let ret = av_audio_fifo_read(
self.fifo,
ptr::addr_of_mut!((*frame).data) as *const *mut libc::c_void,
frame_size,
(*out_frame).extended_data as *const *mut libc::c_void,
(*out_frame).nb_samples,
);
if ret < frame_size {
av_frame_free(&mut frame);
return Err(Error::msg("Failed to read frame from FIFO"));
if ret < 0 {
av_frame_free(&mut out_frame);
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
Ok(frame)
Ok(out_frame)
}
unsafe fn new_frame(&self) -> *mut AVFrame {
let mut out_frame = av_frame_alloc();
(*out_frame).nb_samples = (*self.ctx).frame_size;
av_channel_layout_copy(&mut (*out_frame).ch_layout, &(*self.ctx).ch_layout);
(*out_frame).format = (*self.ctx).sample_fmt as libc::c_int;
(*out_frame).sample_rate = (*self.ctx).sample_rate;
(*out_frame).time_base = (*self.ctx).time_base;
out_frame
}
unsafe fn process_frame(&mut self, frame: *mut AVFrame) -> Result<(), Error> {
@ -204,19 +241,15 @@ where
assert_eq!(var_id, self.variant.id);
self.setup_encoder(frame)?;
if !self.process_audio_frame(frame)? {
let mut frame = self.process_audio_frame(frame)?;
if frame.is_none() {
return Ok(());
}
// read audio from FIFO
let fifo_frame = self.get_fifo_frame()?;
// copy pointer to input stream
(*fifo_frame).opaque = (*frame).opaque;
let frame = fifo_frame;
let mut frame = frame.unwrap();
let mut ret = avcodec_send_frame(self.ctx, frame);
if ret < 0 && ret != AVERROR(EAGAIN) {
av_frame_free(&mut frame);
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
@ -224,6 +257,7 @@ where
let mut pkt = av_packet_alloc();
ret = avcodec_receive_packet(self.ctx, pkt);
if ret < 0 {
av_frame_free(&mut frame);
av_packet_free(&mut pkt);
if ret == AVERROR(EAGAIN) {
return Ok(());
@ -231,13 +265,14 @@ where
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
set_encoded_pkt_timing(self.ctx, pkt, frame);
set_encoded_pkt_timing(self.ctx, pkt, &mut self.pts, &self.variant);
(*pkt).opaque = self.ctx as *mut libc::c_void;
(*pkt).opaque_ref = av_buffer_ref(self.var_id_ref);
self.chan_out
.send(PipelinePayload::AvPacket("Encoder packet".to_owned(), pkt))?;
.send(PipelinePayload::AvPacket("Audio Encoder packet".to_owned(), pkt))?;
}
av_frame_free(&mut frame);
Ok(())
}
}
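
The resampler path now converts straight into a scratch frame with swr_convert_frame and buffers the result in the audio FIFO until a full encoder frame (ctx.frame_size samples) is available, instead of juggling raw sample buffers. The output-size bound used above is the usual swr_get_delay / av_rescale_rnd formula; restated on its own for clarity (same crate functions, standalone sketch):

use ffmpeg_sys_next::{av_rescale_rnd, swr_get_delay, SwrContext};
use ffmpeg_sys_next::AVRounding::AV_ROUND_UP;

// Upper bound on the number of output samples one input frame can produce,
// matching the calculation in process_audio_frame above.
unsafe fn max_out_samples(swr: *mut SwrContext, in_samples: i32, in_rate: i32, out_rate: i32) -> i32 {
    av_rescale_rnd(
        swr_get_delay(swr, in_rate as i64) + in_samples as i64,
        out_rate as i64,
        in_rate as i64,
        AV_ROUND_UP,
    ) as i32
}

For example, an 1152-sample frame at 44.1 kHz resampled to 48 kHz yields at most about 1254 samples, plus whatever the resampler still has buffered from previous frames.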

View File

@ -1,22 +1,62 @@
use ffmpeg_sys_next::{av_packet_rescale_ts, AVCodecContext, AVFrame, AVPacket, AVStream};
use std::ptr;
use ffmpeg_sys_next::{
AV_LOG_INFO, AV_NOPTS_VALUE, av_packet_rescale_ts, av_pkt_dump_log2, AV_PKT_FLAG_KEY, av_q2d, AVCodecContext,
AVPacket, AVRational, AVStream,
};
use ffmpeg_sys_next::AVMediaType::AVMEDIA_TYPE_VIDEO;
use log::info;
use crate::variant::VariantStreamType;
pub mod audio;
pub mod video;
/// Set packet details based on decoded frame
pub unsafe fn set_encoded_pkt_timing(
pub unsafe fn set_encoded_pkt_timing<TVar>(
ctx: *mut AVCodecContext,
pkt: *mut AVPacket,
in_frame: *mut AVFrame,
) {
assert!(!(*in_frame).opaque.is_null());
let in_stream = (*in_frame).opaque as *mut AVStream;
pts: &mut i64,
var: &TVar,
) where
TVar: VariantStreamType,
{
let tb = (*ctx).time_base;
(*pkt).stream_index = (*in_stream).index;
if (*ctx).codec_type == AVMEDIA_TYPE_VIDEO {
(*pkt).duration = tb.den as i64 / tb.num as i64 / (*in_stream).avg_frame_rate.num as i64
* (*in_stream).avg_frame_rate.den as i64;
(*pkt).stream_index = var.dst_index() as libc::c_int;
(*pkt).time_base = var.time_base();
if (*ctx).codec_type == AVMEDIA_TYPE_VIDEO && (*pkt).duration == 0 {
let tb_sec = tb.den as i64 / tb.num as i64;
let fps = (*ctx).framerate.num as i64 * (*ctx).framerate.den as i64;
(*pkt).duration = tb_sec / fps;
}
if (*pkt).pts == AV_NOPTS_VALUE {
(*pkt).pts = *pts;
*pts += (*pkt).duration;
} else {
*pts = (*pkt).pts;
}
if (*pkt).dts == AV_NOPTS_VALUE {
(*pkt).dts = (*pkt).pts;
}
av_packet_rescale_ts(pkt, (*in_stream).time_base, (*ctx).time_base);
}
pub unsafe fn dump_pkt_info(pkt: *const AVPacket) {
let tb = (*pkt).time_base;
info!(
"stream #{}: keyframe={}, duration={:.3}, dts={}, pts={}, size={}",
(*pkt).stream_index,
((*pkt).flags & AV_PKT_FLAG_KEY) != 0,
(*pkt).duration as f64 * av_q2d(tb),
if (*pkt).dts == AV_NOPTS_VALUE {
"N/A".to_owned()
} else {
format!("{}", (*pkt).dts)
},
if (*pkt).pts == AV_NOPTS_VALUE {
"N/A".to_owned()
} else {
format!("{}", (*pkt).pts)
},
(*pkt).size
);
}
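
set_encoded_pkt_timing no longer needs the source AVStream: it stamps the packet with the variant's destination index and time base, synthesizes a duration for video packets that arrive without one, and keeps a running pts counter for packets the encoder returns with AV_NOPTS_VALUE. A toy illustration of that counter, assuming the 1/90000 video time base and 30 fps used elsewhere in this commit:

// Fallback pts assignment, mirroring the branch above: a packet without a pts
// gets the running counter, which then advances by one packet duration.
fn next_pts(counter: &mut i64, duration: i64) -> i64 {
    let pts = *counter;
    *counter += duration;
    pts
}

fn main() {
    let mut counter = 0i64;
    let duration = 90_000 / 30; // 3000 ticks per frame at 30 fps in a 1/90000 time base
    assert_eq!(next_pts(&mut counter, duration), 0);
    assert_eq!(next_pts(&mut counter, duration), 3_000);
    assert_eq!(next_pts(&mut counter, duration), 6_000);
}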

View File

@ -14,7 +14,7 @@ use crate::encode::set_encoded_pkt_timing;
use crate::ipc::Rx;
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::utils::{get_ffmpeg_error_msg, id_ref_to_uuid, video_variant_id_ref};
use crate::variant::VideoVariant;
use crate::variant::{VariantStreamType, VideoVariant};
pub struct VideoEncoder<T> {
variant: VideoVariant,
@ -23,6 +23,7 @@ pub struct VideoEncoder<T> {
chan_in: T,
chan_out: UnboundedSender<PipelinePayload>,
var_id_ref: *mut AVBufferRef,
pts: i64,
}
unsafe impl<T> Send for VideoEncoder<T> {}
@ -46,6 +47,7 @@ where
chan_in,
chan_out,
var_id_ref: id_ref,
pts: 0,
}
}
@ -89,7 +91,7 @@ where
while ret > 0 || ret == AVERROR(EAGAIN) {
let mut pkt = av_packet_alloc();
ret = avcodec_receive_packet(self.ctx, pkt);
if ret < 0 {
if ret != 0 {
av_packet_free(&mut pkt);
if ret == AVERROR(EAGAIN) {
return Ok(());
@ -97,11 +99,12 @@ where
return Err(Error::msg(get_ffmpeg_error_msg(ret)));
}
set_encoded_pkt_timing(self.ctx, pkt, frame);
set_encoded_pkt_timing(self.ctx, pkt, &mut self.pts, &self.variant);
(*pkt).opaque = self.ctx as *mut libc::c_void;
(*pkt).opaque_ref = av_buffer_ref(self.var_id_ref);
assert_ne!((*pkt).data, ptr::null_mut());
self.chan_out
.send(PipelinePayload::AvPacket("Encoder packet".to_owned(), pkt))?;
.send(PipelinePayload::AvPacket("Video Encoder packet".to_owned(), pkt))?;
}
Ok(())

View File

@ -5,7 +5,7 @@ use ffmpeg_sys_next::{av_frame_clone, av_frame_copy_props, av_frame_free, av_pac
use serde::{Deserialize, Serialize};
use crate::demux::info::DemuxStreamInfo;
use crate::egress::hls::HLSEgressConfig;
use crate::egress::EgressConfig;
use crate::variant::VariantStream;
pub mod builder;
@ -13,10 +13,10 @@ pub mod runner;
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum EgressType {
HLS(HLSEgressConfig),
HLS(EgressConfig),
DASH,
WHEP,
MPEGTS,
MPEGTS(EgressConfig),
}
impl Display for EgressType {
@ -28,7 +28,7 @@ impl Display for EgressType {
EgressType::HLS(c) => format!("{}", c),
EgressType::DASH => "DASH".to_owned(),
EgressType::WHEP => "WHEP".to_owned(),
EgressType::MPEGTS => "MPEGTS".to_owned(),
EgressType::MPEGTS(c) => format!("{}", c),
}
)
}
@ -84,11 +84,13 @@ impl Clone for PipelinePayload {
PipelinePayload::Empty => PipelinePayload::Empty,
PipelinePayload::Bytes(b) => PipelinePayload::Bytes(b.clone()),
PipelinePayload::AvPacket(t, p) => unsafe {
assert!(!(**p).data.is_null(), "Cannot clone empty packet");
let new_pkt = av_packet_clone(*p);
av_packet_copy_props(new_pkt, *p);
PipelinePayload::AvPacket(t.clone(), new_pkt)
},
PipelinePayload::AvFrame(t, p, idx) => unsafe {
assert!(!(**p).extended_data.is_null(), "Cannot clone empty frame");
let new_frame = av_frame_clone(*p);
av_frame_copy_props(new_frame, *p);
PipelinePayload::AvFrame(t.clone(), new_frame, *idx)

View File

@ -4,12 +4,14 @@ use std::time::{Duration, Instant};
use anyhow::Error;
use log::{info, warn};
use tokio::sync::broadcast;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use crate::decode::Decoder;
use crate::demux::Demuxer;
use crate::demux::info::{DemuxStreamInfo, StreamChannelType};
use crate::egress::EgressConfig;
use crate::egress::hls::HlsEgress;
use crate::egress::mpegts::MPEGTSEgress;
use crate::encode::audio::AudioEncoder;
use crate::encode::video::VideoEncoder;
use crate::pipeline::{EgressType, PipelineConfig, PipelinePayload, PipelineProcessor};
@ -29,7 +31,7 @@ pub struct PipelineRunner {
decoder: Decoder,
decoder_output: broadcast::Receiver<PipelinePayload>,
encoders: Vec<PipelineChain>,
egress: Vec<HlsEgress>,
egress: Vec<Box<dyn PipelineProcessor + Sync + Send>>,
started: Instant,
frame_no: u64,
stream_info: Option<DemuxStreamInfo>,
@ -106,57 +108,36 @@ impl PipelineRunner {
// re-configure with demuxer info
self.config = self.webhook.configure(&info);
info!("Configuring pipeline {}", self.config);
info!(
"Livestream url: http://localhost:8080/{}/live.m3u8",
self.config.id
);
let video_stream = info
.channels
.iter()
.find(|s| s.channel_type == StreamChannelType::Video);
if let Some(_vs) = video_stream {
for eg in &self.config.egress {
match eg {
EgressType::HLS(cfg) => {
let (egress_tx, egress_rx) = unbounded_channel();
self.egress
.push(HlsEgress::new(egress_rx, self.config.id, cfg.clone()));
for v in &cfg.variants {
match v {
VariantStream::Video(vs) => {
let (sw_tx, sw_rx) = unbounded_channel();
self.encoders.push(PipelineChain {
first: Box::new(Scaler::new(
self.decoder_output.resubscribe(),
sw_tx.clone(),
vs.clone(),
)),
second: Box::new(VideoEncoder::new(
sw_rx,
egress_tx.clone(),
vs.clone(),
)),
});
}
VariantStream::Audio(va) => {
let (tag_tx, tag_rx) = unbounded_channel();
self.encoders.push(PipelineChain {
first: Box::new(TagFrame::new(
v.clone(),
self.decoder_output.resubscribe(),
tag_tx,
)),
second: Box::new(AudioEncoder::new(
tag_rx,
egress_tx.clone(),
va.clone(),
)),
});
}
}
}
for eg in &self.config.egress {
match eg {
EgressType::HLS(cfg) => {
let (egress_tx, egress_rx) = unbounded_channel();
self.egress.push(Box::new(HlsEgress::new(
egress_rx,
self.config.id,
cfg.clone(),
)));
for x in self.add_egress_variants(cfg, egress_tx) {
self.encoders.push(x);
}
_ => return Err(Error::msg("Egress config not supported")),
}
EgressType::MPEGTS(cfg) => {
let (egress_tx, egress_rx) = unbounded_channel();
self.egress.push(Box::new(MPEGTSEgress::new(
egress_rx,
self.config.id,
cfg.clone(),
)));
for x in self.add_egress_variants(cfg, egress_tx) {
self.encoders.push(x);
}
}
_ => return Err(Error::msg("Egress config not supported")),
}
}
@ -166,4 +147,39 @@ impl PipelineRunner {
Ok(())
}
}
fn add_egress_variants(
&self,
cfg: &EgressConfig,
egress_tx: UnboundedSender<PipelinePayload>,
) -> Vec<PipelineChain> {
let mut ret = vec![];
for v in &cfg.variants {
match v {
VariantStream::Video(vs) => {
let (sw_tx, sw_rx) = unbounded_channel();
ret.push(PipelineChain {
first: Box::new(Scaler::new(
self.decoder_output.resubscribe(),
sw_tx.clone(),
vs.clone(),
)),
second: Box::new(VideoEncoder::new(sw_rx, egress_tx.clone(), vs.clone())),
});
}
VariantStream::Audio(va) => {
let (tag_tx, tag_rx) = unbounded_channel();
ret.push(PipelineChain {
first: Box::new(TagFrame::new(
v.clone(),
self.decoder_output.resubscribe(),
tag_tx,
)),
second: Box::new(AudioEncoder::new(tag_rx, egress_tx.clone(), va.clone())),
});
}
}
}
ret
}
}
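
add_egress_variants factors out the per-variant encoder wiring that used to live inline in the HLS branch: every variant resubscribes to the decoder's broadcast output, and all encoder chains share one cloned sender into their egress. The channel topology in isolation, with the payload simplified to an integer for the sketch:

use tokio::sync::{broadcast, mpsc::unbounded_channel};

fn main() {
    // Decoder output fans out: one broadcast receiver per encoder chain.
    let (decoder_tx, decoder_rx) = broadcast::channel::<u64>(16);
    let video_rx = decoder_rx.resubscribe();
    let audio_rx = decoder_rx.resubscribe();

    // All chains funnel into the same egress through cloned senders.
    let (egress_tx, _egress_rx) = unbounded_channel::<u64>();
    let video_tx = egress_tx.clone();
    let audio_tx = egress_tx.clone();

    let _ = (decoder_tx, video_rx, audio_rx, video_tx, audio_tx);
}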

View File

@ -5,7 +5,7 @@ use tokio::sync::mpsc::UnboundedSender;
use crate::ipc::Rx;
use crate::pipeline::{PipelinePayload, PipelineProcessor};
use crate::utils::variant_id_ref;
use crate::variant::VariantStream;
use crate::variant::{VariantStream, VariantStreamType};
pub struct TagFrame<TRecv> {
variant: VariantStream,

View File

@ -3,16 +3,16 @@ use std::fmt::{Display, Formatter};
use std::mem::transmute;
use std::ptr;
use ffmpeg_sys_next::{
AV_CH_LAYOUT_STEREO, av_get_sample_fmt, av_opt_set, AVChannelLayout,
AVChannelLayout__bindgen_ty_1, AVCodec, avcodec_find_encoder, avcodec_find_encoder_by_name, avcodec_get_name,
AVCodecContext, AVCodecParameters, AVRational, AVStream,
};
use ffmpeg_sys_next::AVChannelOrder::AV_CHANNEL_ORDER_NATIVE;
use ffmpeg_sys_next::AVCodecID::{AV_CODEC_ID_AAC, AV_CODEC_ID_H264};
use ffmpeg_sys_next::AVColorRange::AVCOL_RANGE_MPEG;
use ffmpeg_sys_next::AVColorSpace::AVCOL_SPC_BT709;
use ffmpeg_sys_next::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_sys_next::{
av_get_sample_fmt, av_opt_set, avcodec_find_encoder, avcodec_find_encoder_by_name,
avcodec_get_name, AVChannelLayout, AVChannelLayout__bindgen_ty_1, AVCodec, AVCodecContext,
AVCodecParameters, AVRational, AVStream, AV_CH_LAYOUT_STEREO,
};
use ffmpeg_sys_next::AVColorRange::{AVCOL_RANGE_JPEG, AVCOL_RANGE_MPEG};
use serde::{Deserialize, Serialize};
use uuid::Uuid;
@ -33,6 +33,64 @@ impl Display for VariantStream {
}
}
impl VariantStreamType for VariantStream {
fn id(&self) -> Uuid {
match self {
VariantStream::Video(v) => v.id,
VariantStream::Audio(v) => v.id,
}
}
fn src_index(&self) -> usize {
match self {
VariantStream::Video(v) => v.src_index,
VariantStream::Audio(v) => v.src_index,
}
}
fn dst_index(&self) -> usize {
match self {
VariantStream::Video(v) => v.dst_index,
VariantStream::Audio(v) => v.dst_index,
}
}
fn time_base(&self) -> AVRational {
match self {
VariantStream::Video(v) => v.time_base(),
VariantStream::Audio(v) => v.time_base(),
}
}
unsafe fn get_codec(&self) -> *const AVCodec {
match self {
VariantStream::Video(v) => v.get_codec(),
VariantStream::Audio(v) => v.get_codec(),
}
}
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
match self {
VariantStream::Video(v) => v.to_codec_context(ctx),
VariantStream::Audio(v) => v.to_codec_context(ctx),
}
}
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
match self {
VariantStream::Video(v) => v.to_codec_params(params),
VariantStream::Audio(v) => v.to_codec_params(params),
}
}
unsafe fn to_stream(&self, stream: *mut AVStream) {
match self {
VariantStream::Video(v) => v.to_stream(stream),
VariantStream::Audio(v) => v.to_stream(stream),
}
}
}
/// Information related to variant streams for a given egress
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VideoVariant {
@ -135,49 +193,42 @@ impl Display for AudioVariant {
}
}
impl VariantStream {
pub fn id(&self) -> Uuid {
match self {
VariantStream::Video(v) => v.id,
VariantStream::Audio(v) => v.id,
}
}
pub fn src_index(&self) -> usize {
match self {
VariantStream::Video(v) => v.src_index,
VariantStream::Audio(v) => v.src_index,
}
}
pub fn dst_index(&self) -> usize {
match self {
VariantStream::Video(v) => v.dst_index,
VariantStream::Audio(v) => v.dst_index,
}
}
pub fn time_base(&self) -> AVRational {
match &self {
VariantStream::Video(vv) => vv.time_base(),
VariantStream::Audio(va) => va.time_base(),
}
}
pub trait VariantStreamType {
fn id(&self) -> Uuid;
fn src_index(&self) -> usize;
fn dst_index(&self) -> usize;
fn time_base(&self) -> AVRational;
unsafe fn get_codec(&self) -> *const AVCodec;
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext);
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters);
unsafe fn to_stream(&self, stream: *mut AVStream);
}
impl VideoVariant {
pub fn time_base(&self) -> AVRational {
impl VariantStreamType for VideoVariant {
fn id(&self) -> Uuid {
self.id
}
fn src_index(&self) -> usize {
self.src_index
}
fn dst_index(&self) -> usize {
self.dst_index
}
fn time_base(&self) -> AVRational {
AVRational {
num: 1,
den: 90_000,
}
}
pub fn get_codec(&self) -> *const AVCodec {
unsafe { avcodec_find_encoder(transmute(self.codec as u32)) }
unsafe fn get_codec(&self) -> *const AVCodec {
avcodec_find_encoder(transmute(self.codec as u32))
}
pub unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
let codec = self.get_codec();
(*ctx).codec_id = (*codec).id;
(*ctx).codec_type = (*codec).type_;
@ -215,7 +266,7 @@ impl VideoVariant {
}
}
pub unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
let codec = self.get_codec();
(*params).codec_id = (*codec).id;
(*params).codec_type = (*codec).type_;
@ -232,7 +283,7 @@ impl VideoVariant {
(*params).profile = self.profile as libc::c_int;
}
pub unsafe fn to_stream(&self, stream: *mut AVStream) {
unsafe fn to_stream(&self, stream: *mut AVStream) {
(*stream).time_base = self.time_base();
(*stream).avg_frame_rate = AVRational {
num: self.fps as libc::c_int,
@ -242,28 +293,39 @@ impl VideoVariant {
num: self.fps as libc::c_int,
den: 1,
};
self.to_codec_params((*stream).codecpar);
}
}
impl AudioVariant {
pub fn time_base(&self) -> AVRational {
impl VariantStreamType for AudioVariant {
fn id(&self) -> Uuid {
self.id
}
fn src_index(&self) -> usize {
self.src_index
}
fn dst_index(&self) -> usize {
self.dst_index
}
fn time_base(&self) -> AVRational {
AVRational {
num: 1,
den: self.sample_rate as libc::c_int,
}
}
pub fn get_codec(&self) -> *const AVCodec {
unsafe {
if self.codec == AV_CODEC_ID_AAC as usize {
avcodec_find_encoder_by_name("libfdk_aac\0".as_ptr() as *const libc::c_char)
} else {
avcodec_find_encoder(transmute(self.codec as u32))
}
unsafe fn get_codec(&self) -> *const AVCodec {
if self.codec == AV_CODEC_ID_AAC as usize {
avcodec_find_encoder_by_name("libfdk_aac\0".as_ptr() as *const libc::c_char)
} else {
avcodec_find_encoder(transmute(self.codec as u32))
}
}
pub unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
unsafe fn to_codec_context(&self, ctx: *mut AVCodecContext) {
let codec = self.get_codec();
(*ctx).codec_id = (*codec).id;
(*ctx).codec_type = (*codec).type_;
@ -275,7 +337,7 @@ impl AudioVariant {
(*ctx).ch_layout = self.channel_layout();
}
pub unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
unsafe fn to_codec_params(&self, params: *mut AVCodecParameters) {
let codec = self.get_codec();
(*params).codec_id = (*codec).id;
(*params).codec_type = (*codec).type_;
@ -287,15 +349,19 @@ impl AudioVariant {
(*params).ch_layout = self.channel_layout();
}
pub unsafe fn to_stream(&self, stream: *mut AVStream) {
unsafe fn to_stream(&self, stream: *mut AVStream) {
(*stream).time_base = self.time_base();
(*stream).r_frame_rate = AVRational {
num: (*stream).time_base.den,
den: (*stream).time_base.num,
};
}
pub fn channel_layout(&self) -> AVChannelLayout {
self.to_codec_params((*stream).codecpar);
}
}
impl AudioVariant {
fn channel_layout(&self) -> AVChannelLayout {
AVChannelLayout {
order: AV_CHANNEL_ORDER_NATIVE,
nb_channels: 2,

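VariantStreamType turns what used to be inherent methods on VariantStream, VideoVariant and AudioVariant into a trait, which is what allows set_encoded_pkt_timing to be generic over the variant kind. A minimal sketch of code written against the trait (illustrative, not from the commit):

use ffmpeg_sys_next::AVPacket;
use crate::variant::VariantStreamType;

// Any VariantStreamType can stamp a packet with its output stream index and
// time base, whether it is an audio or a video variant.
unsafe fn stamp_packet<T: VariantStreamType>(pkt: *mut AVPacket, var: &T) {
    (*pkt).stream_index = var.dst_index() as libc::c_int;
    (*pkt).time_base = var.time_base();
}
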
View File

@ -1,8 +1,7 @@
use ffmpeg_sys_next::AVCodecID::{AV_CODEC_ID_AAC, AV_CODEC_ID_AAC_LATM};
use uuid::Uuid;
use crate::demux::info::{DemuxStreamInfo, StreamChannelType};
use crate::egress::hls::HLSEgressConfig;
use crate::egress::EgressConfig;
use crate::ingress::ConnectionInfo;
use crate::pipeline::{EgressType, PipelineConfig};
use crate::settings::Settings;
@ -24,40 +23,47 @@ impl Webhook {
pub fn configure(&self, stream_info: &DemuxStreamInfo) -> PipelineConfig {
let mut vars: Vec<VariantStream> = vec![];
vars.push(VariantStream::Video(VideoVariant {
id: Uuid::new_v4(),
src_index: 0,
dst_index: 0,
width: 1280,
height: 720,
fps: 30,
bitrate: 3_000_000,
codec: 27,
profile: 100,
level: 51,
keyframe_interval: 2,
}));
vars.push(VariantStream::Video(VideoVariant {
id: Uuid::new_v4(),
src_index: 0,
dst_index: 1,
width: 640,
height: 360,
fps: 30,
bitrate: 1_000_000,
codec: 27,
profile: 100,
level: 51,
keyframe_interval: 2,
}));
let has_audio = stream_info
if let Some(video_src) = stream_info
.channels
.iter()
.any(|c| c.channel_type == StreamChannelType::Audio);
if has_audio {
.find(|c| c.channel_type == StreamChannelType::Video)
{
vars.push(VariantStream::Video(VideoVariant {
id: Uuid::new_v4(),
src_index: video_src.index,
dst_index: 0,
width: 1280,
height: 720,
fps: video_src.fps as u16,
bitrate: 3_000_000,
codec: 27,
profile: 100,
level: 51,
keyframe_interval: 2,
}));
vars.push(VariantStream::Video(VideoVariant {
id: Uuid::new_v4(),
src_index: video_src.index,
dst_index: 1,
width: 640,
height: 360,
fps: video_src.fps as u16,
bitrate: 1_000_000,
codec: 27,
profile: 100,
level: 51,
keyframe_interval: 2,
}));
}
if let Some(audio_src) = stream_info
.channels
.iter()
.find(|c| c.channel_type == StreamChannelType::Audio)
{
vars.push(VariantStream::Audio(AudioVariant {
id: Uuid::new_v4(),
src_index: 1,
src_index: audio_src.index,
dst_index: 0,
bitrate: 320_000,
codec: 86018,
@ -67,7 +73,7 @@ impl Webhook {
}));
vars.push(VariantStream::Audio(AudioVariant {
id: Uuid::new_v4(),
src_index: 1,
src_index: audio_src.index,
dst_index: 1,
bitrate: 220_000,
codec: 86018,
@ -80,10 +86,18 @@ impl Webhook {
PipelineConfig {
id: Uuid::new_v4(),
recording: vec![],
egress: vec![EgressType::HLS(HLSEgressConfig {
out_dir: self.config.output_dir.clone(),
variants: vars,
})],
egress: vec![
EgressType::HLS(EgressConfig {
name: "HLS".to_owned(),
out_dir: self.config.output_dir.clone(),
variants: vars.clone(),
}),
/*EgressType::MPEGTS(EgressConfig {
name: "MPEGTS".to_owned(),
out_dir: self.config.output_dir.clone(),
variants: vars.clone(),
}),*/
],
}
}
}

View File

@ -3,4 +3,4 @@
ffmpeg \
-f lavfi -i "sine=frequency=1000:sample_rate=48000" \
-re -f lavfi -i testsrc -g 300 -r 60 -pix_fmt yuv420p -s 1280x720 \
-c:v h264 -b:v 2000k -c:a aac -b:a 192k $@ -f mpegts srt://localhost:3333
-c:v h264 -b:v 2000k -c:a aac -ac 2 -b:a 192k -fflags +genpts -f mpegts srt://localhost:3333