Mirror of https://github.com/v0l/zap-stream-core.git, synced 2025-06-16 08:59:35 +00:00
refactor: convert to workspace
4818 crates/core/Cargo.lock (generated, Executable file)
File diff suppressed because it is too large
44 crates/core/Cargo.toml (Normal file)
@@ -0,0 +1,44 @@
[package]
name = "zap-stream-core"
version = "0.1.0"
edition = "2021"

[features]
default = ["test-pattern", "srt", "rtmp"]
srt = ["dep:srt-tokio"]
rtmp = ["dep:rml_rtmp"]
local-overseer = [] # WIP
webhook-overseer = [] # WIP
test-pattern = [
    "dep:resvg",
    "dep:usvg",
    "dep:tiny-skia",
    "dep:fontdue",
    "dep:ringbuf",
]

[dependencies]
ffmpeg-rs-raw.workspace = true
tokio.workspace = true
anyhow.workspace = true
async-trait.workspace = true
log.workspace = true
uuid.workspace = true
serde.workspace = true
hex.workspace = true
itertools.workspace = true
futures-util = "0.3.30"
m3u8-rs = "6.0.0"

# srt
srt-tokio = { version = "0.4.3", optional = true }

# rtmp
rml_rtmp = { version = "0.8.0", optional = true }

# test-pattern
resvg = { version = "0.44.0", optional = true }
usvg = { version = "0.44.0", optional = true }
tiny-skia = { version = "0.11.4", optional = true }
fontdue = { version = "0.9.2", optional = true }
ringbuf = { version = "0.4.7", optional = true }
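
The `*.workspace = true` entries above resolve against the workspace root manifest, which is outside this diff. A minimal sketch of what such a root Cargo.toml could look like (illustrative only, not part of this commit; the versions and features are assumptions):

[workspace]
resolver = "2"
members = ["crates/core"]

[workspace.dependencies]
# Hypothetical entries; the real root manifest defines the actual versions and features
tokio = { version = "1", features = ["rt-multi-thread", "macros"] }
anyhow = "1"
log = "0.4"
uuid = { version = "1", features = ["v4"] }
serde = { version = "1", features = ["derive"] }
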
BIN crates/core/SourceCodePro-Regular.ttf (Normal file)
Binary file not shown.
2 crates/core/dev-setup/db.sql (Normal file)
@@ -0,0 +1,2 @@
create database route96;
create database zap_stream;
29 crates/core/dev-setup/route96.toml (Normal file)
@@ -0,0 +1,29 @@
# Listen address for webserver
listen = "0.0.0.0:8000"

# Database connection string (MySQL)
database = "mysql://root:root@db:3306/route96"

# Directory to store uploads
storage_dir = "./data"

# Maximum supported filesize for uploading
max_upload_bytes = 5e+9

# Public facing url
public_url = "http://localhost:8881"

# Whitelisted pubkeys, leave out to disable
# whitelist = ["63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed"]

# Path for ViT(224) image model (https://huggingface.co/google/vit-base-patch16-224)
# vit_model_path = "model.safetensors"

# Webhook api endpoint
# webhook_url = "https://api.snort.social/api/v1/media/webhook"

# Analytics support
# plausible_url = "https://plausible.com/"

# Support legacy void
# void_cat_database = "postgres://postgres:postgres@localhost:41911/void"
144 crates/core/dev-setup/strfry.conf (Normal file)
@@ -0,0 +1,144 @@
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "./strfry-db/"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
    mapsize = 10995116277760

    # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
    noReadAhead = false
}

events {
    # Maximum size of normalised JSON, in bytes
    maxEventSize = 65536

    # Events newer than this will be rejected
    rejectEventsNewerThanSeconds = 900

    # Events older than this will be rejected
    rejectEventsOlderThanSeconds = 94608000

    # Ephemeral events older than this will be rejected
    rejectEphemeralEventsOlderThanSeconds = 60

    # Ephemeral events will be deleted from the DB when older than this
    ephemeralEventsLifetimeSeconds = 300

    # Maximum number of tags allowed
    maxNumTags = 2000

    # Maximum size for tag values, in bytes
    maxTagValSize = 1024
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 7777

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 0

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry default"

        # NIP-11: Detailed information about relay, free-form
        description = "This is a strfry instance."

        # NIP-11: Administrative nostr pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative administrative contact (email, website, etc)
        contact = ""

        # NIP-11: URL pointing to an image to be used as an icon for the relay
        icon = ""

        # List of supported lists as JSON array, or empty string to use default. Example: "[1,2]"
        nips = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = ""
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false

        # Log reason for invalid event rejection? Can be disabled to silence excessive logging
        invalidEvents = true
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # negentropy threads: Handle negentropy protocol messages (restart required)
        negentropy = 2
    }

    negentropy {
        # Support negentropy protocol messages
        enabled = true

        # Maximum records that sync will process before returning an error
        maxSyncEvents = 1000000
    }
}
30 crates/core/src/egress/hls.rs (Normal file)
@@ -0,0 +1,30 @@
use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPacket;
use uuid::Uuid;

use crate::egress::{Egress, EgressResult};
use crate::mux::HlsMuxer;

/// Alias the muxer directly
pub type HlsEgress = HlsMuxer;

impl Egress for HlsMuxer {
    unsafe fn process_pkt(
        &mut self,
        packet: *mut AVPacket,
        variant: &Uuid,
    ) -> Result<EgressResult> {
        if let Some(ns) = self.mux_packet(packet, variant)? {
            Ok(EgressResult::NewSegment(ns))
        } else {
            Ok(EgressResult::None)
        }
    }

    unsafe fn reset(&mut self) -> Result<()> {
        for var in &mut self.variants {
            var.reset()?
        }
        Ok(())
    }
}
43 crates/core/src/egress/mod.rs (Normal file)
@@ -0,0 +1,43 @@
use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPacket;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::path::PathBuf;
use uuid::Uuid;

pub mod hls;
pub mod recorder;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EgressConfig {
    pub name: String,
    /// Which variants will be used in this muxer
    pub variants: HashSet<Uuid>,
}

pub trait Egress {
    unsafe fn process_pkt(&mut self, packet: *mut AVPacket, variant: &Uuid)
        -> Result<EgressResult>;
    unsafe fn reset(&mut self) -> Result<()>;
}

#[derive(Debug, Clone)]
pub enum EgressResult {
    /// Nothing to report
    None,
    /// A new segment was created
    NewSegment(NewSegment),
}

/// Basic details of new segment created by a muxer
#[derive(Debug, Clone)]
pub struct NewSegment {
    /// The id of the variant (video or audio)
    pub variant: Uuid,
    /// Segment index
    pub idx: u64,
    /// Duration in seconds
    pub duration: f32,
    /// Path on disk to the segment file
    pub path: PathBuf,
}
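
Not part of this commit: a minimal sketch of a third-party `Egress` implementor against the trait above (a hypothetical packet counter), showing the extension point the trait provides:

use std::collections::HashMap;

use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPacket;
use uuid::Uuid;
use zap_stream_core::egress::{Egress, EgressResult};

/// Hypothetical egress that only counts packets per variant.
#[derive(Default)]
struct CountingEgress {
    counts: HashMap<Uuid, u64>,
}

impl Egress for CountingEgress {
    unsafe fn process_pkt(
        &mut self,
        _packet: *mut AVPacket,
        variant: &Uuid,
    ) -> Result<EgressResult> {
        *self.counts.entry(*variant).or_insert(0) += 1;
        // Nothing is written to disk, so there is never a new segment to report
        Ok(EgressResult::None)
    }

    unsafe fn reset(&mut self) -> Result<()> {
        self.counts.clear();
        Ok(())
    }
}
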
70 crates/core/src/egress/recorder.rs (Normal file)
@@ -0,0 +1,70 @@
use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPacket;
use ffmpeg_rs_raw::{Encoder, Muxer};
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
use uuid::Uuid;

use crate::egress::{Egress, EgressResult};
use crate::variant::{StreamMapping, VariantStream};

pub struct RecorderEgress {
    /// Pipeline ID
    id: Uuid,
    /// Internal muxer writing the output packets
    muxer: Muxer,
    /// Mapping from Variant ID to stream index
    var_map: HashMap<Uuid, i32>,
}

impl RecorderEgress {
    pub fn new<'a>(
        id: &Uuid,
        out_dir: &str,
        variants: impl Iterator<Item = (&'a VariantStream, &'a Encoder)>,
    ) -> Result<Self> {
        let base = PathBuf::from(out_dir).join(id.to_string());

        let out_file = base.join("recording.ts");
        fs::create_dir_all(&base)?;

        let mut var_map = HashMap::new();
        let muxer = unsafe {
            let mut m = Muxer::builder()
                .with_output_path(out_file.to_str().unwrap(), None)?
                .build()?;
            for (var, enc) in variants {
                let stream = m.add_stream_encoder(enc)?;
                var_map.insert(var.id(), (*stream).index);
            }
            m.open(None)?;
            m
        };
        Ok(Self {
            id: *id,
            muxer,
            var_map,
        })
    }
}

impl Egress for RecorderEgress {
    unsafe fn process_pkt(
        &mut self,
        packet: *mut AVPacket,
        variant: &Uuid,
    ) -> Result<EgressResult> {
        if let Some(stream) = self.var_map.get(variant) {
            // very important for muxer to know which stream this pkt belongs to
            (*packet).stream_index = *stream;

            self.muxer.write_packet(packet)?;
        }
        Ok(EgressResult::None)
    }

    unsafe fn reset(&mut self) -> Result<()> {
        self.muxer.close()
    }
}
28 crates/core/src/ingress/file.rs (Normal file)
@@ -0,0 +1,28 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::Result;
use log::info;
use std::path::PathBuf;
use std::sync::Arc;
use tokio::runtime::Handle;

pub async fn listen(out_dir: String, path: PathBuf, overseer: Arc<dyn Overseer>) -> Result<()> {
    info!("Sending file: {}", path.display());

    let info = ConnectionInfo {
        ip_addr: "127.0.0.1:6969".to_string(),
        endpoint: "file-input".to_owned(),
        app_name: "".to_string(),
        key: "test".to_string(),
    };
    let file = std::fs::File::open(path)?;
    spawn_pipeline(
        Handle::current(),
        info,
        out_dir.clone(),
        overseer.clone(),
        Box::new(file),
    );

    Ok(())
}
69 crates/core/src/ingress/mod.rs (Normal file)
@@ -0,0 +1,69 @@
use crate::overseer::Overseer;
use crate::pipeline::runner::PipelineRunner;
use log::{error, info};
use serde::{Deserialize, Serialize};
use std::io::Read;
use std::sync::Arc;
use tokio::runtime::Handle;

pub mod file;
#[cfg(feature = "rtmp")]
pub mod rtmp;
#[cfg(feature = "srt")]
pub mod srt;
pub mod tcp;
#[cfg(feature = "test-pattern")]
pub mod test;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ConnectionInfo {
    /// Endpoint of the ingress
    pub endpoint: String,

    /// IP address of the connection
    pub ip_addr: String,

    /// App name, empty unless RTMP ingress
    pub app_name: String,

    /// Stream key
    pub key: String,
}

pub fn spawn_pipeline(
    handle: Handle,
    info: ConnectionInfo,
    out_dir: String,
    seer: Arc<dyn Overseer>,
    reader: Box<dyn Read + Send>,
) {
    info!("New client connected: {}", &info.ip_addr);
    let seer = seer.clone();
    let out_dir = out_dir.to_string();
    std::thread::spawn(move || unsafe {
        match PipelineRunner::new(handle, out_dir, seer, info, reader) {
            Ok(mut pl) => loop {
                match pl.run() {
                    Ok(c) => {
                        if !c {
                            if let Err(e) = pl.flush() {
                                error!("Pipeline flush failed: {}", e);
                            }
                            break;
                        }
                    }
                    Err(e) => {
                        if let Err(e) = pl.flush() {
                            error!("Pipeline flush failed: {}", e);
                        }
                        error!("Pipeline run failed: {}", e);
                        break;
                    }
                }
            },
            Err(e) => {
                error!("Failed to create PipelineRunner: {}", e);
            }
        }
    });
}
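
Not part of this commit: since `spawn_pipeline` accepts any `Box<dyn Read + Send>`, an ad-hoc ingress needs no new module. A sketch feeding it from stdin (assumes it is called inside a tokio runtime; the overseer value and output directory are placeholders):

use std::sync::Arc;
use tokio::runtime::Handle;
use zap_stream_core::ingress::{spawn_pipeline, ConnectionInfo};
use zap_stream_core::overseer::Overseer;

fn pipe_stdin(overseer: Arc<dyn Overseer>) {
    let info = ConnectionInfo {
        endpoint: "stdin".to_string(),
        ip_addr: "127.0.0.1".to_string(),
        app_name: "".to_string(),
        key: "test".to_string(),
    };
    // Handle::current() panics if this is not called from within a tokio runtime
    spawn_pipeline(
        Handle::current(),
        info,
        "./out".to_string(),
        overseer,
        Box::new(std::io::stdin()),
    );
}
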
238 crates/core/src/ingress/rtmp.rs (Normal file)
@@ -0,0 +1,238 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::{bail, Result};
use log::{error, info};
use rml_rtmp::handshake::{Handshake, HandshakeProcessResult, PeerType};
use rml_rtmp::sessions::{
    ServerSession, ServerSessionConfig, ServerSessionEvent, ServerSessionResult,
};
use std::collections::VecDeque;
use std::io::{ErrorKind, Read, Write};
use std::sync::Arc;
use std::time::Duration;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::runtime::Handle;
use tokio::time::Instant;

#[derive(PartialEq, Eq, Clone, Hash)]
struct RtmpPublishedStream(String, String);

struct RtmpClient {
    socket: std::net::TcpStream,
    media_buf: Vec<u8>,
    session: ServerSession,
    msg_queue: VecDeque<ServerSessionResult>,
    reader_buf: [u8; 4096],
    pub published_stream: Option<RtmpPublishedStream>,
}

impl RtmpClient {
    async fn start(mut socket: TcpStream) -> Result<Self> {
        let mut hs = Handshake::new(PeerType::Server);

        let exchange = hs.generate_outbound_p0_and_p1()?;
        socket.write_all(&exchange).await?;

        let mut buf = [0; 4096];
        loop {
            let r = socket.read(&mut buf).await?;
            if r == 0 {
                bail!("EOF reached while reading");
            }

            match hs.process_bytes(&buf[..r])? {
                HandshakeProcessResult::InProgress { response_bytes } => {
                    socket.write_all(&response_bytes).await?;
                }
                HandshakeProcessResult::Completed {
                    response_bytes,
                    remaining_bytes,
                } => {
                    socket.write_all(&response_bytes).await?;

                    let cfg = ServerSessionConfig::new();
                    let (mut ses, mut res) = ServerSession::new(cfg)?;
                    let q = ses.handle_input(&remaining_bytes)?;
                    res.extend(q);

                    let ret = Self {
                        socket: socket.into_std()?,
                        media_buf: vec![],
                        session: ses,
                        msg_queue: VecDeque::from(res),
                        reader_buf: [0; 4096],
                        published_stream: None,
                    };

                    return Ok(ret);
                }
            }
        }
    }

    /// Read data until we get the publish request
    pub fn read_until_publish_request(&mut self, timeout: Duration) -> Result<()> {
        let start = Instant::now();
        while self.published_stream.is_none() {
            if (Instant::now() - start) > timeout {
                bail!("Timed out waiting for publish request");
            }
            self.read_data()?;
        }
        Ok(())
    }

    fn read_data(&mut self) -> Result<()> {
        let r = match self.socket.read(&mut self.reader_buf) {
            Ok(r) => r,
            Err(e) => {
                return match e.kind() {
                    ErrorKind::WouldBlock => Ok(()),
                    ErrorKind::Interrupted => Ok(()),
                    _ => Err(anyhow::Error::new(e)),
                };
            }
        };
        if r == 0 {
            bail!("EOF");
        }

        let mx = self.session.handle_input(&self.reader_buf[..r])?;
        if !mx.is_empty() {
            self.msg_queue.extend(mx);
            self.process_msg_queue()?;
        }
        Ok(())
    }

    fn process_msg_queue(&mut self) -> Result<()> {
        while let Some(msg) = self.msg_queue.pop_front() {
            match msg {
                ServerSessionResult::OutboundResponse(data) => {
                    self.socket.write_all(&data.bytes)?
                }
                ServerSessionResult::RaisedEvent(ev) => self.handle_event(ev)?,
                ServerSessionResult::UnhandleableMessageReceived(m) => {
                    // treat any non-flv streams as raw media stream in rtmp
                    self.media_buf.extend(&m.data);
                }
            }
        }
        Ok(())
    }

    fn handle_event(&mut self, event: ServerSessionEvent) -> Result<()> {
        match event {
            ServerSessionEvent::ClientChunkSizeChanged { new_chunk_size } => {
                info!("New client chunk size: {}", new_chunk_size);
            }
            ServerSessionEvent::ConnectionRequested { request_id, .. } => {
                let mx = self.session.accept_request(request_id)?;
                self.msg_queue.extend(mx);
            }
            ServerSessionEvent::ReleaseStreamRequested { .. } => {}
            ServerSessionEvent::PublishStreamRequested {
                request_id,
                app_name,
                stream_key,
                mode,
            } => {
                if self.published_stream.is_some() {
                    let mx =
                        self.session
                            .reject_request(request_id, "0", "stream already published")?;
                    self.msg_queue.extend(mx);
                } else {
                    let mx = self.session.accept_request(request_id)?;
                    self.msg_queue.extend(mx);
                    info!(
                        "Published stream request: {app_name}/{stream_key} [{:?}]",
                        mode
                    );
                    self.published_stream = Some(RtmpPublishedStream(app_name, stream_key));
                }
            }
            ServerSessionEvent::PublishStreamFinished { .. } => {}
            ServerSessionEvent::StreamMetadataChanged {
                app_name,
                stream_key,
                metadata,
            } => {
                info!(
                    "Metadata configured: {}/{} {:?}",
                    app_name, stream_key, metadata
                );
            }
            ServerSessionEvent::AudioDataReceived { data, .. } => {
                self.media_buf.extend(data);
            }
            ServerSessionEvent::VideoDataReceived { data, .. } => {
                self.media_buf.extend(data);
            }
            ServerSessionEvent::UnhandleableAmf0Command { .. } => {}
            ServerSessionEvent::PlayStreamRequested { request_id, .. } => {
                let mx = self
                    .session
                    .reject_request(request_id, "0", "playback not supported")?;
                self.msg_queue.extend(mx);
            }
            ServerSessionEvent::PlayStreamFinished { .. } => {}
            ServerSessionEvent::AcknowledgementReceived { .. } => {}
            ServerSessionEvent::PingResponseReceived { .. } => {}
        }
        Ok(())
    }
}

impl Read for RtmpClient {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        // block this thread until something comes into [media_buf]
        while self.media_buf.is_empty() {
            if let Err(e) = self.read_data() {
                error!("Error reading data: {}", e);
                return Ok(0);
            };
        }

        let to_read = buf.len().min(self.media_buf.len());
        let drain = self.media_buf.drain(..to_read);
        buf[..to_read].copy_from_slice(drain.as_slice());
        Ok(to_read)
    }
}

pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>) -> Result<()> {
    let listener = TcpListener::bind(&addr).await?;

    info!("RTMP listening on: {}", &addr);
    while let Ok((socket, ip)) = listener.accept().await {
        let mut cc = RtmpClient::start(socket).await?;
        let addr = addr.clone();
        let overseer = overseer.clone();
        let out_dir = out_dir.clone();
        let handle = Handle::current();
        std::thread::Builder::new()
            .name("rtmp-client".to_string())
            .spawn(move || {
                if let Err(e) = cc.read_until_publish_request(Duration::from_secs(10)) {
                    error!("{}", e);
                } else {
                    let pr = cc.published_stream.as_ref().unwrap();
                    let info = ConnectionInfo {
                        ip_addr: ip.to_string(),
                        endpoint: addr.clone(),
                        app_name: pr.0.clone(),
                        key: pr.1.clone(),
                    };
                    spawn_pipeline(
                        handle,
                        info,
                        out_dir.clone(),
                        overseer.clone(),
                        Box::new(cc),
                    );
                }
            })?;
    }
    Ok(())
}
66 crates/core/src/ingress/srt.rs (Normal file)
@@ -0,0 +1,66 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::Result;
use futures_util::stream::FusedStream;
use futures_util::StreamExt;
use log::info;
use srt_tokio::{SrtListener, SrtSocket};
use std::io::Read;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::runtime::Handle;

pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>) -> Result<()> {
    let binder: SocketAddr = addr.parse()?;
    let (_binding, mut packets) = SrtListener::builder().bind(binder).await?;

    info!("SRT listening on: {}", &addr);
    while let Some(request) = packets.incoming().next().await {
        let socket = request.accept(None).await?;
        let info = ConnectionInfo {
            endpoint: addr.clone(),
            ip_addr: socket.settings().remote.to_string(),
            app_name: "".to_string(),
            key: socket
                .settings()
                .stream_id
                .as_ref()
                .map_or(String::new(), |s| s.to_string()),
        };
        spawn_pipeline(
            Handle::current(),
            info,
            out_dir.clone(),
            overseer.clone(),
            Box::new(SrtReader {
                handle: Handle::current(),
                socket,
                buf: Vec::with_capacity(4096),
            }),
        );
    }
    Ok(())
}

struct SrtReader {
    pub handle: Handle,
    pub socket: SrtSocket,
    pub buf: Vec<u8>,
}

impl Read for SrtReader {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        let (mut rx, _) = self.socket.split_mut();
        while self.buf.len() < buf.len() {
            if rx.is_terminated() {
                return Ok(0);
            }
            if let Some((_, data)) = self.handle.block_on(rx.next()) {
                self.buf.extend(data.iter().as_slice());
            }
        }
        let drain = self.buf.drain(..buf.len());
        buf.copy_from_slice(drain.as_slice());
        Ok(buf.len())
    }
}
30 crates/core/src/ingress/tcp.rs (Normal file)
@@ -0,0 +1,30 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::Result;
use log::info;
use std::sync::Arc;
use tokio::net::TcpListener;
use tokio::runtime::Handle;

pub async fn listen(out_dir: String, addr: String, overseer: Arc<dyn Overseer>) -> Result<()> {
    let listener = TcpListener::bind(&addr).await?;

    info!("TCP listening on: {}", &addr);
    while let Ok((socket, ip)) = listener.accept().await {
        let info = ConnectionInfo {
            ip_addr: ip.to_string(),
            endpoint: addr.clone(),
            app_name: "".to_string(),
            key: "no-key-tcp".to_string(),
        };
        let socket = socket.into_std()?;
        spawn_pipeline(
            Handle::current(),
            info,
            out_dir.clone(),
            overseer.clone(),
            Box::new(socket),
        );
    }
    Ok(())
}
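
Not part of this commit: a sketch of feeding the TCP ingress from another process, assuming the listener above is bound to 127.0.0.1:9000 and `input.ts` is a raw MPEG-TS capture (both hypothetical):

use std::io::{copy, BufReader};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    let mut src = BufReader::new(std::fs::File::open("input.ts")?);
    let mut conn = TcpStream::connect("127.0.0.1:9000")?;
    // Raw bytes over TCP; the pipeline's demuxer probes the container format
    copy(&mut src, &mut conn)?;
    Ok(())
}
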
187 crates/core/src/ingress/test.rs (Normal file)
@@ -0,0 +1,187 @@
use crate::ingress::{spawn_pipeline, ConnectionInfo};
use crate::overseer::Overseer;
use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVColorSpace::AVCOL_SPC_RGB;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::{AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV420P};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
    av_frame_alloc, av_frame_free, av_frame_get_buffer, av_packet_free, AV_PROFILE_H264_MAIN,
};
use ffmpeg_rs_raw::{Encoder, Muxer, Scaler};
use fontdue::layout::{CoordinateSystem, Layout, TextStyle};
use fontdue::Font;
use log::info;
use ringbuf::traits::{Observer, Split};
use ringbuf::{HeapCons, HeapRb};
use std::io::Read;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tiny_skia::Pixmap;
use tokio::runtime::Handle;

pub async fn listen(out_dir: String, overseer: Arc<dyn Overseer>) -> Result<()> {
    info!("Test pattern enabled");

    let info = ConnectionInfo {
        endpoint: "test-pattern".to_string(),
        ip_addr: "test-pattern".to_string(),
        app_name: "".to_string(),
        key: "test".to_string(),
    };
    let src = TestPatternSrc::new()?;
    spawn_pipeline(
        Handle::current(),
        info,
        out_dir.clone(),
        overseer.clone(),
        Box::new(src),
    );
    Ok(())
}

struct TestPatternSrc {
    encoder: Encoder,
    scaler: Scaler,
    muxer: Muxer,
    background: Pixmap,
    font: [Font; 1],
    frame_no: u64,
    start: Instant,
    reader: HeapCons<u8>,
}

unsafe impl Send for TestPatternSrc {}

impl TestPatternSrc {
    pub fn new() -> Result<Self> {
        let scaler = Scaler::new();
        let encoder = unsafe {
            Encoder::new_with_name("libx264")?
                .with_stream_index(0)
                .with_framerate(30.0)?
                .with_bitrate(1_000_000)
                .with_pix_fmt(AV_PIX_FMT_YUV420P)
                .with_width(1280)
                .with_height(720)
                .with_level(51)
                .with_profile(AV_PROFILE_H264_MAIN)
                .open(None)?
        };

        let svg_data = include_bytes!("../../test.svg");
        let tree = usvg::Tree::from_data(svg_data, &Default::default())?;
        let mut pixmap = Pixmap::new(1280, 720).unwrap();
        let render_ts = tiny_skia::Transform::from_scale(
            pixmap.width() as f32 / tree.size().width(),
            pixmap.height() as f32 / tree.size().height(),
        );
        resvg::render(&tree, render_ts, &mut pixmap.as_mut());

        let font = include_bytes!("../../SourceCodePro-Regular.ttf") as &[u8];
        let font = Font::from_bytes(font, Default::default()).unwrap();

        let buf = HeapRb::new(1024 * 1024);
        let (writer, reader) = buf.split();

        let muxer = unsafe {
            let mut m = Muxer::builder()
                .with_output_write(writer, Some("mpegts"))?
                .with_stream_encoder(&encoder)?
                .build()?;
            m.open(None)?;
            m
        };

        Ok(Self {
            encoder,
            scaler,
            muxer,
            background: pixmap,
            font: [font],
            frame_no: 0,
            start: Instant::now(),
            reader,
        })
    }

    pub unsafe fn next_pkt(&mut self) -> Result<()> {
        let stream_time = Duration::from_secs_f64(self.frame_no as f64 / 30.0);
        let real_time = Instant::now().duration_since(self.start);
        let wait_time = if stream_time > real_time {
            stream_time - real_time
        } else {
            Duration::new(0, 0)
        };
        if !wait_time.is_zero() {
            std::thread::sleep(wait_time);
        }

        self.frame_no += 1;

        let mut src_frame = unsafe {
            let src_frame = av_frame_alloc();

            (*src_frame).width = 1280;
            (*src_frame).height = 720;
            (*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
            (*src_frame).key_frame = 1;
            (*src_frame).colorspace = AVCOL_SPC_RGB;
            (*src_frame).format = AV_PIX_FMT_RGBA as _;
            (*src_frame).pts = self.frame_no as i64;
            (*src_frame).duration = 1;
            av_frame_get_buffer(src_frame, 0);

            self.background
                .data()
                .as_ptr()
                .copy_to((*src_frame).data[0] as *mut _, 1280 * 720 * 4);
            src_frame
        };
        let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
        layout.clear();
        layout.append(
            &self.font,
            &TextStyle::new(&format!("frame={}", self.frame_no), 40.0, 0),
        );
        for g in layout.glyphs() {
            let (metrics, bitmap) = self.font[0].rasterize_config_subpixel(g.key);
            for y in 0..metrics.height {
                for x in 0..metrics.width {
                    let dst_x = x + g.x as usize;
                    let dst_y = y + g.y as usize;
                    let offset_src = (x + y * metrics.width) * 3;
                    unsafe {
                        let offset_dst = 4 * dst_x + dst_y * (*src_frame).linesize[0] as usize;
                        let pixel_dst = (*src_frame).data[0].add(offset_dst);
                        *pixel_dst.offset(0) = bitmap[offset_src];
                        *pixel_dst.offset(1) = bitmap[offset_src + 1];
                        *pixel_dst.offset(2) = bitmap[offset_src + 2];
                    }
                }
            }
        }

        // scale/encode
        let mut frame = self
            .scaler
            .process_frame(src_frame, 1280, 720, AV_PIX_FMT_YUV420P)?;
        for mut pkt in self.encoder.encode_frame(frame)? {
            self.muxer.write_packet(pkt)?;
            av_packet_free(&mut pkt);
        }
        av_frame_free(&mut frame);
        av_frame_free(&mut src_frame);
        Ok(())
    }
}

impl Read for TestPatternSrc {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        unsafe {
            while self.reader.occupied_len() < buf.len() {
                self.next_pkt().map_err(std::io::Error::other)?;
            }
        }
        self.reader.read(buf)
    }
}
6 crates/core/src/lib.rs (Normal file)
@@ -0,0 +1,6 @@
pub mod egress;
pub mod ingress;
pub mod mux;
pub mod overseer;
pub mod pipeline;
pub mod variant;
443 crates/core/src/mux/hls.rs (Normal file)
@@ -0,0 +1,443 @@
use crate::egress::NewSegment;
use crate::variant::{StreamMapping, VariantStream};
use anyhow::{bail, Result};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVCodecID::AV_CODEC_ID_H264;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVMediaType::AVMEDIA_TYPE_VIDEO;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
    av_free, av_opt_set, av_q2d, av_write_frame, avio_flush, avio_open, AVPacket, AVStream,
    AVIO_FLAG_WRITE, AV_PKT_FLAG_KEY,
};
use ffmpeg_rs_raw::{cstr, Encoder, Muxer};
use itertools::Itertools;
use log::{info, warn};
use m3u8_rs::MediaSegment;
use std::collections::HashMap;
use std::fmt::Display;
use std::fs::File;
use std::path::PathBuf;
use std::ptr;
use uuid::Uuid;

#[derive(Clone, Copy)]
pub enum SegmentType {
    MPEGTS,
    FMP4,
}

pub enum HlsVariantStream {
    Video {
        group: usize,
        index: usize,
        id: Uuid,
    },
    Audio {
        group: usize,
        index: usize,
        id: Uuid,
    },
    Subtitle {
        group: usize,
        index: usize,
        id: Uuid,
    },
}

impl HlsVariantStream {
    pub fn id(&self) -> &Uuid {
        match self {
            HlsVariantStream::Video { id, .. } => id,
            HlsVariantStream::Audio { id, .. } => id,
            HlsVariantStream::Subtitle { id, .. } => id,
        }
    }

    pub fn index(&self) -> &usize {
        match self {
            HlsVariantStream::Video { index, .. } => index,
            HlsVariantStream::Audio { index, .. } => index,
            HlsVariantStream::Subtitle { index, .. } => index,
        }
    }
}

impl Display for HlsVariantStream {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            HlsVariantStream::Video { index, .. } => write!(f, "v:{}", index),
            HlsVariantStream::Audio { index, .. } => write!(f, "a:{}", index),
            HlsVariantStream::Subtitle { index, .. } => write!(f, "s:{}", index),
        }
    }
}

pub struct HlsVariant {
    /// Name of this variant (720p)
    pub name: String,
    /// MPEG-TS muxer for this variant
    pub mux: Muxer,
    /// List of streams ids in this variant
    pub streams: Vec<HlsVariantStream>,
    /// Segment length in seconds
    pub segment_length: f32,
    /// Current segment index
    pub idx: u64,
    /// Current segment start time in seconds (duration)
    pub pkt_start: f32,
    /// Output directory (base)
    pub out_dir: String,
    /// List of segments to be included in the playlist
    pub segments: Vec<SegmentInfo>,
    /// Type of segments to create
    pub segment_type: SegmentType,
}

struct SegmentInfo(u64, f32, SegmentType);

impl SegmentInfo {
    fn to_media_segment(&self) -> MediaSegment {
        MediaSegment {
            uri: self.filename(),
            duration: self.1,
            title: None,
            ..MediaSegment::default()
        }
    }

    fn filename(&self) -> String {
        HlsVariant::segment_name(self.2, self.0)
    }
}

impl HlsVariant {
    pub fn new<'a>(
        out_dir: &'a str,
        segment_length: f32,
        group: usize,
        encoded_vars: impl Iterator<Item = (&'a VariantStream, &'a Encoder)>,
        segment_type: SegmentType,
    ) -> Result<Self> {
        let name = format!("stream_{}", group);
        let first_seg = Self::map_segment_path(out_dir, &name, 1, segment_type);
        std::fs::create_dir_all(PathBuf::from(&first_seg).parent().unwrap())?;

        let mut opts = HashMap::new();
        if let SegmentType::FMP4 = segment_type {
            opts.insert("fflags".to_string(), "-autobsf".to_string());
            opts.insert(
                "movflags".to_string(),
                "+frag_custom+dash+delay_moov".to_string(),
            );
        };
        let mut mux = unsafe {
            Muxer::builder()
                .with_output_path(
                    first_seg.as_str(),
                    match segment_type {
                        SegmentType::MPEGTS => Some("mpegts"),
                        SegmentType::FMP4 => Some("mp4"),
                    },
                )?
                .build()?
        };
        let mut streams = Vec::new();
        for (var, enc) in encoded_vars {
            match var {
                VariantStream::Video(v) => unsafe {
                    let stream = mux.add_stream_encoder(enc)?;
                    streams.push(HlsVariantStream::Video {
                        group,
                        index: (*stream).index as usize,
                        id: v.id(),
                    })
                },
                VariantStream::Audio(a) => unsafe {
                    let stream = mux.add_stream_encoder(enc)?;
                    streams.push(HlsVariantStream::Audio {
                        group,
                        index: (*stream).index as usize,
                        id: a.id(),
                    })
                },
                VariantStream::Subtitle(s) => unsafe {
                    let stream = mux.add_stream_encoder(enc)?;
                    streams.push(HlsVariantStream::Subtitle {
                        group,
                        index: (*stream).index as usize,
                        id: s.id(),
                    })
                },
                _ => panic!("unsupported variant stream"),
            }
        }
        unsafe {
            mux.open(Some(opts))?;
        }
        Ok(Self {
            name: name.clone(),
            segment_length,
            mux,
            streams,
            idx: 1,
            pkt_start: 0.0,
            segments: Vec::from([SegmentInfo(1, segment_length, segment_type)]),
            out_dir: out_dir.to_string(),
            segment_type,
        })
    }

    pub fn segment_name(t: SegmentType, idx: u64) -> String {
        match t {
            SegmentType::MPEGTS => format!("{}.ts", idx),
            SegmentType::FMP4 => format!("{}.m4s", idx),
        }
    }

    pub fn out_dir(&self) -> PathBuf {
        PathBuf::from(&self.out_dir).join(&self.name)
    }

    pub fn map_segment_path(out_dir: &str, name: &str, idx: u64, typ: SegmentType) -> String {
        PathBuf::from(out_dir)
            .join(name)
            .join(Self::segment_name(typ, idx))
            .to_string_lossy()
            .to_string()
    }

    /// Mux a packet created by the encoder for this variant
    pub unsafe fn mux_packet(&mut self, pkt: *mut AVPacket) -> Result<Option<NewSegment>> {
        let pkt_q = av_q2d((*pkt).time_base);
        // time of this packet in seconds
        let pkt_time = (*pkt).pts as f32 * pkt_q as f32;
        // what segment this pkt should be in (index)
        let pkt_seg = 1 + (pkt_time / self.segment_length).floor() as u64;

        let mut result = None;
        let pkt_stream = *(*self.mux.context())
            .streams
            .add((*pkt).stream_index as usize);
        let can_split = (*pkt).flags & AV_PKT_FLAG_KEY == AV_PKT_FLAG_KEY
            && (*(*pkt_stream).codecpar).codec_type == AVMEDIA_TYPE_VIDEO;
        if pkt_seg != self.idx && can_split {
            result = Some(self.split_next_seg(pkt_time)?);
        }
        self.mux.write_packet(pkt)?;
        Ok(result)
    }

    pub unsafe fn reset(&mut self) -> Result<()> {
        self.mux.close()
    }

    unsafe fn split_next_seg(&mut self, pkt_time: f32) -> Result<NewSegment> {
        self.idx += 1;

        // Manually reset muxer avio
        let ctx = self.mux.context();
        av_write_frame(ctx, ptr::null_mut());
        avio_flush((*ctx).pb);
        av_free((*ctx).url as *mut _);

        let next_seg_url =
            Self::map_segment_path(&self.out_dir, &self.name, self.idx, self.segment_type);
        (*ctx).url = cstr!(next_seg_url.as_str());

        let ret = avio_open(&mut (*ctx).pb, (*ctx).url, AVIO_FLAG_WRITE);
        if ret < 0 {
            bail!("Failed to re-init avio");
        }

        // tell muxer it needs to write headers again
        av_opt_set(
            (*ctx).priv_data,
            cstr!("events_flags"),
            cstr!("resend_headers"),
            0,
        );

        let duration = pkt_time - self.pkt_start;
        info!("Writing segment {} [{}s]", &next_seg_url, duration);
        if let Err(e) = self.add_segment(self.idx, duration) {
            warn!("Failed to update playlist: {}", e);
        }

        /// Get the video variant for this group
        /// since this could actually be audio which would not be useful for
        /// [Overseer] impl
        let video_var = self.video_stream().unwrap_or(self.streams.first().unwrap());

        // emit result of the previously completed segment,
        let prev_seg = self.idx - 1;
        let ret = NewSegment {
            variant: *video_var.id(),
            idx: prev_seg,
            duration,
            path: PathBuf::from(Self::map_segment_path(
                &self.out_dir,
                &self.name,
                prev_seg,
                self.segment_type,
            )),
        };
        self.pkt_start = pkt_time;
        Ok(ret)
    }

    fn video_stream(&self) -> Option<&HlsVariantStream> {
        self.streams
            .iter()
            .find(|a| matches!(*a, HlsVariantStream::Video { .. }))
    }

    fn add_segment(&mut self, idx: u64, duration: f32) -> Result<()> {
        self.segments
            .push(SegmentInfo(idx, duration, self.segment_type));

        const MAX_SEGMENTS: usize = 10;

        if self.segments.len() > MAX_SEGMENTS {
            let n_drain = self.segments.len() - MAX_SEGMENTS;
            let seg_dir = self.out_dir();
            for seg in self.segments.drain(..n_drain) {
                // delete file
                let seg_path = seg_dir.join(seg.filename());
                std::fs::remove_file(seg_path)?;
            }
        }
        self.write_playlist()
    }

    fn write_playlist(&mut self) -> Result<()> {
        let mut pl = m3u8_rs::MediaPlaylist::default();
        pl.target_duration = self.segment_length as u64;
        pl.segments = self.segments.iter().map(|s| s.to_media_segment()).collect();
        pl.version = Some(3);
        pl.media_sequence = self.segments.first().map(|s| s.0).unwrap_or(0);

        let mut f_out = File::create(self.out_dir().join("live.m3u8"))?;
        pl.write_to(&mut f_out)?;
        Ok(())
    }

    /// https://git.ffmpeg.org/gitweb/ffmpeg.git/blob/HEAD:/libavformat/hlsenc.c#l351
    unsafe fn to_codec_attr(&self, stream: *mut AVStream) -> Option<String> {
        let p = (*stream).codecpar;
        if (*p).codec_id == AV_CODEC_ID_H264 {
            let data = (*p).extradata;
            if !data.is_null() {
                let mut id_ptr = ptr::null_mut();
                let ds: *mut u16 = data as *mut u16;
                if (*ds) == 1 && (*data.add(4)) & 0x1F == 7 {
                    id_ptr = data.add(5);
                } else if (*ds) == 1 && (*data.add(3)) & 0x1F == 7 {
                    id_ptr = data.add(4);
                } else if *data.add(0) == 1 {
                    id_ptr = data.add(1);
                } else {
                    return None;
                }

                return Some(format!(
                    "avc1.{}",
                    hex::encode([*id_ptr.add(0), *id_ptr.add(1), *id_ptr.add(2)])
                ));
            }
        }
        None
    }

    pub fn to_playlist_variant(&self) -> m3u8_rs::VariantStream {
        unsafe {
            let pes = self.video_stream().unwrap_or(self.streams.first().unwrap());
            let av_stream = *(*self.mux.context()).streams.add(*pes.index());
            let codec_par = (*av_stream).codecpar;
            m3u8_rs::VariantStream {
                is_i_frame: false,
                uri: format!("{}/live.m3u8", self.name),
                bandwidth: 0,
                average_bandwidth: Some((*codec_par).bit_rate as u64),
                codecs: self.to_codec_attr(av_stream),
                resolution: Some(m3u8_rs::Resolution {
                    width: (*codec_par).width as _,
                    height: (*codec_par).height as _,
                }),
                frame_rate: Some(av_q2d((*codec_par).framerate)),
                hdcp_level: None,
                audio: None,
                video: None,
                subtitles: None,
                closed_captions: None,
                other_attributes: None,
            }
        }
    }
}

pub struct HlsMuxer {
    pub out_dir: PathBuf,
    pub variants: Vec<HlsVariant>,
}

impl HlsMuxer {
    pub fn new<'a>(
        id: &Uuid,
        out_dir: &str,
        segment_length: f32,
        encoders: impl Iterator<Item = (&'a VariantStream, &'a Encoder)>,
        segment_type: SegmentType,
    ) -> Result<Self> {
        let base = PathBuf::from(out_dir).join(id.to_string());

        let mut vars = Vec::new();
        for (k, group) in &encoders
            .sorted_by(|a, b| a.0.group_id().cmp(&b.0.group_id()))
            .chunk_by(|a| a.0.group_id())
        {
            let var = HlsVariant::new(
                base.to_str().unwrap(),
                segment_length,
                k,
                group,
                segment_type,
            )?;
            vars.push(var);
        }

        let ret = Self {
            out_dir: base,
            variants: vars,
        };
        ret.write_master_playlist()?;
        Ok(ret)
    }

    fn write_master_playlist(&self) -> Result<()> {
        let mut pl = m3u8_rs::MasterPlaylist::default();
        pl.version = Some(3);
        pl.variants = self
            .variants
            .iter()
            .map(|v| v.to_playlist_variant())
            .collect();

        let mut f_out = File::create(self.out_dir.join("live.m3u8"))?;
        pl.write_to(&mut f_out)?;
        Ok(())
    }

    /// Mux an encoded packet from [Encoder]
    pub unsafe fn mux_packet(
        &mut self,
        pkt: *mut AVPacket,
        variant: &Uuid,
    ) -> Result<Option<NewSegment>> {
        for var in self.variants.iter_mut() {
            if let Some(vs) = var.streams.iter().find(|s| s.id() == variant) {
                // very important for muxer to know which stream this pkt belongs to
                (*pkt).stream_index = *vs.index() as _;
                return var.mux_packet(pkt);
            }
        }
        bail!("Packet doesnt match any variants");
    }
}
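
Not part of this commit: the split rule inside `mux_packet` in isolation. A packet belongs to segment `1 + floor(pts_seconds / segment_length)`, and a split only happens on a video keyframe, so actual segment durations can drift from the target length. A standalone sketch with hypothetical values:

/// Segment index a packet should land in, mirroring the arithmetic above.
fn target_segment(pkt_time: f32, segment_length: f32) -> u64 {
    1 + (pkt_time / segment_length).floor() as u64
}

fn main() {
    let segment_length = 2.0;
    // A keyframe at t=6.1s maps to segment 4 (6.1 / 2.0 = 3.05, floor = 3, +1 = 4)
    assert_eq!(target_segment(6.1, segment_length), 4);
    // If the muxer is still on segment 3, this keyframe triggers a split
    let (current_idx, is_keyframe) = (3, true);
    assert!(is_keyframe && target_segment(6.1, segment_length) != current_idx);
}
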
2 crates/core/src/mux/mod.rs (Normal file)
@@ -0,0 +1,2 @@
mod hls;
pub use hls::*;
71 crates/core/src/overseer/local.rs (Normal file)
@@ -0,0 +1,71 @@
use crate::egress::EgressConfig;
use crate::ingress::ConnectionInfo;
use crate::overseer::{get_default_variants, IngressInfo, Overseer};
use crate::pipeline::{EgressType, PipelineConfig};
use crate::variant::StreamMapping;
use anyhow::Result;
use async_trait::async_trait;
use std::path::PathBuf;
use uuid::Uuid;

/// Simple static file output without any access controls
/// Useful for testing or self-hosting
pub struct LocalOverseer;

impl LocalOverseer {
    pub fn new() -> Self {
        Self {}
    }
}

#[async_trait]
impl Overseer for LocalOverseer {
    async fn check_streams(&self) -> Result<()> {
        todo!()
    }

    async fn start_stream(
        &self,
        _connection: &ConnectionInfo,
        stream_info: &IngressInfo,
    ) -> Result<PipelineConfig> {
        let vars = get_default_variants(stream_info)?;
        let var_ids = vars.iter().map(|v| v.id()).collect();
        Ok(PipelineConfig {
            id: Uuid::new_v4(),
            variants: vars,
            egress: vec![EgressType::HLS(EgressConfig {
                name: "HLS".to_owned(),
                variants: var_ids,
            })],
        })
    }

    async fn on_segment(
        &self,
        pipeline_id: &Uuid,
        variant_id: &Uuid,
        index: u64,
        duration: f32,
        path: &PathBuf,
    ) -> Result<()> {
        // nothing to do here
        Ok(())
    }

    async fn on_thumbnail(
        &self,
        pipeline_id: &Uuid,
        width: usize,
        height: usize,
        path: &PathBuf,
    ) -> Result<()> {
        // nothing to do here
        Ok(())
    }

    async fn on_end(&self, pipeline_id: &Uuid) -> Result<()> {
        // nothing to do here
        Ok(())
    }
}
84 crates/core/src/overseer/mod.rs (Normal file)
@@ -0,0 +1,84 @@
use crate::ingress::ConnectionInfo;

use crate::pipeline::PipelineConfig;
use anyhow::Result;
use async_trait::async_trait;
use std::cmp::PartialEq;
use std::path::PathBuf;
use uuid::Uuid;

#[cfg(feature = "local-overseer")]
mod local;

#[cfg(feature = "webhook-overseer")]
mod webhook;

#[cfg(feature = "zap-stream")]
mod zap_stream;

/// A copy of [ffmpeg_rs_raw::DemuxerInfo] without internal ptr
#[derive(PartialEq, Clone)]
pub struct IngressInfo {
    pub bitrate: usize,
    pub streams: Vec<IngressStream>,
}

/// A copy of [ffmpeg_rs_raw::StreamInfo] without ptr
#[derive(PartialEq, Clone)]
pub struct IngressStream {
    pub index: usize,
    pub stream_type: IngressStreamType,
    pub codec: isize,
    pub format: isize,
    pub width: usize,
    pub height: usize,
    pub fps: f32,
    pub sample_rate: usize,
    pub language: String,
}

#[derive(PartialEq, Eq, Clone)]
pub enum IngressStreamType {
    Video,
    Audio,
    Subtitle,
}

#[async_trait]
/// The control process that oversees streaming operations
pub trait Overseer: Send + Sync {
    /// Check all streams
    async fn check_streams(&self) -> Result<()>;

    /// Set up a new streaming pipeline
    async fn start_stream(
        &self,
        connection: &ConnectionInfo,
        stream_info: &IngressInfo,
    ) -> Result<PipelineConfig>;

    /// A new segment (HLS etc.) was generated for a stream variant
    ///
    /// This handler is usually used for distribution / billing
    async fn on_segment(
        &self,
        pipeline_id: &Uuid,
        variant_id: &Uuid,
        index: u64,
        duration: f32,
        path: &PathBuf,
    ) -> Result<()>;

    /// At a regular interval, pipeline will emit one of the frames for processing as a
    /// thumbnail
    async fn on_thumbnail(
        &self,
        pipeline_id: &Uuid,
        width: usize,
        height: usize,
        path: &PathBuf,
    ) -> Result<()>;

    /// Stream is finished
    async fn on_end(&self, pipeline_id: &Uuid) -> Result<()>;
}
60 crates/core/src/overseer/webhook.rs (Normal file)
@@ -0,0 +1,60 @@
use crate::ingress::ConnectionInfo;
use crate::overseer::{IngressInfo, Overseer};
use crate::pipeline::PipelineConfig;
use anyhow::Result;
use async_trait::async_trait;
use std::path::PathBuf;
use uuid::Uuid;

#[derive(Clone)]
pub struct WebhookOverseer {
    url: String,
}

impl WebhookOverseer {
    pub fn new(url: &str) -> Self {
        Self {
            url: url.to_string(),
        }
    }
}

#[async_trait]
impl Overseer for WebhookOverseer {
    async fn check_streams(&self) -> Result<()> {
        todo!()
    }

    async fn start_stream(
        &self,
        connection: &ConnectionInfo,
        stream_info: &IngressInfo,
    ) -> Result<PipelineConfig> {
        todo!()
    }

    async fn on_segment(
        &self,
        pipeline_id: &Uuid,
        variant_id: &Uuid,
        index: u64,
        duration: f32,
        path: &PathBuf,
    ) -> Result<()> {
        todo!()
    }

    async fn on_thumbnail(
        &self,
        pipeline_id: &Uuid,
        width: usize,
        height: usize,
        path: &PathBuf,
    ) -> Result<()> {
        todo!()
    }

    async fn on_end(&self, pipeline_id: &Uuid) -> Result<()> {
        todo!()
    }
}
66 crates/core/src/pipeline/mod.rs (Normal file)
@@ -0,0 +1,66 @@
use std::fmt::{Display, Formatter};

use crate::egress::EgressConfig;
use crate::variant::VariantStream;
use serde::{Deserialize, Serialize};
use uuid::Uuid;

pub mod runner;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum EgressType {
    /// HLS output egress
    HLS(EgressConfig),

    /// Record streams to local disk
    Recorder(EgressConfig),

    /// Forward streams to another RTMP server
    RTMPForwarder(EgressConfig),
}

impl EgressType {
    pub fn config(&self) -> &EgressConfig {
        match self {
            EgressType::HLS(c) => c,
            EgressType::Recorder(c) => c,
            EgressType::RTMPForwarder(c) => c,
        }
    }
}

impl Display for EgressType {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            EgressType::HLS(_) => write!(f, "HLS"),
            EgressType::Recorder(_) => write!(f, "Recorder"),
            EgressType::RTMPForwarder(_) => write!(f, "RTMPForwarder"),
        }
    }
}

#[derive(Clone, Debug, Serialize, Deserialize, Default)]
pub struct PipelineConfig {
    pub id: Uuid,
    /// Transcoded/Copied stream config
    pub variants: Vec<VariantStream>,
    /// Output muxers
    pub egress: Vec<EgressType>,
}

impl Display for PipelineConfig {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "\nPipeline Config ID={}", self.id)?;
        write!(f, "\nVariants:")?;
        for v in &self.variants {
            write!(f, "\n\t{}", v)?;
        }
        if !self.egress.is_empty() {
            write!(f, "\nEgress:")?;
            for e in &self.egress {
                write!(f, "\n\t{}", e)?;
            }
        }
        Ok(())
    }
}
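
Not part of this commit: a sketch of assembling a `PipelineConfig` by hand, e.g. HLS plus a recording, with empty variant sets as placeholders (`LocalOverseer::start_stream` above builds a real one from `get_default_variants`):

use std::collections::HashSet;
use uuid::Uuid;
use zap_stream_core::egress::EgressConfig;
use zap_stream_core::pipeline::{EgressType, PipelineConfig};

fn main() {
    let cfg = PipelineConfig {
        id: Uuid::new_v4(),
        // Placeholder: a real config lists the transcoded/copied variants here
        variants: vec![],
        egress: vec![
            EgressType::HLS(EgressConfig {
                name: "HLS".to_owned(),
                variants: HashSet::new(),
            }),
            EgressType::Recorder(EgressConfig {
                name: "Recorder".to_owned(),
                variants: HashSet::new(),
            }),
        ],
    };
    // Uses the Display impl above
    println!("{}", cfg);
}
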
406 crates/core/src/pipeline/runner.rs (Normal file)
@ -0,0 +1,406 @@
use std::collections::{HashMap, HashSet};
use std::io::Read;
use std::mem::transmute;
use std::ops::Sub;
use std::path::PathBuf;
use std::ptr;
use std::sync::Arc;
use std::time::Instant;

use crate::egress::hls::HlsEgress;
use crate::egress::recorder::RecorderEgress;
use crate::egress::{Egress, EgressResult};
use crate::ingress::ConnectionInfo;
use crate::mux::SegmentType;
use crate::overseer::{IngressInfo, IngressStream, IngressStreamType, Overseer};
use crate::pipeline::{EgressType, PipelineConfig};
use crate::variant::{StreamMapping, VariantStream};
use anyhow::{bail, Result};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVCodecID::AV_CODEC_ID_WEBP;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
    av_frame_free, av_get_sample_fmt, av_packet_free, av_q2d, av_rescale_q, AVMediaType,
};
use ffmpeg_rs_raw::{
    cstr, get_frame_from_hw, AudioFifo, Decoder, Demuxer, DemuxerInfo, Encoder, Resample, Scaler,
    StreamType,
};
use itertools::Itertools;
use log::{error, info, warn};
use tokio::runtime::Handle;
use uuid::Uuid;

/// Pipeline runner is the main entry process for stream transcoding
///
/// Each client connection spawns a new [PipelineRunner] and it should be run in its own thread
/// using [crate::ingress::spawn_pipeline]
pub struct PipelineRunner {
    /// Async runtime handle
    handle: Handle,

    /// Input stream connection info
    connection: ConnectionInfo,

    /// Configuration for this pipeline (variants, egress config etc.)
    config: Option<PipelineConfig>,

    /// Singleton demuxer for this input
    demuxer: Demuxer,

    /// Singleton decoder for all streams
    decoder: Decoder,

    /// Scaler for a variant (variant_id, Scaler)
    scalers: HashMap<Uuid, Scaler>,

    /// Resampler for a variant (variant_id, Resample+FIFO)
    resampler: HashMap<Uuid, (Resample, AudioFifo)>,

    /// Encoder for a variant (variant_id, Encoder)
    encoders: HashMap<Uuid, Encoder>,

    /// Simple mapping to copy streams
    copy_stream: HashMap<Uuid, Uuid>,

    /// All configured egress instances
    egress: Vec<Box<dyn Egress>>,

    /// Info about the input stream
    info: Option<IngressInfo>,

    /// Overseer managing this pipeline
    overseer: Arc<dyn Overseer>,

    fps_counter_start: Instant,
    fps_last_frame_ctr: u64,

    /// Total number of frames produced
    frame_ctr: u64,
    out_dir: String,
}

impl PipelineRunner {
    pub fn new(
        handle: Handle,
        out_dir: String,
        overseer: Arc<dyn Overseer>,
        connection: ConnectionInfo,
        recv: Box<dyn Read + Send>,
    ) -> Result<Self> {
        Ok(Self {
            handle,
            out_dir,
            overseer,
            connection,
            config: Default::default(),
            demuxer: Demuxer::new_custom_io(recv, None)?,
            decoder: Decoder::new(),
            scalers: Default::default(),
            resampler: Default::default(),
            encoders: Default::default(),
            copy_stream: Default::default(),
            fps_counter_start: Instant::now(),
            egress: Vec::new(),
            frame_ctr: 0,
            fps_last_frame_ctr: 0,
            info: None,
        })
    }

    /// EOF, cleanup
    pub unsafe fn flush(&mut self) -> Result<()> {
        for (var, enc) in &mut self.encoders {
            for mut pkt in enc.encode_frame(ptr::null_mut())? {
                for eg in self.egress.iter_mut() {
                    eg.process_pkt(pkt, var)?;
                }
                av_packet_free(&mut pkt);
            }
        }
        for eg in self.egress.iter_mut() {
            eg.reset()?;
        }

        if let Some(config) = &self.config {
            self.handle.block_on(async {
                if let Err(e) = self.overseer.on_end(&config.id).await {
                    error!("Failed to end stream: {e}");
                }
            });
        }
        Ok(())
    }

    /// Main processor, should be called in a loop
    /// Returns false when stream data ended (EOF)
    pub unsafe fn run(&mut self) -> Result<bool> {
        self.setup()?;

        let config = if let Some(config) = &self.config {
            config
        } else {
            bail!("Pipeline not configured, cannot run")
        };

        // run transcoder pipeline
        let (mut pkt, stream) = self.demuxer.get_packet()?;
        if pkt.is_null() {
            return Ok(false);
        }

        // TODO: For copy streams, skip decoder
        let frames = match self.decoder.decode_pkt(pkt) {
            Ok(f) => f,
            Err(e) => {
                warn!("Error decoding frames, {e}");
                return Ok(true);
            }
        };

        let mut egress_results = vec![];
        for frame in frames {
            // Copy frame from GPU if using hwaccel decoding
            let mut frame = get_frame_from_hw(frame)?;
            (*frame).time_base = (*stream).time_base;

            let p = (*stream).codecpar;
            if (*p).codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO {
                if (self.frame_ctr % 1800) == 0 {
                    let dst_pic = PathBuf::from(&self.out_dir)
                        .join(config.id.to_string())
                        .join("thumb.webp");
                    let mut sw = Scaler::new();
                    let mut frame = sw.process_frame(
                        frame,
                        (*frame).width as _,
                        (*frame).height as _,
                        AV_PIX_FMT_YUV420P,
                    )?;
                    Encoder::new(AV_CODEC_ID_WEBP)?
                        .with_height((*frame).height)
                        .with_width((*frame).width)
                        .with_pix_fmt(transmute((*frame).format))
                        .open(None)?
                        .save_picture(frame, dst_pic.to_str().unwrap())?;
                    info!("Saved thumb to: {}", dst_pic.display());
                    av_frame_free(&mut frame);
                }

                // TODO: fix this, multiple video streams in
                self.frame_ctr += 1;
            }

            // Get the variants which want this pkt
            let pkt_vars = config
                .variants
                .iter()
                .filter(|v| v.src_index() == (*stream).index as usize);
            for var in pkt_vars {
                let enc = if let Some(enc) = self.encoders.get_mut(&var.id()) {
                    enc
                } else {
                    //warn!("Frame had nowhere to go in {} :/", var.id());
                    continue;
                };
                // before encoding frame, rescale timestamps
                if !frame.is_null() {
                    let enc_ctx = enc.codec_context();
                    (*frame).pict_type = AV_PICTURE_TYPE_NONE;
                    (*frame).pts =
                        av_rescale_q((*frame).pts, (*frame).time_base, (*enc_ctx).time_base);
                    (*frame).pkt_dts =
                        av_rescale_q((*frame).pkt_dts, (*frame).time_base, (*enc_ctx).time_base);
                    (*frame).duration =
                        av_rescale_q((*frame).duration, (*frame).time_base, (*enc_ctx).time_base);
                    (*frame).time_base = (*enc_ctx).time_base;
                }

                let mut new_frame = false;
                let mut frame = match var {
                    VariantStream::Video(v) => {
                        if let Some(s) = self.scalers.get_mut(&v.id()) {
                            new_frame = true;
                            s.process_frame(frame, v.width, v.height, transmute(v.pixel_format))?
                        } else {
                            frame
                        }
                    }
                    VariantStream::Audio(a) => {
                        if let Some((r, f)) = self.resampler.get_mut(&a.id()) {
                            let frame_size = (*enc.codec_context()).frame_size;
                            new_frame = true;
                            let mut resampled_frame = r.process_frame(frame)?;
                            if let Some(ret) =
                                f.buffer_frame(resampled_frame, frame_size as usize)?
                            {
                                av_frame_free(&mut resampled_frame);
                                ret
                            } else {
                                av_frame_free(&mut resampled_frame);
                                continue;
                            }
                        } else {
                            frame
                        }
                    }
                    _ => frame,
                };

                let packets = enc.encode_frame(frame)?;
                // pass new packets to egress
                for mut pkt in packets {
                    for eg in self.egress.iter_mut() {
                        let er = eg.process_pkt(pkt, &var.id())?;
                        egress_results.push(er);
                    }
                    av_packet_free(&mut pkt);
                }

                if new_frame {
                    av_frame_free(&mut frame);
                }
            }

            av_frame_free(&mut frame);
        }

        av_packet_free(&mut pkt);

        // egress results
        self.handle.block_on(async {
            for er in egress_results {
                if let EgressResult::NewSegment(seg) = er {
                    if let Err(e) = self
                        .overseer
                        .on_segment(&config.id, &seg.variant, seg.idx, seg.duration, &seg.path)
                        .await
                    {
                        bail!("Failed to process segment {}", e.to_string());
                    }
                }
            }
            Ok(())
        })?;
        let elapsed = Instant::now().sub(self.fps_counter_start).as_secs_f32();
        if elapsed >= 2f32 {
            let n_frames = self.frame_ctr - self.fps_last_frame_ctr;
            info!("Average fps: {:.2}", n_frames as f32 / elapsed);
            self.fps_counter_start = Instant::now();
            self.fps_last_frame_ctr = self.frame_ctr;
        }
        Ok(true)
    }

    unsafe fn setup(&mut self) -> Result<()> {
        if self.info.is_some() {
            return Ok(());
        }

        let info = self.demuxer.probe_input()?;

        // convert to internal type
        let i_info = IngressInfo {
            bitrate: info.bitrate,
            streams: info
                .streams
                .iter()
                .map(|s| IngressStream {
                    index: s.index,
                    stream_type: match s.stream_type {
                        StreamType::Video => IngressStreamType::Video,
                        StreamType::Audio => IngressStreamType::Audio,
                        StreamType::Subtitle => IngressStreamType::Subtitle,
                    },
                    codec: s.codec,
                    format: s.format,
                    width: s.width,
                    height: s.height,
                    fps: s.fps,
                    sample_rate: s.sample_rate,
                    language: s.language.clone(),
                })
                .collect(),
        };

        let cfg = self
            .handle
            .block_on(async { self.overseer.start_stream(&self.connection, &i_info).await })?;
        self.config = Some(cfg);
        self.info = Some(i_info);

        self.setup_pipeline(&info)?;
        Ok(())
    }

    unsafe fn setup_pipeline(&mut self, demux_info: &DemuxerInfo) -> Result<()> {
        let cfg = if let Some(ref cfg) = self.config {
            cfg
        } else {
            bail!("Cannot setup pipeline without config");
        };

        // src stream indexes
        let inputs: HashSet<usize> = cfg.variants.iter().map(|e| e.src_index()).collect();

        // enable hardware decoding
        self.decoder.enable_hw_decoder_any();

        // setup decoders
        for input_idx in inputs {
            let stream = demux_info
                .streams
                .iter()
                .find(|f| f.index == input_idx)
                .unwrap();
            self.decoder.setup_decoder(stream, None)?;
        }

        // setup scaler/encoders
        for out_stream in &cfg.variants {
            match out_stream {
                VariantStream::Video(v) => {
                    self.encoders.insert(out_stream.id(), v.try_into()?);
                    self.scalers.insert(out_stream.id(), Scaler::new());
                }
                VariantStream::Audio(a) => {
                    let enc = a.try_into()?;
                    let fmt = av_get_sample_fmt(cstr!(a.sample_fmt.as_str()));
                    let rs = Resample::new(fmt, a.sample_rate as _, a.channels as _);
                    let f = AudioFifo::new(fmt, a.channels as _)?;
                    self.resampler.insert(out_stream.id(), (rs, f));
                    self.encoders.insert(out_stream.id(), enc);
                }
                _ => continue,
            }
        }

        // TODO: Setup copy streams

        // Setup egress
        for e in &cfg.egress {
            let c = e.config();
            let encoders = self.encoders.iter().filter_map(|(k, v)| {
                if c.variants.contains(k) {
                    let var = cfg.variants.iter().find(|x| x.id() == *k)?;
                    Some((var, v))
                } else {
                    None
                }
            });
            match e {
                EgressType::HLS(_) => {
                    let hls =
                        HlsEgress::new(&cfg.id, &self.out_dir, 2.0, encoders, SegmentType::MPEGTS)?;
                    self.egress.push(Box::new(hls));
                }
                EgressType::Recorder(_) => {
                    let rec = RecorderEgress::new(&cfg.id, &self.out_dir, encoders)?;
                    self.egress.push(Box::new(rec));
                }
                _ => warn!("{} is not implemented", e),
            }
        }
        Ok(())
    }
}
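A hedged sketch of driving the runner until EOF; `handle`, `out_dir`, `overseer`, `conn` and `reader` are assumed to come from the ingress layer (see [crate::ingress::spawn_pipeline]):

// Hedged sketch: one dedicated thread per connection.
let mut runner = PipelineRunner::new(handle, out_dir, overseer, conn, reader)?;
std::thread::spawn(move || unsafe {
    while runner.run().unwrap_or(false) {} // run() returns false on EOF
    runner.flush().ok(); // flush encoders and notify the overseer
});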
79
crates/core/src/variant/audio.rs
Normal file
@ -0,0 +1,79 @@
use ffmpeg_rs_raw::ffmpeg_sys_the_third::av_get_sample_fmt;
use ffmpeg_rs_raw::{cstr, Encoder};
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use uuid::Uuid;

use crate::variant::{StreamMapping, VariantMapping};

/// Information related to variant streams for a given egress
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct AudioVariant {
    /// Id, Src, Dst
    pub mapping: VariantMapping,

    /// Bitrate of this stream
    pub bitrate: u64,

    /// Codec name
    pub codec: String,

    /// Number of channels
    pub channels: u16,

    /// Sample rate
    pub sample_rate: usize,

    /// Sample format as ffmpeg sample format string
    pub sample_fmt: String,
}

impl Display for AudioVariant {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Audio #{}->{}: {}, {}kbps",
            self.mapping.src_index,
            self.mapping.dst_index,
            self.codec,
            self.bitrate / 1000
        )
    }
}

impl StreamMapping for AudioVariant {
    fn id(&self) -> Uuid {
        self.mapping.id
    }
    fn src_index(&self) -> usize {
        self.mapping.src_index
    }

    fn dst_index(&self) -> usize {
        self.mapping.dst_index
    }

    fn set_dst_index(&mut self, dst: usize) {
        self.mapping.dst_index = dst;
    }

    fn group_id(&self) -> usize {
        self.mapping.group_id
    }
}

impl TryInto<Encoder> for &AudioVariant {
    type Error = anyhow::Error;

    fn try_into(self) -> Result<Encoder, Self::Error> {
        unsafe {
            let enc = Encoder::new_with_name(&self.codec)?
                .with_sample_rate(self.sample_rate as _)?
                .with_bitrate(self.bitrate as _)
                .with_default_channel_layout(self.channels as _)
                .with_sample_format(av_get_sample_fmt(cstr!(self.sample_fmt.as_bytes())))
                .open(None)?;

            Ok(enc)
        }
    }
}
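A hedged example of defining an audio variant and deriving an encoder from it via the TryInto impl above (all values illustrative):

// Hedged sketch: a stereo AAC variant at 128kbps / 48kHz.
let audio = AudioVariant {
    mapping: VariantMapping { id: Uuid::new_v4(), src_index: 1, dst_index: 1, group_id: 0 },
    bitrate: 128_000,
    codec: "aac".to_string(),
    channels: 2,
    sample_rate: 48_000,
    sample_fmt: "fltp".to_string(),
};
let encoder: Encoder = (&audio).try_into()?; // opens the codec (unsafe FFI inside)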
48
crates/core/src/variant/mapping.rs
Normal file
@ -0,0 +1,48 @@
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use uuid::Uuid;

use crate::variant::StreamMapping;

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, Default)]
pub struct VariantMapping {
    /// Unique ID of this variant
    pub id: Uuid,

    /// Source video stream to use for this variant
    pub src_index: usize,

    /// Index of this variant stream in the output
    pub dst_index: usize,

    /// Stream group, groups one or more streams into a variant
    pub group_id: usize,
}

impl Display for VariantMapping {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(f, "Copy #{}->{}", self.src_index, self.dst_index)
    }
}

impl StreamMapping for VariantMapping {
    fn id(&self) -> Uuid {
        self.id
    }

    fn src_index(&self) -> usize {
        self.src_index
    }

    fn dst_index(&self) -> usize {
        self.dst_index
    }

    fn set_dst_index(&mut self, dst: usize) {
        self.dst_index = dst;
    }

    fn group_id(&self) -> usize {
        self.group_id
    }
}
113
crates/core/src/variant/mod.rs
Normal file
@ -0,0 +1,113 @@
use crate::variant::audio::AudioVariant;
use crate::variant::mapping::VariantMapping;
use crate::variant::video::VideoVariant;
use anyhow::Error;
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
use uuid::Uuid;

pub mod audio;
pub mod mapping;
pub mod video;

#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub enum VariantStream {
    /// Video stream mapping
    Video(VideoVariant),
    /// Audio stream mapping
    Audio(AudioVariant),
    /// Subtitle stream mapping
    Subtitle(VariantMapping),
    /// Copy stream src<>dst stream
    CopyVideo(VariantMapping),
    /// Copy stream src<>dst stream
    CopyAudio(VariantMapping),
}

impl StreamMapping for VariantStream {
    fn id(&self) -> Uuid {
        match self {
            VariantStream::Video(v) => v.id(),
            VariantStream::Audio(v) => v.id(),
            VariantStream::Subtitle(v) => v.id(),
            VariantStream::CopyAudio(v) => v.id(),
            VariantStream::CopyVideo(v) => v.id(),
        }
    }

    fn src_index(&self) -> usize {
        match self {
            VariantStream::Video(v) => v.src_index(),
            VariantStream::Audio(v) => v.src_index(),
            VariantStream::Subtitle(v) => v.src_index(),
            VariantStream::CopyAudio(v) => v.src_index(),
            VariantStream::CopyVideo(v) => v.src_index(),
        }
    }

    fn dst_index(&self) -> usize {
        match self {
            VariantStream::Video(v) => v.dst_index(),
            VariantStream::Audio(v) => v.dst_index(),
            VariantStream::Subtitle(v) => v.dst_index(),
            VariantStream::CopyAudio(v) => v.dst_index(),
            VariantStream::CopyVideo(v) => v.dst_index(),
        }
    }

    fn set_dst_index(&mut self, dst: usize) {
        match self {
            VariantStream::Video(v) => v.set_dst_index(dst),
            VariantStream::Audio(v) => v.set_dst_index(dst),
            VariantStream::Subtitle(v) => v.set_dst_index(dst),
            VariantStream::CopyAudio(v) => v.set_dst_index(dst),
            VariantStream::CopyVideo(v) => v.set_dst_index(dst),
        }
    }

    fn group_id(&self) -> usize {
        match self {
            VariantStream::Video(v) => v.group_id(),
            VariantStream::Audio(v) => v.group_id(),
            VariantStream::Subtitle(v) => v.group_id(),
            VariantStream::CopyAudio(v) => v.group_id(),
            VariantStream::CopyVideo(v) => v.group_id(),
        }
    }
}

impl Display for VariantStream {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            VariantStream::Video(v) => write!(f, "{}", v),
            VariantStream::Audio(a) => write!(f, "{}", a),
            VariantStream::Subtitle(s) => write!(f, "{}", s),
            VariantStream::CopyVideo(c) => write!(f, "{}", c),
            VariantStream::CopyAudio(c) => write!(f, "{}", c),
        }
    }
}

pub trait StreamMapping {
    fn id(&self) -> Uuid;
    fn src_index(&self) -> usize;
    fn dst_index(&self) -> usize;
    fn set_dst_index(&mut self, dst: usize);
    fn group_id(&self) -> usize;
}

/// Find a stream by ID in a vec of streams
pub fn find_stream<'a>(
    config: &'a Vec<VariantStream>,
    id: &Uuid,
) -> Result<&'a VariantStream, Error> {
    config
        .iter()
        .find(|x| match x {
            VariantStream::Video(v) => v.id() == *id,
            VariantStream::Audio(a) => a.id() == *id,
            VariantStream::Subtitle(v) => v.id() == *id,
            VariantStream::CopyVideo(c) => c.id() == *id,
            VariantStream::CopyAudio(c) => c.id() == *id,
        })
        .ok_or(Error::msg("Variant does not exist"))
}
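A brief hedged usage sketch for find_stream; `variants` and `wanted` are hypothetical locals, e.g. a pipeline's variant list and a UUID taken from an egress config:

// Hedged sketch: resolve a variant by its UUID before routing packets to it.
match find_stream(&variants, &wanted) {
    Ok(var) => println!("routing to {}", var),
    Err(e) => eprintln!("{}", e), // "Variant does not exist"
}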
111
crates/core/src/variant/video.rs
Normal file
@ -0,0 +1,111 @@
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVColorSpace::AVCOL_SPC_BT709;
use ffmpeg_rs_raw::Encoder;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::{Display, Formatter};
use std::intrinsics::transmute;
use uuid::Uuid;

use crate::variant::{StreamMapping, VariantMapping};

/// Information related to variant streams for a given egress
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct VideoVariant {
    /// Id, Src, Dst
    pub mapping: VariantMapping,

    /// Width of this video stream
    pub width: u16,

    /// Height of this video stream
    pub height: u16,

    /// FPS for this stream
    pub fps: f32,

    /// Bitrate of this stream
    pub bitrate: u64,

    /// Codec name
    pub codec: String,

    /// Codec profile
    pub profile: usize,

    /// Codec level
    pub level: usize,

    /// Keyframe interval in frames
    pub keyframe_interval: u16,

    /// Pixel Format
    pub pixel_format: u32,
}

impl Display for VideoVariant {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Video #{}->{}: {}, {}x{}, {}fps, {}kbps",
            self.mapping.src_index,
            self.mapping.dst_index,
            self.codec,
            self.width,
            self.height,
            self.fps,
            self.bitrate / 1000
        )
    }
}

impl StreamMapping for VideoVariant {
    fn id(&self) -> Uuid {
        self.mapping.id
    }
    fn src_index(&self) -> usize {
        self.mapping.src_index
    }

    fn dst_index(&self) -> usize {
        self.mapping.dst_index
    }

    fn set_dst_index(&mut self, dst: usize) {
        self.mapping.dst_index = dst;
    }

    fn group_id(&self) -> usize {
        self.mapping.group_id
    }
}

impl TryInto<Encoder> for &VideoVariant {
    type Error = anyhow::Error;

    fn try_into(self) -> Result<Encoder, Self::Error> {
        unsafe {
            let mut opt = HashMap::new();
            if self.codec == "x264" {
                opt.insert("preset".to_string(), "fast".to_string());
                //opt.insert("tune".to_string(), "zerolatency".to_string());
            }
            let enc = Encoder::new_with_name(&self.codec)?
                .with_bitrate(self.bitrate as _)
                .with_width(self.width as _)
                .with_height(self.height as _)
                .with_pix_fmt(transmute(self.pixel_format))
                .with_profile(transmute(self.profile as i32))
                .with_level(transmute(self.level as i32))
                .with_framerate(self.fps)?
                .with_options(|ctx| {
                    (*ctx).gop_size = self.keyframe_interval as _;
                    (*ctx).keyint_min = self.keyframe_interval as _;
                    (*ctx).max_b_frames = 3;
                    (*ctx).colorspace = AVCOL_SPC_BT709;
                })
                .open(Some(opt))?;

            Ok(enc)
        }
    }
}
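A hedged example of a 720p variant; the numeric profile/level/pixel_format values are raw FFmpeg integers and purely illustrative here (77 ~ H.264 main profile, 51 ~ level 5.1, 0 ~ AV_PIX_FMT_YUV420P):

// Hedged sketch: a 720p30 variant converted into an open encoder.
let video = VideoVariant {
    mapping: VariantMapping { id: Uuid::new_v4(), src_index: 0, dst_index: 0, group_id: 0 },
    width: 1280,
    height: 720,
    fps: 30.0,
    bitrate: 3_000_000,
    codec: "libx264".to_string(),
    profile: 77,           // illustrative raw FFmpeg profile value
    level: 51,             // illustrative raw FFmpeg level value
    keyframe_interval: 60, // 2s at 30fps
    pixel_format: 0,       // illustrative raw AVPixelFormat value
};
let encoder: Encoder = (&video).try_into()?;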
2018
crates/core/test.svg
Normal file
File diff suppressed because it is too large
Load Diff
After Width: | Height: | Size: 39 KiB |
2017
crates/zap-stream-db/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
15
crates/zap-stream-db/Cargo.toml
Normal file
@ -0,0 +1,15 @@
[package]
name = "zap-stream-db"
version = "0.1.0"
edition = "2021"

[features]
default = []
test-pattern = []

[dependencies]
anyhow.workspace = true
chrono.workspace = true
uuid.workspace = true

sqlx = { version = "0.8.1", features = ["runtime-tokio", "migrate", "mysql", "chrono"] }
41
crates/zap-stream-db/migrations/20241115120541_init.sql
Normal file
@ -0,0 +1,41 @@
-- Add migration script here
create table user
(
    id           integer unsigned not null auto_increment primary key,
    pubkey       binary(32) not null,
    created      timestamp default current_timestamp,
    balance      bigint not null default 0,
    tos_accepted timestamp,
    stream_key   text not null default uuid(),
    is_admin     bool not null default false,
    is_blocked   bool not null default false,
    recording    bool not null default false
);
create unique index ix_user_pubkey on user (pubkey);
create table user_stream
(
    id              varchar(50) not null primary key,
    user_id         integer unsigned not null,
    starts          timestamp not null,
    ends            timestamp,
    state           tinyint unsigned not null,
    title           text,
    summary         text,
    image           text,
    thumb           text,
    tags            text,
    content_warning text,
    goal            text,
    pinned          text,
    -- milli-sats paid for this stream
    cost            bigint unsigned not null default 0,
    -- duration in seconds
    duration        float not null default 0,
    -- admission fee
    fee             integer unsigned,
    -- current nostr event json
    event           text,

    constraint fk_user_stream_user
        foreign key (user_id) references user (id)
);
155
crates/zap-stream-db/src/db.rs
Normal file
@ -0,0 +1,155 @@
use crate::{User, UserStream};
use anyhow::Result;
use sqlx::{Executor, MySqlPool, Row};
use uuid::Uuid;

pub struct ZapStreamDb {
    db: MySqlPool,
}

impl ZapStreamDb {
    pub async fn new(db: &str) -> Result<Self> {
        let db = MySqlPool::connect(db).await?;
        Ok(ZapStreamDb { db })
    }

    pub async fn migrate(&self) -> Result<()> {
        sqlx::migrate!().run(&self.db).await?;
        Ok(())
    }

    /// Find user by stream key, typical first lookup from ingress
    pub async fn find_user_stream_key(&self, key: &str) -> Result<Option<u64>> {
        #[cfg(feature = "test-pattern")]
        if key == "test" {
            // use the 00 pubkey for test sources
            return Ok(Some(self.upsert_user(&[0; 32]).await?));
        }

        Ok(sqlx::query("select id from user where stream_key = ?")
            .bind(key)
            .fetch_optional(&self.db)
            .await?
            .map(|r| r.try_get(0).unwrap()))
    }

    /// Get user by id
    pub async fn get_user(&self, uid: u64) -> Result<User> {
        Ok(sqlx::query_as("select * from user where id = ?")
            .bind(uid)
            .fetch_one(&self.db)
            .await
            .map_err(anyhow::Error::new)?)
    }

    /// Update a user's balance
    pub async fn update_user_balance(&self, uid: u64, diff: i64) -> Result<()> {
        sqlx::query("update user set balance = balance + ? where id = ?")
            .bind(diff)
            .bind(uid)
            .execute(&self.db)
            .await?;
        Ok(())
    }

    pub async fn upsert_user(&self, pubkey: &[u8; 32]) -> Result<u64> {
        let res = sqlx::query("insert ignore into user(pubkey) values(?) returning id")
            .bind(pubkey.as_slice())
            .fetch_optional(&self.db)
            .await?;
        match res {
            None => sqlx::query("select id from user where pubkey = ?")
                .bind(pubkey.as_slice())
                .fetch_one(&self.db)
                .await?
                .try_get(0)
                .map_err(anyhow::Error::new),
            Some(res) => res.try_get(0).map_err(anyhow::Error::new),
        }
    }

    pub async fn insert_stream(&self, user_stream: &UserStream) -> Result<()> {
        sqlx::query("insert into user_stream (id, user_id, state, starts) values (?, ?, ?, ?)")
            .bind(&user_stream.id)
            .bind(&user_stream.user_id)
            .bind(&user_stream.state)
            .bind(&user_stream.starts)
            .execute(&self.db)
            .await?;

        Ok(())
    }

    pub async fn update_stream(&self, user_stream: &UserStream) -> Result<()> {
        sqlx::query(
            "update user_stream set state = ?, starts = ?, ends = ?, title = ?, summary = ?, image = ?, thumb = ?, tags = ?, content_warning = ?, goal = ?, pinned = ?, fee = ?, event = ? where id = ?",
        )
        .bind(&user_stream.state)
        .bind(&user_stream.starts)
        .bind(&user_stream.ends)
        .bind(&user_stream.title)
        .bind(&user_stream.summary)
        .bind(&user_stream.image)
        .bind(&user_stream.thumb)
        .bind(&user_stream.tags)
        .bind(&user_stream.content_warning)
        .bind(&user_stream.goal)
        .bind(&user_stream.pinned)
        .bind(&user_stream.fee)
        .bind(&user_stream.event)
        .bind(&user_stream.id)
        .execute(&self.db)
        .await
        .map_err(anyhow::Error::new)?;
        Ok(())
    }

    pub async fn get_stream(&self, id: &Uuid) -> Result<UserStream> {
        Ok(sqlx::query_as("select * from user_stream where id = ?")
            .bind(id.to_string())
            .fetch_one(&self.db)
            .await
            .map_err(anyhow::Error::new)?)
    }

    /// Get the list of active streams
    pub async fn list_live_streams(&self) -> Result<Vec<UserStream>> {
        Ok(sqlx::query_as("select * from user_stream where state = 2")
            .fetch_all(&self.db)
            .await?)
    }

    /// Add [duration] & [cost] to a stream and return the new user balance
    pub async fn tick_stream(
        &self,
        stream_id: &Uuid,
        user_id: u64,
        duration: f32,
        cost: i64,
    ) -> Result<i64> {
        let mut tx = self.db.begin().await?;

        sqlx::query("update user_stream set duration = duration + ?, cost = cost + ? where id = ?")
            .bind(&duration)
            .bind(&cost)
            .bind(stream_id.to_string())
            .execute(&mut *tx)
            .await?;

        sqlx::query("update user set balance = balance - ? where id = ?")
            .bind(&cost)
            .bind(&user_id)
            .execute(&mut *tx)
            .await?;

        let balance: i64 = sqlx::query("select balance from user where id = ?")
            .bind(&user_id)
            .fetch_one(&mut *tx)
            .await?
            .try_get(0)?;

        tx.commit().await?;

        Ok(balance)
    }
}
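A hedged usage sketch for ZapStreamDb; the connection string and stream key below are illustrative:

// Hedged sketch: connect, run migrations, then resolve a stream key from ingress.
let db = ZapStreamDb::new("mysql://root:root@localhost:3368/zap_stream").await?;
db.migrate().await?;
if let Some(uid) = db.find_user_stream_key("some-stream-key").await? {
    let user = db.get_user(uid).await?;
    println!("balance: {} msats", user.balance);
}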
7
crates/zap-stream-db/src/lib.rs
Normal file
@ -0,0 +1,7 @@
mod db;
mod model;

pub use db::*;
pub use model::*;

pub use sqlx;
67
crates/zap-stream-db/src/model.rs
Normal file
@ -0,0 +1,67 @@
use chrono::{DateTime, Utc};
use sqlx::{FromRow, Type};
use std::fmt::{Display, Formatter};

#[derive(Debug, Clone, FromRow)]
pub struct User {
    /// Database ID for this user
    pub id: u64,
    /// Nostr pubkey of this user
    pub pubkey: Vec<u8>,
    /// Timestamp when this user first used the service
    pub created: DateTime<Utc>,
    /// Current balance in milli-sats
    pub balance: i64,
    /// When the TOS was accepted
    pub tos_accepted: Option<DateTime<Utc>>,
    /// Primary stream key
    pub stream_key: String,
    /// If the user is an admin
    pub is_admin: bool,
    /// If the user is blocked from streaming
    pub is_blocked: bool,
    /// Streams are recorded
    pub recording: bool,
}

#[derive(Default, Debug, Clone, Type)]
#[repr(u8)]
pub enum UserStreamState {
    #[default]
    Unknown = 0,
    Planned = 1,
    Live = 2,
    Ended = 3,
}

impl Display for UserStreamState {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        match self {
            UserStreamState::Unknown => write!(f, "unknown"),
            UserStreamState::Planned => write!(f, "planned"),
            UserStreamState::Live => write!(f, "live"),
            UserStreamState::Ended => write!(f, "ended"),
        }
    }
}

#[derive(Debug, Clone, Default, FromRow)]
pub struct UserStream {
    pub id: String,
    pub user_id: u64,
    pub starts: DateTime<Utc>,
    pub ends: Option<DateTime<Utc>>,
    pub state: UserStreamState,
    pub title: Option<String>,
    pub summary: Option<String>,
    pub image: Option<String>,
    pub thumb: Option<String>,
    pub tags: Option<String>,
    pub content_warning: Option<String>,
    pub goal: Option<String>,
    pub pinned: Option<String>,
    pub cost: u64,
    pub duration: f32,
    pub fee: Option<u32>,
    pub event: Option<String>,
}
3
crates/zap-stream/.dockerignore
Normal file
@ -0,0 +1,3 @@
target/
.git/
out/
7
crates/zap-stream/Cargo.lock
generated
Normal file
@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4

[[package]]
name = "zap-stream"
version = "0.1.0"
43
crates/zap-stream/Cargo.toml
Normal file
@ -0,0 +1,43 @@
[package]
name = "zap-stream"
version = "0.1.0"
edition = "2021"

[features]
default = ["srt", "rtmp"]
srt = ["zap-stream-core/srt"]
rtmp = ["zap-stream-core/rtmp"]
test-pattern = ["zap-stream-core/test-pattern", "zap-stream-db/test-pattern"]

[dependencies]
zap-stream-db = { path = "../zap-stream-db" }
zap-stream-core = { path = "../core" }

uuid.workspace = true
ffmpeg-rs-raw.workspace = true
anyhow.workspace = true
log.workspace = true
tokio.workspace = true
async-trait.workspace = true
serde.workspace = true
chrono.workspace = true
hex.workspace = true
url.workspace = true

# http stuff
hyper = { version = "1.5.1", features = ["server"] }
bytes = "1.8.0"
http-body-util = "0.1.2"
tokio-util = "0.7.13"
hyper-util = "0.1.10"

# direct deps
config = { version = "0.15.6", features = ["yaml"] }
nostr-sdk = { version = "0.38.0" }
fedimint-tonic-lnd = { version = "0.2.0", default-features = false, features = ["invoicesrpc", "versionrpc"] }
reqwest = { version = "0.12.9", features = ["stream", "json"] }
base64 = { version = "0.22.1" }
sha2 = { version = "0.10.8" }
pretty_env_logger = "0.5.0"
clap = { version = "4.5.16", features = ["derive"] }
futures-util = "0.3.31"
43
crates/zap-stream/Dockerfile
Executable file
@ -0,0 +1,43 @@
ARG IMAGE=rust:bookworm

FROM $IMAGE AS build
WORKDIR /app/src
ENV FFMPEG_DIR=/app/ffmpeg
COPY . .
RUN apt update && \
    apt install -y \
    build-essential \
    libx264-dev \
    libx265-dev \
    libwebp-dev \
    libpng-dev \
    nasm \
    protobuf-compiler \
    libclang-dev && \
    rm -rf /var/lib/apt/lists/*
RUN git clone --single-branch --branch release/7.1 https://git.ffmpeg.org/ffmpeg.git && \
    cd ffmpeg && \
    ./configure \
    --prefix=$FFMPEG_DIR \
    --disable-programs \
    --disable-doc \
    --disable-network \
    --enable-gpl \
    --enable-version3 \
    --disable-postproc \
    --enable-libx264 \
    --enable-libx265 \
    --enable-libwebp \
    --disable-static \
    --enable-shared && \
    make -j$(nproc) && make install
RUN cargo install --path . --bin zap-stream-core --root /app/build --features zap-stream

FROM $IMAGE AS runner
WORKDIR /app
RUN apt update && \
    apt install -y libx264-164 && \
    rm -rf /var/lib/apt/lists/*
COPY --from=build /app/build .
COPY --from=build /app/ffmpeg/lib/ /lib
ENTRYPOINT ["/app/bin/zap-stream-core"]
51
crates/zap-stream/config.yaml
Executable file
@ -0,0 +1,51 @@
# List of endpoints to listen on
# currently supporting srt/rtmp/tcp/file/test-pattern
# All the endpoints must be valid URIs
endpoints:
  - "rtmp://127.0.0.1:3336"
  - "srt://127.0.0.1:3335"
  - "tcp://127.0.0.1:3334"

# Output directory for recording / hls
output_dir: "./out"

# Public URL for serving files from [output_dir]
public_url: "http://localhost:8080"

# Bind address for http server serving files from [output_dir]
listen_http: "127.0.0.1:8080"

# Overseer is the main control structure which controls access to the service
#
# ** ONLY 1 OVERSEER CAN BE CONFIGURED AT A TIME **
#
# Supported overseers:
# static:
#   egress-types:
#     - hls
#     - recorder
# webhook:
#   url: <endpoint-url>
# zap-stream:
#   private-key: "nsec1234"
#   relays:
#     - "wss://relay.com"
#   lnd:
#     address: <ip:port>
#     cert: <path-to-tls-cert>
#     macaroon: <path-to-macaroon>
#   database: <database-connection-string>
#
overseer:
  zap-stream:
    cost: 16
    nsec: "nsec1wya428srvpu96n4h78gualaj7wqw4ecgatgja8d5ytdqrxw56r2se440y4"
    blossom:
      - "http://localhost:8881"
    relays:
      - "ws://localhost:7766"
    database: "mysql://root:root@localhost:3368/zap_stream?max_connections=2"
    lnd:
      address: "https://127.0.0.1:10001"
      cert: "/home/kieran/.polar/networks/1/volumes/lnd/alice/tls.cert"
      macaroon: "/home/kieran/.polar/networks/1/volumes/lnd/alice/data/chain/bitcoin/regtest/admin.macaroon"
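The settings module itself is not part of this diff; the following Rust sketch is only an assumption about the Settings shape implied by the config above (field and type names are hypothetical, the real definition lives in crates/zap-stream/src/settings.rs):

use serde::Deserialize;

// Hedged sketch of an externally-tagged overseer enum matching the yaml above.
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum OverseerSettings {
    ZapStream { cost: i64, nsec: String, relays: Vec<String>, database: String },
    // ... other overseer kinds documented in the comments above
}

#[derive(Debug, Clone, Deserialize)]
pub struct Settings {
    pub endpoints: Vec<String>,
    pub output_dir: String,
    pub public_url: String,
    pub listen_http: String,
    pub overseer: OverseerSettings,
}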
138
crates/zap-stream/dev-setup/strfry.conf/strfry.conf.default
Normal file
@ -0,0 +1,138 @@
##
## Default strfry config
##

# Directory that contains the strfry LMDB database (restart required)
db = "./strfry-db/"

dbParams {
    # Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
    maxreaders = 256

    # Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
    mapsize = 10995116277760

    # Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
    noReadAhead = false
}

events {
    # Maximum size of normalised JSON, in bytes
    maxEventSize = 65536

    # Events newer than this will be rejected
    rejectEventsNewerThanSeconds = 900

    # Events older than this will be rejected
    rejectEventsOlderThanSeconds = 94608000

    # Ephemeral events older than this will be rejected
    rejectEphemeralEventsOlderThanSeconds = 60

    # Ephemeral events will be deleted from the DB when older than this
    ephemeralEventsLifetimeSeconds = 300

    # Maximum number of tags allowed
    maxNumTags = 2000

    # Maximum size for tag values, in bytes
    maxTagValSize = 1024
}

relay {
    # Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
    bind = "0.0.0.0"

    # Port to open for the nostr websocket protocol (restart required)
    port = 7777

    # Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
    nofiles = 1000000

    # HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
    realIpHeader = ""

    info {
        # NIP-11: Name of this server. Short/descriptive (< 30 characters)
        name = "strfry default"

        # NIP-11: Detailed information about relay, free-form
        description = "This is a strfry instance."

        # NIP-11: Administrative nostr pubkey, for contact purposes
        pubkey = ""

        # NIP-11: Alternative administrative contact (email, website, etc)
        contact = ""
    }

    # Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
    maxWebsocketPayloadSize = 131072

    # Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
    autoPingSeconds = 55

    # If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
    enableTcpKeepalive = false

    # How much uninterrupted CPU time a REQ query should get during its DB scan
    queryTimesliceBudgetMicroseconds = 10000

    # Maximum records that can be returned per filter
    maxFilterLimit = 500

    # Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
    maxSubsPerConnection = 20

    writePolicy {
        # If non-empty, path to an executable script that implements the writePolicy plugin logic
        plugin = "/app/write-policy.py"
    }

    compression {
        # Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
        enabled = true

        # Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
        slidingWindow = true
    }

    logging {
        # Dump all incoming messages
        dumpInAll = false

        # Dump all incoming EVENT messages
        dumpInEvents = false

        # Dump all incoming REQ/CLOSE messages
        dumpInReqs = false

        # Log performance metrics for initial REQ database scans
        dbScanPerf = false

        # Log reason for invalid event rejection? Can be disabled to silence excessive logging
        invalidEvents = true
    }

    numThreads {
        # Ingester threads: route incoming requests, validate events/sigs (restart required)
        ingester = 3

        # reqWorker threads: Handle initial DB scan for events (restart required)
        reqWorker = 3

        # reqMonitor threads: Handle filtering of new events (restart required)
        reqMonitor = 3

        # negentropy threads: Handle negentropy protocol messages (restart required)
        negentropy = 2
    }

    negentropy {
        # Support negentropy protocol messages
        enabled = true

        # Maximum records that sync will process before returning an error
        maxSyncEvents = 1000000
    }
}
32
crates/zap-stream/docker-compose.yml
Normal file
@ -0,0 +1,32 @@
name: zap-stream-core
services:
  db:
    image: mariadb
    environment:
      - "MARIADB_ROOT_PASSWORD=root"
    ports:
      - "3368:3306"
    volumes:
      - "./dev-setup/db.sql:/docker-entrypoint-initdb.d/00-init.sql"
  relay:
    image: dockurr/strfry
    ports:
      - "7766:7777"
    volumes:
      - "relay:/app/strfry-db"
      - "./dev-setup/strfry.conf:/etc/strfry.conf"
  blossom:
    depends_on:
      - db
    image: voidic/route96
    environment:
      - "RUST_LOG=info"
    ports:
      - "8881:8000"
    volumes:
      - "blossom:/app/data"
      - "./dev-setup/route96.toml:/app/config.toml"
volumes:
  db:
  blossom:
  relay:
17
crates/zap-stream/index.html
Normal file
@ -0,0 +1,17 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <title>zap-stream-core</title>
    <style>
        html, body {
            margin: 0;
            background: black;
            color: white;
            font-family: monospace;
        }
    </style>
</head>
<body>
<h1>Welcome to %%PUBLIC_URL%%</h1>
</body>
</html>
90
crates/zap-stream/src/blossom.rs
Normal file
@ -0,0 +1,90 @@
use anyhow::Result;
use base64::Engine;
use nostr_sdk::{EventBuilder, JsonUtil, Keys, Kind, Tag, Timestamp};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::io::SeekFrom;
use std::ops::Add;
use std::path::PathBuf;
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use url::Url;

pub struct Blossom {
    url: Url,
    client: reqwest::Client,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlobDescriptor {
    pub url: String,
    pub sha256: String,
    pub size: u64,
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub mime_type: Option<String>,
    pub created: u64,
    #[serde(rename = "nip94", skip_serializing_if = "Option::is_none")]
    pub nip94: Option<HashMap<String, String>>,
}

impl Blossom {
    pub fn new(url: &str) -> Self {
        Self {
            url: url.parse().unwrap(),
            client: reqwest::Client::new(),
        }
    }

    async fn hash_file(f: &mut File) -> Result<String> {
        let mut hash = Sha256::new();
        let mut buf: [u8; 1024] = [0; 1024];
        f.seek(SeekFrom::Start(0)).await?;
        while let Ok(data) = f.read(&mut buf[..]).await {
            if data == 0 {
                break;
            }
            hash.update(&buf[..data]);
        }
        let hash = hash.finalize();
        f.seek(SeekFrom::Start(0)).await?;
        Ok(hex::encode(hash))
    }

    pub async fn upload(
        &self,
        from_file: &PathBuf,
        keys: &Keys,
        mime: Option<&str>,
    ) -> Result<BlobDescriptor> {
        let mut f = File::open(from_file).await?;
        let hash = Self::hash_file(&mut f).await?;
        let auth_event = EventBuilder::new(Kind::Custom(24242), "Upload blob").tags([
            Tag::hashtag("upload"),
            Tag::parse(["x", &hash])?,
            Tag::expiration(Timestamp::now().add(60)),
        ]);

        let auth_event = auth_event.sign_with_keys(keys)?;

        let rsp: BlobDescriptor = self
            .client
            .put(self.url.join("/upload").unwrap())
            .header("Content-Type", mime.unwrap_or("application/octet-stream"))
            .header(
                "Authorization",
                &format!(
                    "Nostr {}",
                    base64::engine::general_purpose::STANDARD
                        .encode(auth_event.as_json().as_bytes())
                ),
            )
            .body(f)
            .send()
            .await?
            .json()
            .await?;

        Ok(rsp)
    }
}
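A hedged sketch of uploading a finished segment to a Blossom server; the server URL, path, and key handling here are illustrative (real code signs with the configured nsec):

// Hedged sketch: upload an HLS segment and read back its public URL.
let blossom = Blossom::new("http://localhost:8881");
let keys = Keys::generate(); // illustrative only
let desc = blossom
    .upload(&PathBuf::from("./out/stream-id/seg-1.ts"), &keys, Some("video/mp2t"))
    .await?;
println!("uploaded: {} ({} bytes)", desc.url, desc.size);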
95
crates/zap-stream/src/http.rs
Normal file
@ -0,0 +1,95 @@
use bytes::Bytes;
use futures_util::TryStreamExt;
use http_body_util::combinators::BoxBody;
use http_body_util::{BodyExt, Full, StreamBody};
use hyper::body::{Frame, Incoming};
use hyper::service::Service;
use hyper::{Method, Request, Response};
use log::error;
use std::future::Future;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use tokio::fs::File;
use tokio_util::io::ReaderStream;
use zap_stream_core::overseer::Overseer;
use crate::overseer::ZapStreamOverseer;

#[derive(Clone)]
pub struct HttpServer {
    index: String,
    files_dir: PathBuf,
    overseer: Arc<ZapStreamOverseer>,
}

impl HttpServer {
    pub fn new(index: String, files_dir: PathBuf, overseer: Arc<ZapStreamOverseer>) -> Self {
        Self {
            index,
            files_dir,
            overseer,
        }
    }
}

impl Service<Request<Incoming>> for HttpServer {
    type Response = Response<BoxBody<Bytes, Self::Error>>;
    type Error = anyhow::Error;
    type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;

    fn call(&self, req: Request<Incoming>) -> Self::Future {
        // check is index.html (parenthesized so GET is required for both paths)
        if req.method() == Method::GET
            && (req.uri().path() == "/" || req.uri().path() == "/index.html")
        {
            let index = self.index.clone();
            return Box::pin(async move {
                Ok(Response::builder()
                    .header("content-type", "text/html")
                    .header("server", "zap-stream-core")
                    .body(
                        Full::new(Bytes::from(index))
                            .map_err(|e| match e {})
                            .boxed(),
                    )?)
            });
        }

        // check if mapped to file
        let dst_path = self.files_dir.join(req.uri().path()[1..].to_string());
        if dst_path.exists() {
            return Box::pin(async move {
                let rsp = Response::builder()
                    .header("server", "zap-stream-core")
                    .header("access-control-allow-origin", "*")
                    .header("access-control-allow-headers", "*")
                    .header("access-control-allow-methods", "HEAD, GET");

                if req.method() == Method::HEAD {
                    return Ok(rsp.body(BoxBody::default())?);
                }
                let f = File::open(&dst_path).await?;
                let f_stream = ReaderStream::new(f);
                let body = StreamBody::new(
                    f_stream
                        .map_ok(Frame::data)
                        .map_err(|e| Self::Error::new(e)),
                )
                .boxed();
                Ok(rsp.body(body)?)
            });
        }

        // otherwise handle in overseer
        let overseer = self.overseer.clone();
        Box::pin(async move {
            match overseer.api(req).await {
                Ok(res) => Ok(res),
                Err(e) => {
                    error!("{}", e);
                    Ok(Response::builder().status(500).body(BoxBody::default())?)
                }
            }
        })
    }
}
148
crates/zap-stream/src/main.rs
Normal file
@ -0,0 +1,148 @@
use anyhow::{bail, Result};
use clap::Parser;
use config::Config;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{av_log_set_callback, av_version_info};
use ffmpeg_rs_raw::{av_log_redirect, rstr};
use hyper::server::conn::http1;
use hyper_util::rt::TokioIo;
use log::{error, info};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use tokio::time::sleep;
use url::Url;
#[cfg(feature = "rtmp")]
use zap_stream_core::ingress::rtmp;
#[cfg(feature = "srt")]
use zap_stream_core::ingress::srt;
#[cfg(feature = "test-pattern")]
use zap_stream_core::ingress::test;

use zap_stream_core::ingress::{file, tcp};
use zap_stream_core::overseer::Overseer;
use crate::http::HttpServer;
use crate::monitor::BackgroundMonitor;
use crate::overseer::ZapStreamOverseer;
use crate::settings::Settings;

mod blossom;
mod http;
mod monitor;
mod overseer;
mod settings;

#[derive(Parser, Debug)]
struct Args {}

#[tokio::main]
async fn main() -> Result<()> {
    pretty_env_logger::init();

    let _args = Args::parse();

    unsafe {
        av_log_set_callback(Some(av_log_redirect));
        info!("FFMPEG version={}", rstr!(av_version_info()));
    }

    let builder = Config::builder()
        .add_source(config::File::with_name("config.yaml"))
        .add_source(config::Environment::with_prefix("APP"))
        .build()?;

    let settings: Settings = builder.try_deserialize()?;
    let overseer = settings.get_overseer().await?;

    let mut tasks = vec![];
    for e in &settings.endpoints {
        match try_create_listener(e, &settings.output_dir, &overseer) {
            Ok(l) => tasks.push(l),
            Err(e) => error!("{}", e),
        }
    }

    let http_addr: SocketAddr = settings.listen_http.parse()?;
    let index_html = include_str!("../index.html").replace("%%PUBLIC_URL%%", &settings.public_url);

    let server = HttpServer::new(
        index_html,
        PathBuf::from(settings.output_dir),
        overseer.clone(),
    );
    tasks.push(tokio::spawn(async move {
        let listener = TcpListener::bind(&http_addr).await?;

        loop {
            let (socket, _) = listener.accept().await?;
            let io = TokioIo::new(socket);
            let server = server.clone();
            tokio::spawn(async move {
                if let Err(e) = http1::Builder::new().serve_connection(io, server).await {
                    error!("Failed to handle request: {}", e);
                }
            });
        }
    }));

    // spawn background job
    let mut bg = BackgroundMonitor::new(overseer.clone());
    tasks.push(tokio::spawn(async move {
        loop {
            if let Err(e) = bg.check().await {
                error!("{}", e);
            }
            sleep(Duration::from_secs(10)).await;
        }
    }));

    for handle in tasks {
        if let Err(e) = handle.await? {
            error!("{e}");
        }
    }
    info!("Server closed");
    Ok(())
}

fn try_create_listener(
    u: &str,
    out_dir: &str,
    overseer: &Arc<ZapStreamOverseer>,
) -> Result<JoinHandle<Result<()>>> {
    let url: Url = u.parse()?;
    match url.scheme() {
        #[cfg(feature = "srt")]
        "srt" => Ok(tokio::spawn(srt::listen(
            out_dir.to_string(),
            format!("{}:{}", url.host().unwrap(), url.port().unwrap()),
            overseer.clone(),
        ))),
        #[cfg(feature = "rtmp")]
        "rtmp" => Ok(tokio::spawn(rtmp::listen(
            out_dir.to_string(),
            format!("{}:{}", url.host().unwrap(), url.port().unwrap()),
            overseer.clone(),
        ))),
        "tcp" => Ok(tokio::spawn(tcp::listen(
            out_dir.to_string(),
            format!("{}:{}", url.host().unwrap(), url.port().unwrap()),
            overseer.clone(),
        ))),
        "file" => Ok(tokio::spawn(file::listen(
            out_dir.to_string(),
            PathBuf::from(url.path()),
            overseer.clone(),
        ))),
        #[cfg(feature = "test-pattern")]
        "test-pattern" => Ok(tokio::spawn(test::listen(
            out_dir.to_string(),
            overseer.clone(),
        ))),
        _ => {
            bail!("Unknown endpoint config: {u}");
        }
    }
}
19
crates/zap-stream/src/monitor.rs
Normal file
@ -0,0 +1,19 @@
use crate::overseer::ZapStreamOverseer;
use anyhow::Result;
use std::sync::Arc;
use zap_stream_core::overseer::Overseer;

/// Monitor stream status, perform any necessary cleanup
pub struct BackgroundMonitor {
    overseer: Arc<ZapStreamOverseer>,
}

impl BackgroundMonitor {
    pub fn new(overseer: Arc<ZapStreamOverseer>) -> Self {
        Self { overseer }
    }

    pub async fn check(&mut self) -> Result<()> {
        self.overseer.check_streams().await
    }
}
522
crates/zap-stream/src/overseer.rs
Normal file
@ -0,0 +1,522 @@
|
||||
use crate::blossom::{BlobDescriptor, Blossom};
|
||||
use zap_stream_core::egress::hls::HlsEgress;
|
||||
use zap_stream_core::egress::EgressConfig;
|
||||
use zap_stream_core::ingress::ConnectionInfo;
|
||||
use zap_stream_core::overseer::{IngressInfo, IngressStreamType, Overseer};
|
||||
use zap_stream_core::pipeline::{EgressType, PipelineConfig};
|
||||
use zap_stream_core::variant::{StreamMapping, VariantStream};
|
||||
use anyhow::{anyhow, bail, Result};
|
||||
use async_trait::async_trait;
|
||||
use base64::alphabet::STANDARD;
|
||||
use base64::Engine;
|
||||
use bytes::Bytes;
|
||||
use chrono::Utc;
|
||||
use fedimint_tonic_lnd::verrpc::VersionRequest;
|
||||
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVCodecID::AV_CODEC_ID_MJPEG;
|
||||
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVFrame;
|
||||
use ffmpeg_rs_raw::Encoder;
|
||||
use futures_util::FutureExt;
|
||||
use http_body_util::combinators::BoxBody;
|
||||
use http_body_util::{BodyExt, Full};
|
||||
use hyper::body::Incoming;
|
||||
use hyper::{Method, Request, Response};
|
||||
use log::{error, info, warn};
|
||||
use nostr_sdk::bitcoin::PrivateKey;
|
||||
use nostr_sdk::prelude::Coordinate;
|
||||
use nostr_sdk::{Client, Event, EventBuilder, JsonUtil, Keys, Kind, Tag, ToBech32};
|
||||
use serde::Serialize;
|
||||
use std::collections::HashSet;
|
||||
use std::env::temp_dir;
|
||||
use std::fs::create_dir_all;
|
||||
use std::path::PathBuf;
|
||||
use std::str::FromStr;
|
||||
use std::sync::Arc;
|
||||
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
|
||||
use tokio::sync::RwLock;
|
||||
use url::Url;
|
||||
use uuid::Uuid;
|
||||
use zap_stream_core::variant::audio::AudioVariant;
|
||||
use zap_stream_core::variant::mapping::VariantMapping;
|
||||
use zap_stream_core::variant::video::VideoVariant;
|
||||
use zap_stream_db::sqlx::Encode;
|
||||
use zap_stream_db::{UserStream, UserStreamState, ZapStreamDb};
|
||||
use crate::settings::LndSettings;
|
||||
|
||||
const STREAM_EVENT_KIND: u16 = 30_313;
|
||||
|
||||
/// zap.stream NIP-53 overseer
|
||||
pub struct ZapStreamOverseer {
|
||||
/// Dir where HTTP server serves files from
|
||||
out_dir: String,
|
||||
/// Database instance for accounts/streams
|
||||
db: ZapStreamDb,
|
||||
/// LND node connection
|
||||
lnd: fedimint_tonic_lnd::Client,
|
||||
/// Nostr client for publishing events
|
||||
client: Client,
|
||||
/// Nostr keys used to sign events
|
||||
keys: Keys,
|
||||
/// List of blossom servers to upload segments to
|
||||
blossom_servers: Vec<Blossom>,
|
||||
/// Public facing URL pointing to [out_dir]
|
||||
public_url: String,
|
||||
/// Cost / second / variant
|
||||
cost: i64,
|
||||
/// Currently active streams
|
||||
/// Any streams which are not contained in this set are dead
|
||||
active_streams: Arc<RwLock<HashSet<Uuid>>>,
|
||||
}
|
||||
|
||||
impl ZapStreamOverseer {
    pub async fn new(
        out_dir: &String,
        public_url: &String,
        private_key: &str,
        db: &str,
        lnd: &LndSettings,
        relays: &Vec<String>,
        blossom_servers: &Option<Vec<String>>,
        cost: i64,
    ) -> Result<Self> {
        let db = ZapStreamDb::new(db).await?;
        db.migrate().await?;

        #[cfg(debug_assertions)]
        {
            // Dev convenience: seed the all-zero pubkey with a balance
            let uid = db.upsert_user(&[0; 32]).await?;
            db.update_user_balance(uid, 100_000_000).await?;
            let user = db.get_user(uid).await?;

            info!(
                "ZERO pubkey: uid={},key={},balance={}",
                user.id,
                user.stream_key,
                user.balance / 1000
            );
        }

        let mut lnd = fedimint_tonic_lnd::connect(
            lnd.address.clone(),
            PathBuf::from(&lnd.cert),
            PathBuf::from(&lnd.macaroon),
        )
        .await?;

        let version = lnd
            .versioner()
            .get_version(VersionRequest::default())
            .await?;
        info!("LND connected: v{}", version.into_inner().version);

        let keys = Keys::from_str(private_key)?;
        let client = nostr_sdk::ClientBuilder::new().signer(keys.clone()).build();
        for r in relays {
            client.add_relay(r).await?;
        }
        client.connect().await;

        Ok(Self {
            out_dir: out_dir.clone(),
            db,
            lnd,
            client,
            keys,
            blossom_servers: blossom_servers
                .as_ref()
                .unwrap_or(&Vec::new())
                .into_iter()
                .map(|b| Blossom::new(b))
                .collect(),
            public_url: public_url.clone(),
            cost,
            active_streams: Arc::new(RwLock::new(HashSet::new())),
        })
    }

    pub(crate) async fn api(
        &self,
        req: Request<Incoming>,
    ) -> Result<Response<BoxBody<Bytes, anyhow::Error>>> {
        let base = Response::builder()
            .header("server", "zap-stream-core")
            .header("access-control-allow-origin", "*")
            .header("access-control-allow-headers", "*")
            .header("access-control-allow-methods", "HEAD, GET");

        Ok(match (req.method(), req.uri().path()) {
            (&Method::GET, "/api/v1/account") => {
                self.check_nip98_auth(req)?;
                base.body(Default::default())?
            }
            (&Method::PATCH, "/api/v1/account") => {
                bail!("Not implemented")
            }
            (&Method::GET, "/api/v1/topup") => {
                bail!("Not implemented")
            }
            (&Method::PATCH, "/api/v1/event") => {
                bail!("Not implemented")
            }
            (&Method::POST, "/api/v1/withdraw") => {
                bail!("Not implemented")
            }
            (&Method::POST, "/api/v1/account/forward") => {
                bail!("Not implemented")
            }
            (&Method::DELETE, "/api/v1/account/forward/<id>") => {
                bail!("Not implemented")
            }
            (&Method::GET, "/api/v1/account/history") => {
                bail!("Not implemented")
            }
            (&Method::GET, "/api/v1/account/keys") => {
                bail!("Not implemented")
            }
            _ => {
                if req.method() == Method::OPTIONS {
                    base.body(Default::default())?
                } else {
                    base.status(404).body(Default::default())?
                }
            }
        })
    }

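    // Illustrative only (not part of this commit): a client would call the
    // implemented account endpoint with a NIP-98 token, e.g.
    //
    //   curl -H "Authorization: Nostr <base64 kind-27235 event>" \
    //        http://localhost:8080/api/v1/account
    //
    // Host and port here are assumptions; see `listen_http` in settings.rs.
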
    fn stream_to_event_builder(&self, stream: &UserStream) -> Result<EventBuilder> {
        let mut tags = vec![
            Tag::parse(&["d".to_string(), stream.id.to_string()])?,
            Tag::parse(&["status".to_string(), stream.state.to_string()])?,
            Tag::parse(&["starts".to_string(), stream.starts.timestamp().to_string()])?,
        ];
        if let Some(ref ends) = stream.ends {
            tags.push(Tag::parse(&[
                "ends".to_string(),
                ends.timestamp().to_string(),
            ])?);
        }
        if let Some(ref title) = stream.title {
            tags.push(Tag::parse(&["title".to_string(), title.to_string()])?);
        }
        if let Some(ref summary) = stream.summary {
            tags.push(Tag::parse(&["summary".to_string(), summary.to_string()])?);
        }
        if let Some(ref image) = stream.image {
            tags.push(Tag::parse(&["image".to_string(), image.to_string()])?);
        }
        if let Some(ref thumb) = stream.thumb {
            tags.push(Tag::parse(&["thumb".to_string(), thumb.to_string()])?);
        }
        if let Some(ref content_warning) = stream.content_warning {
            tags.push(Tag::parse(&[
                "content_warning".to_string(),
                content_warning.to_string(),
            ])?);
        }
        if let Some(ref goal) = stream.goal {
            tags.push(Tag::parse(&["goal".to_string(), goal.to_string()])?);
        }
        if let Some(ref pinned) = stream.pinned {
            tags.push(Tag::parse(&["pinned".to_string(), pinned.to_string()])?);
        }
        if let Some(ref tags_csv) = stream.tags {
            for tag in tags_csv.split(',') {
                tags.push(Tag::parse(&["t".to_string(), tag.to_string()])?);
            }
        }

        let kind = Kind::from(STREAM_EVENT_KIND);
        let coord = Coordinate::new(kind, self.keys.public_key).identifier(&stream.id);
        tags.push(Tag::parse([
            "alt",
            &format!("Watch live on https://zap.stream/{}", coord.to_bech32()?),
        ])?);
        Ok(EventBuilder::new(kind, "").tags(tags))
    }

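    // For reference (values illustrative), the builder above yields a NIP-53
    // live event shaped roughly like:
    //
    //   kind: 30313
    //   tags: [["d", "<stream-uuid>"], ["status", "live"],
    //          ["starts", "<unix-ts>"], ["title", "..."], ["t", "..."],
    //          ["alt", "Watch live on https://zap.stream/naddr1..."]]
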
    fn blob_to_event_builder(&self, stream: &BlobDescriptor) -> Result<EventBuilder> {
        let tags = if let Some(tags) = stream.nip94.as_ref() {
            tags.iter()
                .map_while(|(k, v)| Tag::parse([k, v]).ok())
                .collect()
        } else {
            let mut tags = vec![
                Tag::parse(["x", &stream.sha256])?,
                Tag::parse(["url", &stream.url])?,
                Tag::parse(["size", &stream.size.to_string()])?,
            ];
            if let Some(m) = stream.mime_type.as_ref() {
                tags.push(Tag::parse(["m", m])?)
            }
            tags
        };

        Ok(EventBuilder::new(Kind::FileMetadata, "").tags(tags))
    }

    async fn publish_stream_event(&self, stream: &UserStream, pubkey: &Vec<u8>) -> Result<Event> {
        let extra_tags = vec![
            Tag::parse(["p", hex::encode(pubkey).as_str(), "", "host"])?,
            Tag::parse([
                "streaming",
                self.map_to_stream_public_url(stream, "live.m3u8")?.as_str(),
            ])?,
            Tag::parse([
                "image",
                self.map_to_stream_public_url(stream, "thumb.webp")?.as_str(),
            ])?,
            Tag::parse(["service", self.map_to_public_url("api/v1")?.as_str()])?,
        ];
        let ev = self
            .stream_to_event_builder(stream)?
            .tags(extra_tags)
            .sign_with_keys(&self.keys)?;
        self.client.send_event(ev.clone()).await?;
        Ok(ev)
    }

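    // With `public_url` set to e.g. http://localhost:8080 (illustrative),
    // the tags above resolve to:
    //   streaming -> http://localhost:8080/<stream-uuid>/live.m3u8
    //   image     -> http://localhost:8080/<stream-uuid>/thumb.webp
    //   service   -> http://localhost:8080/api/v1
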
    fn map_to_stream_public_url(&self, stream: &UserStream, path: &str) -> Result<String> {
        self.map_to_public_url(&format!("{}/{}", stream.id, path))
    }

    fn map_to_public_url(&self, path: &str) -> Result<String> {
        let u: Url = self.public_url.parse()?;
        Ok(u.join(path)?.to_string())
    }

    fn check_nip98_auth(&self, req: Request<Incoming>) -> Result<()> {
        let auth = if let Some(a) = req.headers().get("authorization") {
            a.to_str()?
        } else {
            bail!("Authorization header missing");
        };

        if !auth.starts_with("Nostr ") {
            bail!("Invalid authorization scheme");
        }

        // Decode the base64-encoded kind-27235 event after the "Nostr "
        // prefix; note this currently only decodes and logs the payload,
        // signature/method/URL verification is not implemented yet.
        let json = String::from_utf8(
            base64::engine::general_purpose::STANDARD.decode(auth[6..].as_bytes())?,
        )?;
        info!("{}", json);

        Ok(())
    }
}

#[derive(Serialize)]
struct Endpoint {}

#[derive(Serialize)]
struct AccountInfo {
    pub endpoints: Vec<Endpoint>,
    pub event: Event,
    pub balance: u64,
}

#[async_trait]
impl Overseer for ZapStreamOverseer {
    async fn check_streams(&self) -> Result<()> {
        let active_streams = self.db.list_live_streams().await?;
        for stream in active_streams {
            // End any stream marked live in the DB that has no active pipeline
            let id = Uuid::parse_str(&stream.id)?;
            info!("Checking stream is alive: {}", stream.id);
            let is_active = {
                let streams = self.active_streams.read().await;
                streams.contains(&id)
            };
            if !is_active {
                if let Err(e) = self.on_end(&id).await {
                    error!("Failed to end dead stream {}: {}", &id, e);
                }
            }
        }
        Ok(())
    }

    async fn start_stream(
        &self,
        connection: &ConnectionInfo,
        stream_info: &IngressInfo,
    ) -> Result<PipelineConfig> {
        let uid = self
            .db
            .find_user_stream_key(&connection.key)
            .await?
            .ok_or_else(|| anyhow::anyhow!("User not found"))?;

        let user = self.db.get_user(uid).await?;
        if user.balance <= 0 {
            bail!("Not enough balance");
        }

        let variants = get_default_variants(&stream_info)?;

        let mut egress = vec![];
        egress.push(EgressType::HLS(EgressConfig {
            name: "hls".to_string(),
            variants: variants.iter().map(|v| v.id()).collect(),
        }));

        let stream_id = Uuid::new_v4();
        // insert new stream record
        let mut new_stream = UserStream {
            id: stream_id.to_string(),
            user_id: uid,
            starts: Utc::now(),
            state: UserStreamState::Live,
            ..Default::default()
        };
        let stream_event = self.publish_stream_event(&new_stream, &user.pubkey).await?;
        new_stream.event = Some(stream_event.as_json());

        let mut streams = self.active_streams.write().await;
        streams.insert(stream_id);

        self.db.insert_stream(&new_stream).await?;
        self.db.update_stream(&new_stream).await?;

        Ok(PipelineConfig {
            id: stream_id,
            variants,
            egress,
        })
    }

    async fn on_segment(
        &self,
        pipeline_id: &Uuid,
        variant_id: &Uuid,
        index: u64,
        duration: f32,
        path: &PathBuf,
    ) -> Result<()> {
        let cost = self.cost * duration.round() as i64;
        let stream = self.db.get_stream(pipeline_id).await?;
        let bal = self
            .db
            .tick_stream(pipeline_id, stream.user_id, duration, cost)
            .await?;
        if bal <= 0 {
            bail!("Not enough balance");
        }

        // Upload to blossom servers if configured
        let mut blobs = vec![];
        for b in &self.blossom_servers {
            blobs.push(b.upload(path, &self.keys, Some("video/mp2t")).await?);
        }
        if let Some(blob) = blobs.first() {
            let a_tag = format!(
                "{}:{}:{}",
                STREAM_EVENT_KIND,
                self.keys.public_key.to_hex(),
                pipeline_id
            );
            let mut n94 = self.blob_to_event_builder(blob)?.add_tags([
                Tag::parse(["a", &a_tag])?,
                Tag::parse(["d", variant_id.to_string().as_str()])?,
                Tag::parse(["duration", duration.to_string().as_str()])?,
            ]);
            for b in blobs.iter().skip(1) {
                n94 = n94.tag(Tag::parse(["url", &b.url])?);
            }
            let n94 = n94.sign_with_keys(&self.keys)?;
            let cc = self.client.clone();
            // Publish fire-and-forget so a slow relay cannot stall segment handling
            tokio::spawn(async move {
                if let Err(e) = cc.send_event(n94).await {
                    warn!("Error sending event: {}", e);
                }
            });
            info!("Published N94 segment to {}", blob.url);
        }

        Ok(())
    }

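    // The "a" tag above is an addressable-event coordinate of the form
    //   30313:<service pubkey hex>:<stream uuid>
    // linking each N94 segment event back to the stream's live event.
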
    async fn on_thumbnail(
        &self,
        pipeline_id: &Uuid,
        width: usize,
        height: usize,
        pixels: &PathBuf,
    ) -> Result<()> {
        // nothing to do
        Ok(())
    }

    async fn on_end(&self, pipeline_id: &Uuid) -> Result<()> {
        let mut stream = self.db.get_stream(pipeline_id).await?;
        let user = self.db.get_user(stream.user_id).await?;

        let mut streams = self.active_streams.write().await;
        streams.remove(pipeline_id);

        stream.state = UserStreamState::Ended;
        let event = self.publish_stream_event(&stream, &user.pubkey).await?;
        stream.event = Some(event.as_json());
        self.db.update_stream(&stream).await?;

        info!("Stream ended {}", stream.id);
        Ok(())
    }
}

fn get_default_variants(info: &IngressInfo) -> Result<Vec<VariantStream>> {
    let mut vars: Vec<VariantStream> = vec![];
    if let Some(video_src) = info
        .streams
        .iter()
        .find(|c| c.stream_type == IngressStreamType::Video)
    {
        vars.push(VariantStream::CopyVideo(VariantMapping {
            id: Uuid::new_v4(),
            src_index: video_src.index,
            dst_index: 0,
            group_id: 0,
        }));
        vars.push(VariantStream::Video(VideoVariant {
            mapping: VariantMapping {
                id: Uuid::new_v4(),
                src_index: video_src.index,
                dst_index: 1,
                group_id: 1,
            },
            width: 1280,
            height: 720,
            fps: video_src.fps,
            bitrate: 3_000_000,
            codec: "libx264".to_string(),
            profile: 100,
            level: 51,
            keyframe_interval: video_src.fps as u16 * 2,
            pixel_format: AV_PIX_FMT_YUV420P as u32,
        }));
    }

    if let Some(audio_src) = info
        .streams
        .iter()
        .find(|c| c.stream_type == IngressStreamType::Audio)
    {
        vars.push(VariantStream::CopyAudio(VariantMapping {
            id: Uuid::new_v4(),
            src_index: audio_src.index,
            dst_index: 2,
            group_id: 0,
        }));
        vars.push(VariantStream::Audio(AudioVariant {
            mapping: VariantMapping {
                id: Uuid::new_v4(),
                src_index: audio_src.index,
                dst_index: 3,
                group_id: 1,
            },
            bitrate: 192_000,
            codec: "aac".to_string(),
            channels: 2,
            sample_rate: 48_000,
            sample_fmt: "fltp".to_owned(),
        }));
    }

    Ok(vars)
}
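// Resulting default layout (illustrative, assuming one video and one audio
// ingest stream): group 0 is the passthrough pair, CopyVideo (dst 0) plus
// CopyAudio (dst 2); group 1 is the transcoded pair, 1280x720 libx264 at
// 3 Mbps (dst 1) plus 192 kbps stereo AAC at 48 kHz (dst 3).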
90
crates/zap-stream/src/settings.rs
Normal file
@ -0,0 +1,90 @@
use crate::overseer::ZapStreamOverseer;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use zap_stream_core::overseer::Overseer;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Settings {
    /// List of listen endpoints
    ///
    /// - srt://localhost:3333
    /// - tcp://localhost:3334
    /// - rtmp://localhost:1935
    pub endpoints: Vec<String>,

    /// Where to store output (static files)
    pub output_dir: String,

    /// Public facing URL that maps to [output_dir]
    pub public_url: String,

    /// Binding address for http server serving files from [output_dir]
    pub listen_http: String,

    /// Overseer service, see [crate::overseer::Overseer] for more info
    pub overseer: OverseerConfig,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum OverseerConfig {
    /// Static output
    Local,
    /// Control system via external API
    Webhook {
        /// Webhook service URL
        url: String,
    },
    /// NIP-53 service (i.e. zap.stream backend)
    ZapStream {
        /// MYSQL database connection string
        database: String,
        /// LND node connection details
        lnd: LndSettings,
        /// Relays to publish events to
        relays: Vec<String>,
        /// Nsec to sign nostr events
        nsec: String,
        /// Blossom servers
        blossom: Option<Vec<String>>,
        /// Cost (milli-sats) / second / variant
        cost: i64,
    },
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LndSettings {
    pub address: String,
    pub cert: String,
    pub macaroon: String,
}

impl Settings {
    pub async fn get_overseer(&self) -> anyhow::Result<Arc<ZapStreamOverseer>> {
        match &self.overseer {
            OverseerConfig::ZapStream {
                nsec: private_key,
                database,
                lnd,
                relays,
                blossom,
                cost,
            } => Ok(Arc::new(
                ZapStreamOverseer::new(
                    &self.output_dir,
                    &self.public_url,
                    private_key,
                    database,
                    lnd,
                    relays,
                    blossom,
                    *cost,
                )
                .await?,
            )),
            _ => {
                // Local and Webhook overseers are still WIP (see the core crate features)
                panic!("Unsupported overseer");
            }
        }
    }
}
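Taken together, a configuration selecting the zap.stream overseer could look like the sketch below. This is an illustrative YAML rendering of the structs above, not a file shipped in this commit; the values, the YAML format, and the loading mechanism are assumptions. Note the kebab-case variant name required by the serde attribute:

endpoints:
  - "srt://127.0.0.1:3333"
output_dir: "./out"
public_url: "http://localhost:8080"
listen_http: "127.0.0.1:8080"
overseer:
  zap-stream:
    database: "mysql://root:root@localhost:3306/zap_stream"
    lnd:
      address: "https://127.0.0.1:10009"
      cert: "/path/to/tls.cert"
      macaroon: "/path/to/admin.macaroon"
    relays:
      - "wss://relay.damus.io"
    nsec: "nsec1..."
    blossom:
      - "http://localhost:8881"
    cost: 10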
6
crates/zap-stream/test.sh
Executable file
@ -0,0 +1,6 @@
#!/bin/bash

ffmpeg \
  -f lavfi -i "sine=frequency=1000:sample_rate=48000" \
  -re -f lavfi -i testsrc -g 300 -r 60 -pix_fmt yuv420p -s 1280x720 \
  -c:v h264 -b:v 2000k -c:a aac -ac 2 -b:a 192k -fflags nobuffer -f mpegts srt://localhost:3333
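The script above pushes a 720p60 test pattern with a 1 kHz sine tone as MPEG-TS over SRT to the default ingest endpoint. If the rtmp endpoint is enabled instead, an equivalent push might look like this (illustrative; the port matches the settings.rs doc comment and the stream key placeholder is an assumption):

ffmpeg \
  -f lavfi -i "sine=frequency=1000:sample_rate=48000" \
  -re -f lavfi -i testsrc -g 300 -r 60 -pix_fmt yuv420p -s 1280x720 \
  -c:v h264 -b:v 2000k -c:a aac -ac 2 -b:a 192k \
  -f flv "rtmp://localhost:1935/<stream-key>"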
1
crates/zap-stream/zap.stream.svg
Normal file
File diff suppressed because one or more lines are too long
After Width: | Height: | Size: 118 KiB