refactor: convert to workspace

2025-01-29 11:48:57 +00:00
parent 20c9d107b7
commit 9045bb93e4
56 changed files with 6215 additions and 1123 deletions


@@ -0,0 +1,3 @@
target/
.git/
out/

crates/zap-stream/Cargo.lock generated Normal file

@@ -0,0 +1,7 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 4
[[package]]
name = "zap-stream"
version = "0.1.0"

crates/zap-stream/Cargo.toml Normal file

@@ -0,0 +1,43 @@
[package]
name = "zap-stream"
version = "0.1.0"
edition = "2021"
[features]
default = ["srt", "rtmp"]
srt = ["zap-stream-core/srt"]
rtmp = ["zap-stream-core/rtmp"]
test-pattern = ["zap-stream-core/test-pattern", "zap-stream-db/test-pattern"]
[dependencies]
zap-stream-db = { path = "../zap-stream-db" }
zap-stream-core = { path = "../core" }
uuid.workspace = true
ffmpeg-rs-raw.workspace = true
anyhow.workspace = true
log.workspace = true
tokio.workspace = true
async-trait.workspace = true
serde.workspace = true
chrono.workspace = true
hex.workspace = true
url.workspace = true
# http stuff
hyper = { version = "1.5.1", features = ["server"] }
bytes = "1.8.0"
http-body-util = "0.1.2"
tokio-util = "0.7.13"
hyper-util = "0.1.10"
# direct deps
config = { version = "0.15.6", features = ["yaml"] }
nostr-sdk = { version = "0.38.0" }
fedimint-tonic-lnd = { version = "0.2.0", default-features = false, features = ["invoicesrpc", "versionrpc"] }
reqwest = { version = "0.12.9", features = ["stream", "json"] }
base64 = { version = "0.22.1" }
sha2 = { version = "0.10.8" }
pretty_env_logger = "0.5.0"
clap = { version = "4.5.16", features = ["derive"] }
futures-util = "0.3.31"
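
The `*.workspace = true` entries above inherit versions from the workspace root manifest, which is not part of this hunk. A minimal sketch of what that root Cargo.toml plausibly contains; the member list is inferred from the path dependencies and all versions are illustrative:

# Hypothetical workspace root manifest (not shown in this diff)
[workspace]
resolver = "2"
members = ["crates/core", "crates/zap-stream", "crates/zap-stream-db"]

[workspace.dependencies]
uuid = { version = "1", features = ["v4"] }
anyhow = "1"
log = "0.4"
tokio = { version = "1", features = ["rt-multi-thread", "macros", "fs"] }
async-trait = "0.1"
serde = { version = "1", features = ["derive"] }
chrono = "0.4"
hex = "0.4"
url = "2"
# ffmpeg-rs-raw omitted: its real source (git or path) is not visible in this diff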

crates/zap-stream/Dockerfile Executable file

@@ -0,0 +1,43 @@
ARG IMAGE=rust:bookworm
FROM $IMAGE AS build
WORKDIR /app/src
ENV FFMPEG_DIR=/app/ffmpeg
COPY . .
RUN apt update && \
apt install -y \
build-essential \
libx264-dev \
libx265-dev \
libwebp-dev \
libpng-dev \
nasm \
protobuf-compiler \
libclang-dev && \
rm -rf /var/lib/apt/lists/*
RUN git clone --single-branch --branch release/7.1 https://git.ffmpeg.org/ffmpeg.git && \
cd ffmpeg && \
./configure \
--prefix=$FFMPEG_DIR \
--disable-programs \
--disable-doc \
--disable-network \
--enable-gpl \
--enable-version3 \
--disable-postproc \
--enable-libx264 \
--enable-libx265 \
--enable-libwebp \
--disable-static \
--enable-shared && \
make -j$(nproc) && make install
RUN cargo install --path . --bin zap-stream-core --root /app/build --features zap-stream
FROM $IMAGE AS runner
WORKDIR /app
RUN apt update && \
apt install -y libx264-164 && \
rm -rf /var/lib/apt/lists/*
COPY --from=build /app/build .
COPY --from=build /app/ffmpeg/lib/ /lib
ENTRYPOINT ["/app/bin/zap-stream-core"]
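
A hedged build-and-run example for this image; it assumes the repository root as the build context (the Dockerfile copies the whole tree) and that a config.yaml is mounted into the /app workdir, where the binary looks for it:

docker build -t zap-stream -f crates/zap-stream/Dockerfile .
docker run --rm -v "$PWD/crates/zap-stream/config.yaml:/app/config.yaml" zap-stream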

crates/zap-stream/config.yaml Executable file

@@ -0,0 +1,51 @@
# List of endpoints to listen on
# currently supporting srt/rtmp/tcp/file/test-pattern
# All endpoints must be valid URIs
endpoints:
- "rtmp://127.0.0.1:3336"
- "srt://127.0.0.1:3335"
- "tcp://127.0.0.1:3334"
# Output directory for recording / hls
output_dir: "./out"
# Public URL for serving files from [output_dir]
public_url: "http://localhost:8080"
# Bind address for http server serving files from [output_dir]
listen_http: "127.0.0.1:8080"
# Overseer is the main control structure which controls access to the service
#
# ** ONLY 1 OVERSEER CAN BE CONFIGURED AT A TIME **
#
# Supported overseers:
# static:
# egress-types:
# - hls
# - recorder
# webhook:
# url: <endpoint-url>
# zap-stream:
# private-key: "nsec1234"
# relays:
# - "wss://relay.com"
# lnd:
# address: <ip:port>
# cert: <path-to-tls-cert>
# macaroon: <path-to-macaroon>
# database: <database-connection-string>
#
overseer:
zap-stream:
cost: 16
nsec: "nsec1wya428srvpu96n4h78gualaj7wqw4ecgatgja8d5ytdqrxw56r2se440y4"
blossom:
- "http://localhost:8881"
relays:
- "ws://localhost:7766"
database: "mysql://root:root@localhost:3368/zap_stream?max_connections=2"
lnd:
address: "https://127.0.0.1:10001"
cert: "/home/kieran/.polar/networks/1/volumes/lnd/alice/tls.cert"
macaroon: "/home/kieran/.polar/networks/1/volumes/lnd/alice/data/chain/bitcoin/regtest/admin.macaroon"

dev-setup/strfry.conf Normal file

@@ -0,0 +1,138 @@
##
## Default strfry config
##
# Directory that contains the strfry LMDB database (restart required)
db = "./strfry-db/"
dbParams {
# Maximum number of threads/processes that can simultaneously have LMDB transactions open (restart required)
maxreaders = 256
# Size of mmap() to use when loading LMDB (default is 10TB, does *not* correspond to disk-space used) (restart required)
mapsize = 10995116277760
# Disables read-ahead when accessing the LMDB mapping. Reduces IO activity when DB size is larger than RAM. (restart required)
noReadAhead = false
}
events {
# Maximum size of normalised JSON, in bytes
maxEventSize = 65536
# Events newer than this will be rejected
rejectEventsNewerThanSeconds = 900
# Events older than this will be rejected
rejectEventsOlderThanSeconds = 94608000
# Ephemeral events older than this will be rejected
rejectEphemeralEventsOlderThanSeconds = 60
# Ephemeral events will be deleted from the DB when older than this
ephemeralEventsLifetimeSeconds = 300
# Maximum number of tags allowed
maxNumTags = 2000
# Maximum size for tag values, in bytes
maxTagValSize = 1024
}
relay {
# Interface to listen on. Use 0.0.0.0 to listen on all interfaces (restart required)
bind = "0.0.0.0"
# Port to open for the nostr websocket protocol (restart required)
port = 7777
# Set OS-limit on maximum number of open files/sockets (if 0, don't attempt to set) (restart required)
nofiles = 1000000
# HTTP header that contains the client's real IP, before reverse proxying (ie x-real-ip) (MUST be all lower-case)
realIpHeader = ""
info {
# NIP-11: Name of this server. Short/descriptive (< 30 characters)
name = "strfry default"
# NIP-11: Detailed information about relay, free-form
description = "This is a strfry instance."
# NIP-11: Administrative nostr pubkey, for contact purposes
pubkey = ""
# NIP-11: Alternative administrative contact (email, website, etc)
contact = ""
}
# Maximum accepted incoming websocket frame size (should be larger than max event) (restart required)
maxWebsocketPayloadSize = 131072
# Websocket-level PING message frequency (should be less than any reverse proxy idle timeouts) (restart required)
autoPingSeconds = 55
# If TCP keep-alive should be enabled (detect dropped connections to upstream reverse proxy)
enableTcpKeepalive = false
# How much uninterrupted CPU time a REQ query should get during its DB scan
queryTimesliceBudgetMicroseconds = 10000
# Maximum records that can be returned per filter
maxFilterLimit = 500
# Maximum number of subscriptions (concurrent REQs) a connection can have open at any time
maxSubsPerConnection = 20
writePolicy {
# If non-empty, path to an executable script that implements the writePolicy plugin logic
plugin = "/app/write-policy.py"
}
compression {
# Use permessage-deflate compression if supported by client. Reduces bandwidth, but slight increase in CPU (restart required)
enabled = true
# Maintain a sliding window buffer for each connection. Improves compression, but uses more memory (restart required)
slidingWindow = true
}
logging {
# Dump all incoming messages
dumpInAll = false
# Dump all incoming EVENT messages
dumpInEvents = false
# Dump all incoming REQ/CLOSE messages
dumpInReqs = false
# Log performance metrics for initial REQ database scans
dbScanPerf = false
# Log reason for invalid event rejection? Can be disabled to silence excessive logging
invalidEvents = true
}
numThreads {
# Ingester threads: route incoming requests, validate events/sigs (restart required)
ingester = 3
# reqWorker threads: Handle initial DB scan for events (restart required)
reqWorker = 3
# reqMonitor threads: Handle filtering of new events (restart required)
reqMonitor = 3
# negentropy threads: Handle negentropy protocol messages (restart required)
negentropy = 2
}
negentropy {
# Support negentropy protocol messages
enabled = true
# Maximum records that sync will process before returning an error
maxSyncEvents = 1000000
}
}

docker-compose.yml Normal file

@@ -0,0 +1,32 @@
name: zap-stream-core
services:
db:
image: mariadb
environment:
- "MARIADB_ROOT_PASSWORD=root"
ports:
- "3368:3306"
volumes:
- "./dev-setup/db.sql:/docker-entrypoint-initdb.d/00-init.sql"
relay:
image: dockurr/strfry
ports:
- "7766:7777"
volumes:
- "relay:/app/strfry-db"
- "./dev-setup/strfry.conf:/etc/strfry.conf"
blossom:
depends_on:
- db
image: voidic/route96
environment:
- "RUST_LOG=info"
ports:
- "8881:8000"
volumes:
- "blossom:/app/data"
- "./dev-setup/route96.toml:/app/config.toml"
volumes:
db:
blossom:
relay:

crates/zap-stream/index.html Normal file

@@ -0,0 +1,17 @@
<!DOCTYPE html>
<html lang="en">
<head>
<title>zap-stream-core</title>
<style>
html, body {
margin: 0;
background: black;
color: white;
font-family: monospace;
}
</style>
</head>
<body>
<h1>Welcome to %%PUBLIC_URL%%</h1>
</body>
</html>

crates/zap-stream/src/blossom.rs Normal file

@@ -0,0 +1,90 @@
use anyhow::Result;
use base64::Engine;
use nostr_sdk::{EventBuilder, JsonUtil, Keys, Kind, Tag, Timestamp};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::io::SeekFrom;
use std::ops::Add;
use std::path::PathBuf;
use tokio::fs::File;
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use url::Url;
pub struct Blossom {
url: Url,
client: reqwest::Client,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct BlobDescriptor {
pub url: String,
pub sha256: String,
pub size: u64,
#[serde(rename = "type", skip_serializing_if = "Option::is_none")]
pub mime_type: Option<String>,
pub created: u64,
#[serde(rename = "nip94", skip_serializing_if = "Option::is_none")]
pub nip94: Option<HashMap<String, String>>,
}
impl Blossom {
pub fn new(url: &str) -> Self {
Self {
url: url.parse().unwrap(),
client: reqwest::Client::new(),
}
}
async fn hash_file(f: &mut File) -> Result<String> {
let mut hash = Sha256::new();
let mut buf: [u8; 1024] = [0; 1024];
f.seek(SeekFrom::Start(0)).await?;
while let Ok(data) = f.read(&mut buf[..]).await {
if data == 0 {
break;
}
hash.update(&buf[..data]);
}
let hash = hash.finalize();
f.seek(SeekFrom::Start(0)).await?;
Ok(hex::encode(hash))
}
pub async fn upload(
&self,
from_file: &PathBuf,
keys: &Keys,
mime: Option<&str>,
) -> Result<BlobDescriptor> {
let mut f = File::open(from_file).await?;
let hash = Self::hash_file(&mut f).await?;
let auth_event = EventBuilder::new(Kind::Custom(24242), "Upload blob").tags([
Tag::hashtag("upload"),
Tag::parse(["x", &hash])?,
Tag::expiration(Timestamp::now().add(60)),
]);
let auth_event = auth_event.sign_with_keys(keys)?;
let rsp: BlobDescriptor = self
.client
.put(self.url.join("/upload").unwrap())
.header("Content-Type", mime.unwrap_or("application/octet-stream"))
.header(
"Authorization",
&format!(
"Nostr {}",
base64::engine::general_purpose::STANDARD
.encode(auth_event.as_json().as_bytes())
),
)
.body(f)
.send()
.await?
.json()
.await?;
Ok(rsp)
}
}
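
A hedged usage sketch of the client above, mirroring the call made from ZapStreamOverseer::on_segment later in this commit; the server URL matches the dev compose file, while the segment path and generated keys are illustrative:

async fn demo_upload() -> anyhow::Result<()> {
    let blossom = Blossom::new("http://localhost:8881");
    // Freshly generated keys stand in for the configured nsec
    let keys = nostr_sdk::Keys::generate();
    let blob = blossom
        .upload(&std::path::PathBuf::from("out/seg-0.ts"), &keys, Some("video/mp2t"))
        .await?;
    println!("uploaded {} ({} bytes)", blob.url, blob.size);
    Ok(())
}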

crates/zap-stream/src/http.rs Normal file

@@ -0,0 +1,95 @@
use bytes::Bytes;
use futures_util::TryStreamExt;
use http_body_util::combinators::BoxBody;
use http_body_util::{BodyExt, Full, StreamBody};
use hyper::body::{Frame, Incoming};
use hyper::service::Service;
use hyper::{Method, Request, Response};
use log::error;
use std::future::Future;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use tokio::fs::File;
use tokio_util::io::ReaderStream;
use zap_stream_core::overseer::Overseer;
use crate::overseer::ZapStreamOverseer;
#[derive(Clone)]
pub struct HttpServer {
index: String,
files_dir: PathBuf,
overseer: Arc<ZapStreamOverseer>,
}
impl HttpServer {
pub fn new(index: String, files_dir: PathBuf, overseer: Arc<ZapStreamOverseer>) -> Self {
Self {
index,
files_dir,
overseer,
}
}
}
impl Service<Request<Incoming>> for HttpServer {
type Response = Response<BoxBody<Bytes, Self::Error>>;
type Error = anyhow::Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn call(&self, req: Request<Incoming>) -> Self::Future {
// check for index.html (parenthesized so non-GET requests never match)
if req.method() == Method::GET
&& (req.uri().path() == "/" || req.uri().path() == "/index.html")
{
let index = self.index.clone();
return Box::pin(async move {
Ok(Response::builder()
.header("content-type", "text/html")
.header("server", "zap-stream-core")
.body(
Full::new(Bytes::from(index))
.map_err(|e| match e {})
.boxed(),
)?)
});
}
// check if mapped to file
let dst_path = self.files_dir.join(req.uri().path()[1..].to_string());
if dst_path.exists() {
return Box::pin(async move {
let mut rsp = Response::builder()
.header("server", "zap-stream-core")
.header("access-control-allow-origin", "*")
.header("access-control-allow-headers", "*")
.header("access-control-allow-methods", "HEAD, GET");
if req.method() == Method::HEAD {
return Ok(rsp.body(BoxBody::default())?);
}
let f = File::open(&dst_path).await?;
let f_stream = ReaderStream::new(f);
let body = StreamBody::new(
f_stream
.map_ok(Frame::data)
.map_err(|e| Self::Error::new(e)),
)
.boxed();
Ok(rsp.body(body)?)
});
}
// otherwise handle in overseer
let overseer = self.overseer.clone();
Box::pin(async move {
match overseer.api(req).await {
Ok(res) => Ok(res),
Err(e) => {
error!("{}", e);
Ok(Response::builder().status(500).body(BoxBody::default())?)
}
}
})
}
}

crates/zap-stream/src/main.rs Normal file

@@ -0,0 +1,148 @@
use anyhow::{bail, Result};
use clap::Parser;
use config::Config;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{av_log_set_callback, av_version_info};
use ffmpeg_rs_raw::{av_log_redirect, rstr};
use hyper::server::conn::http1;
use hyper_util::rt::TokioIo;
use log::{error, info};
use std::net::SocketAddr;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::TcpListener;
use tokio::task::JoinHandle;
use tokio::time::sleep;
use url::Url;
#[cfg(feature = "rtmp")]
use zap_stream_core::ingress::rtmp;
#[cfg(feature = "srt")]
use zap_stream_core::ingress::srt;
#[cfg(feature = "test-pattern")]
use zap_stream_core::ingress::test;
use zap_stream_core::ingress::{file, tcp};
use zap_stream_core::overseer::Overseer;
use crate::http::HttpServer;
use crate::monitor::BackgroundMonitor;
use crate::overseer::ZapStreamOverseer;
use crate::settings::Settings;
mod blossom;
mod http;
mod monitor;
mod overseer;
mod settings;
#[derive(Parser, Debug)]
struct Args {}
#[tokio::main]
async fn main() -> Result<()> {
pretty_env_logger::init();
let _args = Args::parse();
unsafe {
av_log_set_callback(Some(av_log_redirect));
info!("FFMPEG version={}", rstr!(av_version_info()));
}
let builder = Config::builder()
.add_source(config::File::with_name("config.yaml"))
.add_source(config::Environment::with_prefix("APP"))
.build()?;
let settings: Settings = builder.try_deserialize()?;
let overseer = settings.get_overseer().await?;
let mut tasks = vec![];
for e in &settings.endpoints {
match try_create_listener(e, &settings.output_dir, &overseer) {
Ok(l) => tasks.push(l),
Err(e) => error!("{}", e),
}
}
let http_addr: SocketAddr = settings.listen_http.parse()?;
let index_html = include_str!("../index.html").replace("%%PUBLIC_URL%%", &settings.public_url);
let server = HttpServer::new(
index_html,
PathBuf::from(settings.output_dir),
overseer.clone(),
);
tasks.push(tokio::spawn(async move {
let listener = TcpListener::bind(&http_addr).await?;
loop {
let (socket, _) = listener.accept().await?;
let io = TokioIo::new(socket);
let server = server.clone();
tokio::spawn(async move {
if let Err(e) = http1::Builder::new().serve_connection(io, server).await {
error!("Failed to handle request: {}", e);
}
});
}
}));
// spawn background job
let mut bg = BackgroundMonitor::new(overseer.clone());
tasks.push(tokio::spawn(async move {
loop {
if let Err(e) = bg.check().await {
error!("{}", e);
}
sleep(Duration::from_secs(10)).await;
}
}));
for handle in tasks {
if let Err(e) = handle.await? {
error!("{e}");
}
}
info!("Server closed");
Ok(())
}
fn try_create_listener(
u: &str,
out_dir: &str,
overseer: &Arc<ZapStreamOverseer>,
) -> Result<JoinHandle<Result<()>>> {
let url: Url = u.parse()?;
match url.scheme() {
#[cfg(feature = "srt")]
"srt" => Ok(tokio::spawn(srt::listen(
out_dir.to_string(),
format!("{}:{}", url.host().unwrap(), url.port().unwrap()),
overseer.clone(),
))),
#[cfg(feature = "rtmp")]
"rtmp" => Ok(tokio::spawn(rtmp::listen(
out_dir.to_string(),
format!("{}:{}", url.host().unwrap(), url.port().unwrap()),
overseer.clone(),
))),
"tcp" => Ok(tokio::spawn(tcp::listen(
out_dir.to_string(),
format!("{}:{}", url.host().unwrap(), url.port().unwrap()),
overseer.clone(),
))),
"file" => Ok(tokio::spawn(file::listen(
out_dir.to_string(),
PathBuf::from(url.path()),
overseer.clone(),
))),
#[cfg(feature = "test-pattern")]
"test-pattern" => Ok(tokio::spawn(test::listen(
out_dir.to_string(),
overseer.clone(),
))),
_ => {
bail!("Unknown endpoint config: {u}");
}
}
}
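
Settings are loaded from config.yaml and then from APP_-prefixed environment variables, which take precedence; since no separator is configured on the Environment source, only top-level keys can be overridden this way. A hedged launch example (binary name assumed from the package name):

APP_LISTEN_HTTP="0.0.0.0:8080" RUST_LOG=info ./zap-stream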

crates/zap-stream/src/monitor.rs Normal file

@@ -0,0 +1,19 @@
use crate::overseer::ZapStreamOverseer;
use anyhow::Result;
use std::sync::Arc;
use zap_stream_core::overseer::Overseer;
/// Monitor stream status, perform any necessary cleanup
pub struct BackgroundMonitor {
overseer: Arc<ZapStreamOverseer>,
}
impl BackgroundMonitor {
pub fn new(overseer: Arc<ZapStreamOverseer>) -> Self {
Self { overseer }
}
pub async fn check(&mut self) -> Result<()> {
self.overseer.check_streams().await
}
}

crates/zap-stream/src/overseer.rs Normal file

@@ -0,0 +1,522 @@
use crate::blossom::{BlobDescriptor, Blossom};
use zap_stream_core::egress::hls::HlsEgress;
use zap_stream_core::egress::EgressConfig;
use zap_stream_core::ingress::ConnectionInfo;
use zap_stream_core::overseer::{IngressInfo, IngressStreamType, Overseer};
use zap_stream_core::pipeline::{EgressType, PipelineConfig};
use zap_stream_core::variant::{StreamMapping, VariantStream};
use anyhow::{anyhow, bail, Result};
use async_trait::async_trait;
use base64::alphabet::STANDARD;
use base64::Engine;
use bytes::Bytes;
use chrono::Utc;
use fedimint_tonic_lnd::verrpc::VersionRequest;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVCodecID::AV_CODEC_ID_MJPEG;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVFrame;
use ffmpeg_rs_raw::Encoder;
use futures_util::FutureExt;
use http_body_util::combinators::BoxBody;
use http_body_util::{BodyExt, Full};
use hyper::body::Incoming;
use hyper::{Method, Request, Response};
use log::{error, info, warn};
use nostr_sdk::bitcoin::PrivateKey;
use nostr_sdk::prelude::Coordinate;
use nostr_sdk::{Client, Event, EventBuilder, JsonUtil, Keys, Kind, Tag, ToBech32};
use serde::Serialize;
use std::collections::HashSet;
use std::env::temp_dir;
use std::fs::create_dir_all;
use std::path::PathBuf;
use std::str::FromStr;
use std::sync::Arc;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
use tokio::sync::RwLock;
use url::Url;
use uuid::Uuid;
use zap_stream_core::variant::audio::AudioVariant;
use zap_stream_core::variant::mapping::VariantMapping;
use zap_stream_core::variant::video::VideoVariant;
use zap_stream_db::sqlx::Encode;
use zap_stream_db::{UserStream, UserStreamState, ZapStreamDb};
use crate::settings::LndSettings;
const STREAM_EVENT_KIND: u16 = 30_313;
/// zap.stream NIP-53 overseer
pub struct ZapStreamOverseer {
/// Dir where HTTP server serves files from
out_dir: String,
/// Database instance for accounts/streams
db: ZapStreamDb,
/// LND node connection
lnd: fedimint_tonic_lnd::Client,
/// Nostr client for publishing events
client: Client,
/// Nostr keys used to sign events
keys: Keys,
/// List of blossom servers to upload segments to
blossom_servers: Vec<Blossom>,
/// Public facing URL pointing to [out_dir]
public_url: String,
/// Cost / second / variant
cost: i64,
/// Currently active streams
/// Any streams which are not contained in this set are dead
active_streams: Arc<RwLock<HashSet<Uuid>>>,
}
impl ZapStreamOverseer {
pub async fn new(
out_dir: &String,
public_url: &String,
private_key: &str,
db: &str,
lnd: &LndSettings,
relays: &Vec<String>,
blossom_servers: &Option<Vec<String>>,
cost: i64,
) -> Result<Self> {
let db = ZapStreamDb::new(db).await?;
db.migrate().await?;
#[cfg(debug_assertions)]
{
let uid = db.upsert_user(&[0; 32]).await?;
db.update_user_balance(uid, 100_000_000).await?;
let user = db.get_user(uid).await?;
info!(
"ZERO pubkey: uid={},key={},balance={}",
user.id,
user.stream_key,
user.balance / 1000
);
}
let mut lnd = fedimint_tonic_lnd::connect(
lnd.address.clone(),
PathBuf::from(&lnd.cert),
PathBuf::from(&lnd.macaroon),
)
.await?;
let version = lnd
.versioner()
.get_version(VersionRequest::default())
.await?;
info!("LND connected: v{}", version.into_inner().version);
let keys = Keys::from_str(private_key)?;
let client = nostr_sdk::ClientBuilder::new().signer(keys.clone()).build();
for r in relays {
client.add_relay(r).await?;
}
client.connect().await;
Ok(Self {
out_dir: out_dir.clone(),
db,
lnd,
client,
keys,
blossom_servers: blossom_servers
.as_ref()
.unwrap_or(&Vec::new())
.into_iter()
.map(|b| Blossom::new(b))
.collect(),
public_url: public_url.clone(),
cost,
active_streams: Arc::new(RwLock::new(HashSet::new())),
})
}
pub(crate) async fn api(&self, req: Request<Incoming>) -> Result<Response<BoxBody<Bytes, anyhow::Error>>> {
let base = Response::builder()
.header("server", "zap-stream-core")
.header("access-control-allow-origin", "*")
.header("access-control-allow-headers", "*")
.header("access-control-allow-methods", "HEAD, GET");
Ok(match (req.method(), req.uri().path()) {
(&Method::GET, "/api/v1/account") => {
self.check_nip98_auth(req)?;
base.body(Default::default())?
}
(&Method::PATCH, "/api/v1/account") => {
bail!("Not implemented")
}
(&Method::GET, "/api/v1/topup") => {
bail!("Not implemented")
}
(&Method::PATCH, "/api/v1/event") => {
bail!("Not implemented")
}
(&Method::POST, "/api/v1/withdraw") => {
bail!("Not implemented")
}
(&Method::POST, "/api/v1/account/forward") => {
bail!("Not implemented")
}
(&Method::DELETE, "/api/v1/account/forward/<id>") => {
bail!("Not implemented")
}
(&Method::GET, "/api/v1/account/history") => {
bail!("Not implemented")
}
(&Method::GET, "/api/v1/account/keys") => {
bail!("Not implemented")
}
_ => {
if req.method() == Method::OPTIONS {
base.body(Default::default())?
} else {
base.status(404).body(Default::default())?
}
}
})
}
fn stream_to_event_builder(&self, stream: &UserStream) -> Result<EventBuilder> {
let mut tags = vec![
Tag::parse(&["d".to_string(), stream.id.to_string()])?,
Tag::parse(&["status".to_string(), stream.state.to_string()])?,
Tag::parse(&["starts".to_string(), stream.starts.timestamp().to_string()])?,
];
if let Some(ref ends) = stream.ends {
tags.push(Tag::parse(&[
"ends".to_string(),
ends.timestamp().to_string(),
])?);
}
if let Some(ref title) = stream.title {
tags.push(Tag::parse(&["title".to_string(), title.to_string()])?);
}
if let Some(ref summary) = stream.summary {
tags.push(Tag::parse(&["summary".to_string(), summary.to_string()])?);
}
if let Some(ref image) = stream.image {
tags.push(Tag::parse(&["image".to_string(), image.to_string()])?);
}
if let Some(ref thumb) = stream.thumb {
tags.push(Tag::parse(&["thumb".to_string(), thumb.to_string()])?);
}
if let Some(ref content_warning) = stream.content_warning {
tags.push(Tag::parse(&[
"content_warning".to_string(),
content_warning.to_string(),
])?);
}
if let Some(ref goal) = stream.goal {
tags.push(Tag::parse(&["goal".to_string(), goal.to_string()])?);
}
if let Some(ref pinned) = stream.pinned {
tags.push(Tag::parse(&["pinned".to_string(), pinned.to_string()])?);
}
if let Some(ref tags_csv) = stream.tags {
for tag in tags_csv.split(',') {
tags.push(Tag::parse(&["t".to_string(), tag.to_string()])?);
}
}
let kind = Kind::from(STREAM_EVENT_KIND);
let coord = Coordinate::new(kind, self.keys.public_key).identifier(&stream.id);
tags.push(Tag::parse([
"alt",
&format!("Watch live on https://zap.stream/{}", coord.to_bech32()?),
])?);
Ok(EventBuilder::new(kind, "").tags(tags))
}
fn blob_to_event_builder(&self, stream: &BlobDescriptor) -> Result<EventBuilder> {
let tags = if let Some(tags) = stream.nip94.as_ref() {
tags.iter()
.map_while(|(k, v)| Tag::parse([k, v]).ok())
.collect()
} else {
let mut tags = vec![
Tag::parse(["x", &stream.sha256])?,
Tag::parse(["url", &stream.url])?,
Tag::parse(["size", &stream.size.to_string()])?,
];
if let Some(m) = stream.mime_type.as_ref() {
tags.push(Tag::parse(["m", m])?)
}
tags
};
Ok(EventBuilder::new(Kind::FileMetadata, "").tags(tags))
}
async fn publish_stream_event(&self, stream: &UserStream, pubkey: &Vec<u8>) -> Result<Event> {
let extra_tags = vec![
Tag::parse(["p", hex::encode(pubkey).as_str(), "", "host"])?,
Tag::parse([
"streaming",
self.map_to_stream_public_url(stream, "live.m3u8")?.as_str(),
])?,
Tag::parse([
"image",
self.map_to_stream_public_url(stream, "thumb.webp")?
.as_str(),
])?,
Tag::parse(["service", self.map_to_public_url("api/v1")?.as_str()])?,
];
let ev = self
.stream_to_event_builder(stream)?
.tags(extra_tags)
.sign_with_keys(&self.keys)?;
self.client.send_event(ev.clone()).await?;
Ok(ev)
}
fn map_to_stream_public_url(&self, stream: &UserStream, path: &str) -> Result<String> {
self.map_to_public_url(&format!("{}/{}", stream.id, path))
}
fn map_to_public_url(&self, path: &str) -> Result<String> {
let u: Url = self.public_url.parse()?;
Ok(u.join(path)?.to_string())
}
fn check_nip98_auth(&self, req: Request<Incoming>) -> Result<()> {
let auth = if let Some(a) = req.headers().get("authorization") {
a.to_str()?
} else {
bail!("Authorization header missing");
};
if !auth.starts_with("Nostr ") {
bail!("Invalid authorization scheme");
}
let json = String::from_utf8(
base64::engine::general_purpose::STANDARD.decode(auth[6..].as_bytes())?,
)?;
info!("{}", json);
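// NOTE: the event is only decoded and logged above; signature, kind (27235),
// and u/method/expiration tag validation are not implemented yet.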
Ok(())
}
}
#[derive(Serialize)]
struct Endpoint {}
#[derive(Serialize)]
struct AccountInfo {
pub endpoints: Vec<Endpoint>,
pub event: Event,
pub balance: u64,
}
#[async_trait]
impl Overseer for ZapStreamOverseer {
async fn check_streams(&self) -> Result<()> {
let active_streams = self.db.list_live_streams().await?;
for stream in active_streams {
// check membership in the in-memory active stream set
let id = Uuid::parse_str(&stream.id)?;
info!("Checking stream is alive: {}", stream.id);
let is_active = {
let streams = self.active_streams.read().await;
streams.contains(&id)
};
if !is_active {
if let Err(e) = self.on_end(&id).await {
error!("Failed to end dead stream {}: {}", &id, e);
}
}
}
Ok(())
}
async fn start_stream(
&self,
connection: &ConnectionInfo,
stream_info: &IngressInfo,
) -> Result<PipelineConfig> {
let uid = self
.db
.find_user_stream_key(&connection.key)
.await?
.ok_or_else(|| anyhow::anyhow!("User not found"))?;
let user = self.db.get_user(uid).await?;
if user.balance <= 0 {
bail!("Not enough balance");
}
let variants = get_default_variants(&stream_info)?;
let mut egress = vec![];
egress.push(EgressType::HLS(EgressConfig {
name: "hls".to_string(),
variants: variants.iter().map(|v| v.id()).collect(),
}));
let stream_id = Uuid::new_v4();
// insert new stream record
let mut new_stream = UserStream {
id: stream_id.to_string(),
user_id: uid,
starts: Utc::now(),
state: UserStreamState::Live,
..Default::default()
};
let stream_event = self.publish_stream_event(&new_stream, &user.pubkey).await?;
new_stream.event = Some(stream_event.as_json());
let mut streams = self.active_streams.write().await;
streams.insert(stream_id.clone());
self.db.insert_stream(&new_stream).await?;
self.db.update_stream(&new_stream).await?;
Ok(PipelineConfig {
id: stream_id,
variants,
egress,
})
}
async fn on_segment(
&self,
pipeline_id: &Uuid,
variant_id: &Uuid,
index: u64,
duration: f32,
path: &PathBuf,
) -> Result<()> {
let cost = self.cost * duration.round() as i64;
let stream = self.db.get_stream(pipeline_id).await?;
let bal = self
.db
.tick_stream(pipeline_id, stream.user_id, duration, cost)
.await?;
if bal <= 0 {
bail!("Not enough balance");
}
// Upload to blossom servers if configured
let mut blobs = vec![];
for b in &self.blossom_servers {
blobs.push(b.upload(path, &self.keys, Some("video/mp2t")).await?);
}
if let Some(blob) = blobs.first() {
let a_tag = format!(
"{}:{}:{}",
STREAM_EVENT_KIND,
self.keys.public_key.to_hex(),
pipeline_id
);
let mut n94 = self.blob_to_event_builder(blob)?.add_tags([
Tag::parse(["a", &a_tag])?,
Tag::parse(["d", variant_id.to_string().as_str()])?,
Tag::parse(["duration", duration.to_string().as_str()])?,
]);
for b in blobs.iter().skip(1) {
n94 = n94.tag(Tag::parse(["url", &b.url])?);
}
let n94 = n94.sign_with_keys(&self.keys)?;
let cc = self.client.clone();
tokio::spawn(async move {
if let Err(e) = cc.send_event(n94).await {
warn!("Error sending event: {}", e);
}
});
info!("Published N94 segment to {}", blob.url);
}
Ok(())
}
async fn on_thumbnail(
&self,
pipeline_id: &Uuid,
width: usize,
height: usize,
pixels: &PathBuf,
) -> Result<()> {
// nothing to do
Ok(())
}
async fn on_end(&self, pipeline_id: &Uuid) -> Result<()> {
let mut stream = self.db.get_stream(pipeline_id).await?;
let user = self.db.get_user(stream.user_id).await?;
let mut streams = self.active_streams.write().await;
streams.remove(pipeline_id);
stream.state = UserStreamState::Ended;
let event = self.publish_stream_event(&stream, &user.pubkey).await?;
stream.event = Some(event.as_json());
self.db.update_stream(&stream).await?;
info!("Stream ended {}", stream.id);
Ok(())
}
}
fn get_default_variants(info: &IngressInfo) -> Result<Vec<VariantStream>> {
let mut vars: Vec<VariantStream> = vec![];
if let Some(video_src) = info
.streams
.iter()
.find(|c| c.stream_type == IngressStreamType::Video)
{
vars.push(VariantStream::CopyVideo(VariantMapping {
id: Uuid::new_v4(),
src_index: video_src.index,
dst_index: 0,
group_id: 0,
}));
vars.push(VariantStream::Video(VideoVariant {
mapping: VariantMapping {
id: Uuid::new_v4(),
src_index: video_src.index,
dst_index: 1,
group_id: 1,
},
width: 1280,
height: 720,
fps: video_src.fps,
bitrate: 3_000_000,
codec: "libx264".to_string(),
profile: 100,
level: 51,
keyframe_interval: video_src.fps as u16 * 2,
pixel_format: AV_PIX_FMT_YUV420P as u32,
}));
}
if let Some(audio_src) = info
.streams
.iter()
.find(|c| c.stream_type == IngressStreamType::Audio)
{
vars.push(VariantStream::CopyAudio(VariantMapping {
id: Uuid::new_v4(),
src_index: audio_src.index,
dst_index: 2,
group_id: 0,
}));
vars.push(VariantStream::Audio(AudioVariant {
mapping: VariantMapping {
id: Uuid::new_v4(),
src_index: audio_src.index,
dst_index: 3,
group_id: 1,
},
bitrate: 192_000,
codec: "aac".to_string(),
channels: 2,
sample_rate: 48_000,
sample_fmt: "fltp".to_owned(),
}));
}
Ok(vars)
}
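
For reference, check_nip98_auth above expects an Authorization header of the form "Nostr <base64(event JSON)>". A hedged client-side sketch of building such a header with the same nostr-sdk calls used elsewhere in this file; kind 27235 and the u/method tags follow NIP-98, none of which the server verifies yet:

fn nip98_header(keys: &Keys, url: &str, method: &str) -> Result<String> {
    // Kind 27235 is the NIP-98 HTTP auth event; tags name the request target
    let ev = EventBuilder::new(Kind::Custom(27235), "")
        .tags([Tag::parse(["u", url])?, Tag::parse(["method", method])?])
        .sign_with_keys(keys)?;
    Ok(format!(
        "Nostr {}",
        base64::engine::general_purpose::STANDARD.encode(ev.as_json().as_bytes())
    ))
}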

crates/zap-stream/src/settings.rs Normal file

@@ -0,0 +1,90 @@
use crate::overseer::ZapStreamOverseer;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use zap_stream_core::overseer::Overseer;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Settings {
/// List of listen endpoints
///
/// - srt://localhost:3333
/// - tcp://localhost:3334
/// - rtmp://localhost:1935
pub endpoints: Vec<String>,
/// Where to store output (static files)
pub output_dir: String,
/// Public facing URL that maps to [output_dir]
pub public_url: String,
/// Binding address for http server serving files from [output_dir]
pub listen_http: String,
/// Overseer service; see [zap_stream_core::overseer::Overseer] for more info
pub overseer: OverseerConfig,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum OverseerConfig {
/// Static output
Local,
/// Control system via external API
Webhook {
/// Webhook service URL
url: String,
},
/// NIP-53 service (i.e. zap.stream backend)
ZapStream {
/// MySQL database connection string
database: String,
/// LND node connection details
lnd: LndSettings,
/// Relays to publish events to
relays: Vec<String>,
/// Nsec to sign nostr events
nsec: String,
/// Blossom servers
blossom: Option<Vec<String>>,
/// Cost (milli-sats) / second / variant
cost: i64,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LndSettings {
pub address: String,
pub cert: String,
pub macaroon: String,
}
impl Settings {
pub async fn get_overseer(&self) -> anyhow::Result<Arc<ZapStreamOverseer>> {
match &self.overseer {
OverseerConfig::ZapStream {
nsec: private_key,
database,
lnd,
relays,
blossom,
cost,
} => Ok(Arc::new(
ZapStreamOverseer::new(
&self.output_dir,
&self.public_url,
private_key,
database,
lnd,
relays,
blossom,
*cost,
)
.await?,
)),
_ => {
anyhow::bail!("Unsupported overseer")
}
}
}
}

crates/zap-stream/test.sh Executable file

@@ -0,0 +1,6 @@
#!/bin/bash
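# NOTE: this streams to port 3333, while the sample config.yaml above listens
# for SRT on 3335; adjust one side when testing against that config.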
ffmpeg \
-f lavfi -i "sine=frequency=1000:sample_rate=48000" \
-re -f lavfi -i testsrc -g 300 -r 60 -pix_fmt yuv420p -s 1280x720 \
-c:v h264 -b:v 2000k -c:a aac -ac 2 -b:a 192k -fflags nobuffer -f mpegts srt://localhost:3333

File diff suppressed because one or more lines are too long

Binary image file added (118 KiB)