Mirror of https://github.com/v0l/zap-stream-core.git (synced 2025-06-17 12:08:46 +00:00)

Commit: refactor: frame gen

Changed files include Cargo.lock (generated, 115 changed lines).
@@ -857,9 +857,9 @@ dependencies = [
 
 [[package]]
 name = "data-encoding"
-version = "2.7.0"
+version = "2.9.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e60eed09d8c01d3cee5b7d30acb059b76614c918fa0f992e0dd6eeb10daad6f"
+checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476"
 
 [[package]]
 name = "data-url"
@@ -964,7 +964,7 @@ checksum = "4cd405aab171cb85d6735e5c8d9db038c17d3ca007a4d2c25f337935c3d90580"
 dependencies = [
  "humantime",
  "is-terminal",
- "log",
+ "log 0.4.25",
  "regex",
  "termcolor",
 ]
@@ -1045,12 +1045,12 @@ dependencies = [
 [[package]]
 name = "ffmpeg-rs-raw"
 version = "0.1.0"
-source = "git+https://git.v0l.io/Kieran/ffmpeg-rs-raw.git?rev=29ab0547478256c574766b4acc6fcda8ebf4cae6#29ab0547478256c574766b4acc6fcda8ebf4cae6"
+source = "git+https://github.com/v0l/ffmpeg-rs-raw.git?rev=8307b0a225267cefac9c174d5f6a0314a2f0a66b#8307b0a225267cefac9c174d5f6a0314a2f0a66b"
 dependencies = [
  "anyhow",
  "ffmpeg-sys-the-third",
  "libc",
- "log",
+ "log 0.4.25",
  "slimbox",
 ]
 
@@ -1123,16 +1123,16 @@ dependencies = [
 
 [[package]]
 name = "fontdb"
-version = "0.22.0"
+version = "0.23.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a3a6f9af55fb97ad673fb7a69533eb2f967648a06fa21f8c9bb2cd6d33975716"
+checksum = "457e789b3d1202543297a350643cf459f836cade38934e7a4cf6a39e7cde2905"
 dependencies = [
  "fontconfig-parser",
- "log",
+ "log 0.4.25",
  "memmap2",
  "slotmap",
  "tinyvec",
- "ttf-parser 0.24.1",
+ "ttf-parser 0.25.1",
 ]
 
 [[package]]
@@ -1857,9 +1857,9 @@ dependencies = [
 
 [[package]]
 name = "image-webp"
-version = "0.1.3"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f79afb8cbee2ef20f59ccd477a218c12a93943d075b492015ecb1bb81f8ee904"
+checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f"
 dependencies = [
  "byteorder-lite",
  "quick-error",
@@ -2069,6 +2069,15 @@ dependencies = [
  "scopeguard",
 ]
 
+[[package]]
+name = "log"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b"
+dependencies = [
+ "log 0.4.25",
+]
+
 [[package]]
 name = "log"
 version = "0.4.25"
@@ -2161,6 +2170,16 @@ version = "0.10.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03"
 
+[[package]]
+name = "mustache"
+version = "0.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "51956ef1c5d20a1384524d91e616fb44dfc7d8f249bf696d49c97dd3289ecab5"
+dependencies = [
+ "log 0.3.9",
+ "serde",
+]
+
 [[package]]
 name = "native-tls"
 version = "0.2.13"
@@ -2168,7 +2187,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0dab59f8e050d5df8e4dd87d9206fb6f65a483e20ac9fda365ade4fab353196c"
 dependencies = [
  "libc",
- "log",
+ "log 0.4.25",
  "openssl",
  "openssl-probe",
  "openssl-sys",
@@ -2627,7 +2646,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "865724d4dbe39d9f3dd3b52b88d859d66bcb2d6a0acfd5ea68a65fb66d4bdc1c"
 dependencies = [
  "env_logger",
- "log",
+ "log 0.4.25",
 ]
 
 [[package]]
@@ -2668,7 +2687,7 @@ dependencies = [
  "bytes",
  "heck",
  "itertools 0.12.1",
- "log",
+ "log 0.4.25",
  "multimap",
  "once_cell",
  "petgraph",
@@ -2806,7 +2825,7 @@ dependencies = [
  "hyper-util",
  "ipnet",
  "js-sys",
- "log",
+ "log 0.4.25",
  "mime",
  "native-tls",
  "once_cell",
@@ -2833,13 +2852,13 @@ dependencies = [
 
 [[package]]
 name = "resvg"
-version = "0.44.0"
+version = "0.45.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a325d5e8d1cebddd070b13f44cec8071594ab67d1012797c121f27a669b7958"
+checksum = "a8928798c0a55e03c9ca6c4c6846f76377427d2c1e1f7e6de3c06ae57942df43"
 dependencies = [
  "gif",
  "image-webp",
- "log",
+ "log 0.4.25",
  "pico-args",
  "rgb",
  "svgtypes",
@@ -2996,7 +3015,7 @@ version = "0.21.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
 dependencies = [
- "log",
+ "log 0.4.25",
  "ring",
  "rustls-webpki 0.101.7",
  "sct",
@@ -3069,16 +3088,16 @@ checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4"
 
 [[package]]
 name = "rustybuzz"
-version = "0.18.0"
+version = "0.20.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c85d1ccd519e61834798eb52c4e886e8c2d7d698dd3d6ce0b1b47eb8557f1181"
+checksum = "fd3c7c96f8a08ee34eff8857b11b49b07d71d1c3f4e88f8a88d4c9e9f90b1702"
 dependencies = [
  "bitflags 2.8.0",
  "bytemuck",
  "core_maths",
- "log",
+ "log 0.4.25",
  "smallvec",
- "ttf-parser 0.24.1",
+ "ttf-parser 0.25.1",
  "unicode-bidi-mirroring",
  "unicode-ccc",
  "unicode-properties",
@@ -3315,7 +3334,7 @@ version = "0.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7a9c6883ca9c3c7c90e888de77b7a5c849c779d25d74a1269b0218b14e8b136c"
 dependencies = [
- "log",
+ "log 0.4.25",
 ]
 
 [[package]]
@@ -3418,7 +3437,7 @@ dependencies = [
 "hashbrown 0.15.2",
 "hashlink 0.10.0",
 "indexmap 2.7.1",
-"log",
+"log 0.4.25",
 "memchr",
 "once_cell",
 "percent-encoding",
@@ -3497,7 +3516,7 @@ dependencies = [
 "hkdf",
 "hmac 0.12.1",
 "itoa",
-"log",
+"log 0.4.25",
 "md-5",
 "memchr",
 "once_cell",
@@ -3537,7 +3556,7 @@ dependencies = [
 "hmac 0.12.1",
 "home",
 "itoa",
-"log",
+"log 0.4.25",
 "md-5",
 "memchr",
 "once_cell",
@@ -3568,7 +3587,7 @@ dependencies = [
 "futures-intrusive",
 "futures-util",
 "libsqlite3-sys",
-"log",
+"log 0.4.25",
 "percent-encoding",
 "serde",
 "serde_urlencoded",
@@ -3594,7 +3613,7 @@ dependencies = [
 "hex",
 "hmac 0.12.1",
 "keyed_priority_queue",
-"log",
+"log 0.4.25",
 "pbkdf2",
 "rand",
 "regex",
@@ -3613,7 +3632,7 @@ checksum = "0a55cb90afac5672b00954e3291846dd262cfef3b52d1b507f580180433373d3"
 dependencies = [
 "bytes",
 "futures",
-"log",
+"log 0.4.25",
 "rand",
 "socket2",
 "srt-protocol",
@@ -3824,7 +3843,7 @@ dependencies = [
 "arrayvec",
 "bytemuck",
 "cfg-if",
-"log",
+"log 0.4.25",
 "png",
 "tiny-skia-path",
 ]
@@ -3963,7 +3982,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "edc5f74e248dc973e0dbb7b74c7e0d6fcc301c694ff50049504004ef4d0cdcd9"
 dependencies = [
 "futures-util",
-"log",
+"log 0.4.25",
 "rustls 0.23.21",
 "rustls-pki-types",
 "tokio",
@@ -4115,7 +4134,7 @@ version = "0.1.41"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0"
 dependencies = [
-"log",
+"log 0.4.25",
 "pin-project-lite",
 "tracing-attributes",
 "tracing-core",
@@ -4161,9 +4180,9 @@ checksum = "2c591d83f69777866b9126b24c6dd9a18351f177e49d625920d19f989fd31cf8"
 
 [[package]]
 name = "ttf-parser"
-version = "0.24.1"
+version = "0.25.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5be21190ff5d38e8b4a2d3b6a3ae57f612cc39c96e83cedeaf7abc338a8bac4a"
+checksum = "d2df906b07856748fa3f6e0ad0cbaa047052d4a7dd609e231c4f72cee8c36f31"
 dependencies = [
 "core_maths",
 ]
@@ -4179,7 +4198,7 @@ dependencies = [
 "data-encoding",
 "http 1.2.0",
 "httparse",
-"log",
+"log 0.4.25",
 "rand",
 "rustls 0.23.21",
 "rustls-pki-types",
@@ -4208,15 +4227,15 @@ checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5"
 
 [[package]]
 name = "unicode-bidi-mirroring"
-version = "0.3.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "64af057ad7466495ca113126be61838d8af947f41d93a949980b2389a118082f"
+checksum = "5dfa6e8c60bb66d49db113e0125ee8711b7647b5579dc7f5f19c42357ed039fe"
 
 [[package]]
 name = "unicode-ccc"
-version = "0.3.0"
+version = "0.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "260bc6647b3893a9a90668360803a15f96b85a5257b1c3a0c3daf6ae2496de42"
+checksum = "ce61d488bcdc9bc8b5d1772c404828b17fc481c0a582b5581e95fb233aef503e"
 
 [[package]]
 name = "unicode-ident"
@@ -4287,9 +4306,9 @@ dependencies = [
 
 [[package]]
 name = "usvg"
-version = "0.44.0"
+version = "0.45.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7447e703d7223b067607655e625e0dbca80822880248937da65966194c4864e6"
+checksum = "80be9b06fbae3b8b303400ab20778c80bbaf338f563afe567cf3c9eea17b47ef"
 dependencies = [
 "base64 0.22.1",
 "data-url",
@@ -4297,7 +4316,7 @@ dependencies = [
 "fontdb",
 "imagesize",
 "kurbo",
-"log",
+"log 0.4.25",
 "pico-args",
 "roxmltree",
 "rustybuzz",
@@ -4407,7 +4426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6"
 dependencies = [
 "bumpalo",
-"log",
+"log 0.4.25",
 "proc-macro2",
 "quote",
 "syn",
@@ -4792,9 +4811,10 @@ dependencies = [
 "http-body-util",
 "hyper 1.6.0",
 "hyper-util",
-"log",
+"log 0.4.25",
 "m3u8-rs",
 "matchit 0.8.6",
+"mustache",
 "nostr-sdk",
 "pretty_env_logger",
 "reqwest",
@@ -4814,17 +4834,20 @@ version = "0.1.0"
 dependencies = [
 "anyhow",
 "async-trait",
+"data-encoding",
 "ffmpeg-rs-raw",
 "fontdue",
 "futures-util",
 "hex",
 "itertools 0.14.0",
-"log",
+"libc",
+"log 0.4.25",
 "m3u8-rs",
 "resvg",
 "ringbuf",
 "rml_rtmp",
 "serde",
+"sha2 0.10.8",
 "srt-tokio",
 "tiny-skia",
 "tokio",
@@ -10,10 +10,10 @@ members = [
 opt-level = 3
 lto = true
 codegen-units = 1
-panic = "abort"
+panic = "unwind"
 
 [workspace.dependencies]
-ffmpeg-rs-raw = { git = "https://git.v0l.io/Kieran/ffmpeg-rs-raw.git", rev = "29ab0547478256c574766b4acc6fcda8ebf4cae6" }
+ffmpeg-rs-raw = { git = "https://github.com/v0l/ffmpeg-rs-raw.git", rev = "8307b0a225267cefac9c174d5f6a0314a2f0a66b" }
 tokio = { version = "1.36.0", features = ["rt", "rt-multi-thread", "macros"] }
 anyhow = { version = "^1.0.91", features = ["backtrace"] }
 async-trait = "0.1.77"
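Note: the release profile switches from panic = "abort" to panic = "unwind". The commit does not state the motivation, but the practical difference is that an unwinding panic can be contained to one thread or caught with std::panic::catch_unwind instead of aborting the whole process. A minimal illustration of that difference (not code from this repository):

use std::panic;

fn main() {
    // With panic = "unwind" (the new setting) this panic is caught and the
    // process keeps running; with panic = "abort" the process would die here.
    let result = panic::catch_unwind(|| {
        panic!("simulated pipeline failure");
    });
    assert!(result.is_err());
    println!("recovered from panic, continuing");
}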
@@ -4,18 +4,9 @@ version = "0.1.0"
 edition = "2021"
 
 [features]
-default = ["test-pattern", "srt", "rtmp"]
+default = ["srt", "rtmp"]
 srt = ["dep:srt-tokio"]
 rtmp = ["dep:rml_rtmp"]
-local-overseer = [] # WIP
-webhook-overseer = [] # WIP
-test-pattern = [
-    "dep:resvg",
-    "dep:usvg",
-    "dep:tiny-skia",
-    "dep:fontdue",
-    "dep:ringbuf",
-]
 
 [dependencies]
 ffmpeg-rs-raw.workspace = true
@@ -27,20 +18,20 @@ uuid.workspace = true
 serde.workspace = true
 hex.workspace = true
 itertools.workspace = true
-futures-util = "0.3.30"
 m3u8-rs.workspace = true
 sha2.workspace = true
 data-encoding.workspace = true
 
+futures-util = "0.3.30"
+resvg = "0.45.1"
+usvg = "0.45.1"
+tiny-skia = "0.11.4"
+fontdue = "0.9.2"
+ringbuf = "0.4.7"
+
 # srt
 srt-tokio = { version = "0.4.3", optional = true }
 
 # rtmp
 rml_rtmp = { version = "0.8.0", optional = true }
+libc = "0.2.169"
-
-# test-pattern
-resvg = { version = "0.44.0", optional = true }
-usvg = { version = "0.44.0", optional = true }
-tiny-skia = { version = "0.11.4", optional = true }
-fontdue = { version = "0.9.2", optional = true }
-ringbuf = { version = "0.4.7", optional = true }
crates/core/src/generator.rs (new file, 431 lines)
@@ -0,0 +1,431 @@
use crate::overseer::IngressStream;
use anyhow::{bail, Result};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVColorSpace::AVCOL_SPC_RGB;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_RGBA;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVSampleFormat::AV_SAMPLE_FMT_FLTP;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
    av_channel_layout_default, av_frame_alloc, av_frame_free, av_frame_get_buffer, AVFrame,
    AVPixelFormat, AVRational,
};
use ffmpeg_rs_raw::Scaler;
use fontdue::layout::{CoordinateSystem, Layout, TextStyle};
use fontdue::Font;
use std::mem::transmute;
use std::time::{Duration, Instant};
use std::{ptr, slice};

/// Frame generator
pub struct FrameGenerator {
    fps: f32,
    width: u16,
    height: u16,
    video_sample_fmt: AVPixelFormat,

    audio_sample_rate: u32,
    audio_frame_size: i32,
    audio_channels: u8,

    frame_idx: u64,
    audio_samples: u64,

    // internal
    next_frame: *mut AVFrame,
    scaler: Scaler,
    font: Font,
    start: Instant,
}

impl Drop for FrameGenerator {
    fn drop(&mut self) {
        unsafe {
            if !self.next_frame.is_null() {
                av_frame_free(&mut self.next_frame);
                self.next_frame = std::ptr::null_mut();
            }
        }
    }
}

impl FrameGenerator {
    pub fn new(
        fps: f32,
        width: u16,
        height: u16,
        pix_fmt: AVPixelFormat,
        sample_rate: u32,
        frame_size: i32,
        channels: u8,
    ) -> Result<Self> {
        let font = include_bytes!("../SourceCodePro-Regular.ttf") as &[u8];
        let font = Font::from_bytes(font, Default::default()).unwrap();

        Ok(Self {
            fps,
            width,
            height,
            video_sample_fmt: pix_fmt,
            audio_sample_rate: sample_rate,
            audio_frame_size: frame_size,
            audio_channels: channels,
            frame_idx: 0,
            audio_samples: 0,
            font,
            start: Instant::now(),
            scaler: Scaler::default(),
            next_frame: ptr::null_mut(),
        })
    }

    pub fn from_stream(
        video_stream: &IngressStream,
        audio_stream: Option<&IngressStream>,
    ) -> Result<Self> {
        Ok(Self::new(
            video_stream.fps,
            video_stream.width as _,
            video_stream.height as _,
            unsafe { transmute(video_stream.format as i32) },
            audio_stream.map(|i| i.sample_rate as _).unwrap_or(0),
            if audio_stream.is_none() { 0 } else { 1024 },
            audio_stream.map(|i| i.channels as _).unwrap_or(0),
        )?)
    }

    pub fn frame_no(&self) -> u64 {
        self.frame_idx
    }

    /// Create a new frame for composing text / images
    pub fn begin(&mut self) -> Result<()> {
        if self.next_frame.is_null() {
            unsafe {
                let mut src_frame = av_frame_alloc();
                if src_frame.is_null() {
                    bail!("Failed to allocate placeholder video frame");
                }

                (*src_frame).width = self.width as _;
                (*src_frame).height = self.height as _;
                (*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
                (*src_frame).key_frame = 1;
                (*src_frame).colorspace = AVCOL_SPC_RGB;
                //internally always use RGBA, we convert frame to target pixel format at the end
                (*src_frame).format = AV_PIX_FMT_RGBA as _;
                (*src_frame).pts = self.frame_idx as _;
                (*src_frame).duration = 1;
                (*src_frame).time_base = AVRational {
                    num: 1,
                    den: self.fps as i32,
                };
                if av_frame_get_buffer(src_frame, 0) < 0 {
                    av_frame_free(&mut src_frame);
                    bail!("Failed to get frame buffer");
                }
                self.next_frame = src_frame;
            }
        }
        Ok(())
    }

    /// Write some text into the next frame
    pub fn write_text(&mut self, msg: &str, size: f32, x: f32, y: f32) -> Result<()> {
        if self.next_frame.is_null() {
            bail!("Must call begin() before writing text")
        }
        let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
        layout.append(&[&self.font], &TextStyle::new(msg, size, 0));

        self.write_layout(layout, x, y)?;
        Ok(())
    }

    /// Write text layout into frame
    fn write_layout(&mut self, layout: Layout, x: f32, y: f32) -> Result<()> {
        for g in layout.glyphs() {
            let (metrics, bitmap) = self.font.rasterize_config_subpixel(g.key);
            for y1 in 0..metrics.height {
                for x1 in 0..metrics.width {
                    let dst_x = x as usize + x1 + g.x as usize;
                    let dst_y = y as usize + y1 + g.y as usize;
                    let offset_src = (x1 + y1 * metrics.width) * 3;
                    unsafe {
                        let offset_dst =
                            4 * dst_x + dst_y * (*self.next_frame).linesize[0] as usize;
                        let pixel_dst = (*self.next_frame).data[0].add(offset_dst);
                        *pixel_dst.offset(0) = bitmap[offset_src];
                        *pixel_dst.offset(1) = bitmap[offset_src + 1];
                        *pixel_dst.offset(2) = bitmap[offset_src + 2];
                    }
                }
            }
        }
        Ok(())
    }

    /// Copy data directly into the frame buffer (must be RGBA data)
    pub unsafe fn copy_frame_data(&mut self, data: &[u8]) -> Result<()> {
        if self.next_frame.is_null() {
            bail!("Must call begin() before writing frame data")
        }
        let buf = slice::from_raw_parts_mut(
            (*self.next_frame).data[0],
            (self.width as usize * self.height as usize * 4) as usize,
        );
        if buf.len() < data.len() {
            bail!("Frame buffer is too small");
        }
        buf.copy_from_slice(data);
        Ok(())
    }

    /// Generate audio to stay synchronized with video frames
    unsafe fn generate_audio_frame(&mut self) -> Result<*mut AVFrame> {
        const FREQUENCY: f32 = 440.0; // A4 note

        // audio is disabled if sample rate is 0
        if self.audio_sample_rate == 0 {
            return Ok(ptr::null_mut());
        }

        // Calculate how many audio samples we need to cover the next video frame
        let samples_per_frame = (self.audio_sample_rate as f32 / self.fps) as u64;
        let next_frame_needs_samples = (self.frame_idx + 1) * samples_per_frame;

        // Generate audio if we don't have enough to cover the next video frame
        if self.audio_samples < next_frame_needs_samples {
            let audio_frame = av_frame_alloc();
            (*audio_frame).format = AV_SAMPLE_FMT_FLTP as _;
            (*audio_frame).nb_samples = self.audio_frame_size as _;
            (*audio_frame).duration = self.audio_frame_size as _;
            (*audio_frame).sample_rate = self.audio_sample_rate as _;
            (*audio_frame).pts = self.audio_samples as _;
            (*audio_frame).time_base = AVRational {
                num: 1,
                den: self.audio_sample_rate as _,
            };
            av_channel_layout_default(&mut (*audio_frame).ch_layout, self.audio_channels as _);
            av_frame_get_buffer(audio_frame, 0);

            // Generate sine wave samples
            let data = (*audio_frame).data[0] as *mut f32;
            for i in 0..self.audio_frame_size {
                let sample_time =
                    (self.audio_samples + i as u64) as f32 / self.audio_sample_rate as f32;
                let sample_value =
                    (2.0 * std::f32::consts::PI * FREQUENCY * sample_time).sin() * 0.5;
                *data.add(i as _) = sample_value;
            }

            self.audio_samples += self.audio_frame_size as u64;
            return Ok(audio_frame);
        }

        Ok(ptr::null_mut())
    }

    /// Return the next frame for encoding (blocking)
    pub unsafe fn next(&mut self) -> Result<*mut AVFrame> {
        // set start time to now if this is the first call to next()
        if self.frame_idx == 0 {
            self.start = Instant::now();
        }

        // try to get audio frames before video frames (non-blocking)
        let audio_frame = self.generate_audio_frame()?;
        if !audio_frame.is_null() {
            return Ok(audio_frame);
        }

        // auto-init frame
        if self.next_frame.is_null() {
            self.begin()?;
        }

        let stream_time = Duration::from_secs_f64(self.frame_idx as f64 / self.fps as f64);
        let real_time = Instant::now().duration_since(self.start);
        let wait_time = if stream_time > real_time {
            stream_time - real_time
        } else {
            Duration::new(0, 0)
        };
        if !wait_time.is_zero() && wait_time.as_secs_f32() > 1f32 / self.fps {
            std::thread::sleep(wait_time);
        }

        // convert to output pixel format, or just return internal frame if it matches output
        if self.video_sample_fmt != transmute((*self.next_frame).format) {
            let out_frame = self.scaler.process_frame(
                self.next_frame,
                self.width,
                self.height,
                self.video_sample_fmt,
            )?;
            av_frame_free(&mut self.next_frame);
            self.next_frame = ptr::null_mut();
            self.frame_idx += 1;
            Ok(out_frame)
        } else {
            let ret = self.next_frame;
            self.next_frame = ptr::null_mut();
            self.frame_idx += 1;
            Ok(ret)
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;

    #[test]
    fn test_frame_timing_synchronization() {
        unsafe {
            let fps = 30.0;
            let sample_rate = 44100;
            let frame_size = 1024;
            let channels = 2;

            let mut gen = FrameGenerator::new(
                fps,
                1280,
                720,
                AV_PIX_FMT_YUV420P,
                sample_rate,
                frame_size,
                channels,
            )
            .unwrap();

            let samples_per_frame = sample_rate as f64 / fps as f64; // Expected: 1470 samples per frame
            println!("Expected samples per video frame: {:.2}", samples_per_frame);

            let mut audio_frames = 0;
            let mut video_frames = 0;
            let mut total_audio_samples = 0;

            // Generate frames for 2 seconds (60 video frames at 30fps)
            for i in 0..120 {
                let mut frame = gen.next().unwrap();

                if (*frame).sample_rate > 0 {
                    // Audio frame
                    audio_frames += 1;
                    total_audio_samples += (*frame).nb_samples as u64;
                    println!(
                        "Frame {}: AUDIO - PTS: {}, samples: {}, total_samples: {}",
                        i,
                        (*frame).pts,
                        (*frame).nb_samples,
                        total_audio_samples
                    );
                } else {
                    // Video frame
                    video_frames += 1;
                    let expected_audio_samples = (video_frames as f64 * samples_per_frame) as u64;
                    let audio_deficit = if total_audio_samples >= expected_audio_samples {
                        0
                    } else {
                        expected_audio_samples - total_audio_samples
                    };

                    println!("Frame {}: VIDEO - PTS: {}, frame_idx: {}, expected_audio: {}, actual_audio: {}, deficit: {}",
                        i, (*frame).pts, video_frames, expected_audio_samples, total_audio_samples, audio_deficit);

                    // Verify we have enough audio for this video frame
                    assert!(
                        total_audio_samples >= expected_audio_samples,
                        "Video frame {} needs {} audio samples but only have {}",
                        video_frames,
                        expected_audio_samples,
                        total_audio_samples
                    );
                }

                av_frame_free(&mut frame);
            }

            println!("\nSummary:");
            println!("Video frames: {}", video_frames);
            println!("Audio frames: {}", audio_frames);
            println!("Total audio samples: {}", total_audio_samples);
            println!(
                "Expected audio samples for {} video frames: {:.2}",
                video_frames,
                video_frames as f64 * samples_per_frame
            );

            // Verify the ratio is correct
            let expected_total_audio = video_frames as f64 * samples_per_frame;
            let sample_accuracy = (total_audio_samples as f64 - expected_total_audio).abs();
            println!("Sample accuracy (difference): {:.2}", sample_accuracy);

            // Allow for some tolerance due to frame size constraints
            assert!(
                sample_accuracy < frame_size as f64,
                "Audio sample count too far from expected: got {}, expected {:.2}, diff {:.2}",
                total_audio_samples,
                expected_total_audio,
                sample_accuracy
            );
        }
    }

    #[test]
    fn test_pts_progression() {
        unsafe {
            let fps = 30.0;
            let sample_rate = 44100;

            let mut gen =
                FrameGenerator::new(fps, 1280, 720, AV_PIX_FMT_YUV420P, sample_rate, 1024, 2)
                    .unwrap();

            let mut last_audio_pts = -1i64;
            let mut last_video_pts = -1i64;
            let mut audio_pts_gaps = Vec::new();
            let mut video_pts_gaps = Vec::new();

            // Generate 60 frames to test PTS progression
            for _ in 0..60 {
                let mut frame = gen.next().unwrap();

                if (*frame).sample_rate > 0 {
                    // Audio frame - check PTS progression
                    if last_audio_pts >= 0 {
                        let gap = (*frame).pts - last_audio_pts;
                        audio_pts_gaps.push(gap);
                        println!("Audio PTS gap: {}", gap);
                    }
                    last_audio_pts = (*frame).pts;
                } else {
                    // Video frame - check PTS progression
                    if last_video_pts >= 0 {
                        let gap = (*frame).pts - last_video_pts;
                        video_pts_gaps.push(gap);
                        println!("Video PTS gap: {}", gap);
                    }
                    last_video_pts = (*frame).pts;
                }

                av_frame_free(&mut frame);
            }

            // Verify audio PTS gaps are consistent (should be 1024 samples)
            for gap in &audio_pts_gaps {
                assert_eq!(
                    *gap, 1024,
                    "Audio PTS should increment by frame_size (1024)"
                );
            }

            // Verify video PTS gaps are consistent (should be 1 frame)
            for gap in &video_pts_gaps {
                assert_eq!(*gap, 1, "Video PTS should increment by 1 frame");
            }

            println!("PTS progression test passed - all gaps are consistent");
        }
    }
}
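For orientation, the call pattern the rest of this commit uses with FrameGenerator (taken from the refactored test-pattern ingress further below) is: begin(), compose with copy_frame_data()/write_text(), then pull frames from next() and dispatch on sample_rate. At 30 fps and 44.1 kHz the generator has to cover 44100 / 30 = 1470 audio samples per video frame, which it emits in 1024-sample chunks ahead of each video frame. A condensed sketch, with the encoders, muxer and background assumed to already be set up as in the test-pattern source:

use anyhow::Result;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{av_frame_free, av_packet_free};
use ffmpeg_rs_raw::{Encoder, Muxer};
use tiny_skia::Pixmap;
use crate::generator::FrameGenerator;

// Sketch of one iteration of the produce loop; error handling trimmed.
unsafe fn produce_one_packet(
    gen: &mut FrameGenerator,
    video_encoder: &mut Encoder,
    audio_encoder: &mut Encoder,
    muxer: &mut Muxer,
    background: &Pixmap,
) -> Result<()> {
    gen.begin()?;                            // allocate the internal RGBA compose frame
    gen.copy_frame_data(background.data())?; // paint the RGBA background
    gen.write_text(&format!("frame={}", gen.frame_no()), 40.0, 5.0, 5.0)?;

    let mut frame = gen.next()?;             // audio frame, video frame, or null
    if frame.is_null() {
        return Ok(());
    }
    // Audio frames carry a non-zero sample_rate; video frames do not.
    let encoder = if (*frame).sample_rate > 0 { audio_encoder } else { video_encoder };
    for mut pkt in encoder.encode_frame(frame)? {
        muxer.write_packet(pkt)?;
        av_packet_free(&mut pkt);
    }
    av_frame_free(&mut frame);
    Ok(())
}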
@@ -12,7 +12,6 @@ pub mod rtmp;
 #[cfg(feature = "srt")]
 pub mod srt;
 pub mod tcp;
-#[cfg(feature = "test-pattern")]
 pub mod test;
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
@@ -1,23 +1,16 @@
+use crate::generator::FrameGenerator;
 use crate::ingress::{spawn_pipeline, ConnectionInfo};
 use crate::overseer::Overseer;
 use anyhow::Result;
-use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVColorSpace::AVCOL_SPC_RGB;
-use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
-use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::{AV_PIX_FMT_RGBA, AV_PIX_FMT_YUV420P};
+use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
 use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVSampleFormat::AV_SAMPLE_FMT_FLTP;
-use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
-    av_frame_alloc, av_frame_free, av_frame_get_buffer, av_packet_free, AVRational,
-    AV_PROFILE_H264_MAIN,
-};
-use ffmpeg_rs_raw::{Encoder, Muxer, Scaler};
-use fontdue::layout::{CoordinateSystem, Layout, TextStyle};
-use fontdue::Font;
+use ffmpeg_rs_raw::ffmpeg_sys_the_third::{av_frame_free, av_packet_free, AV_PROFILE_H264_MAIN};
+use ffmpeg_rs_raw::{Encoder, Muxer};
 use log::info;
 use ringbuf::traits::{Observer, Split};
 use ringbuf::{HeapCons, HeapRb};
 use std::io::Read;
 use std::sync::Arc;
-use std::time::{Duration, Instant};
 use tiny_skia::Pixmap;
 use tokio::runtime::Handle;
 
@@ -42,33 +35,31 @@ pub async fn listen(out_dir: String, overseer: Arc<dyn Overseer>) -> Result<()>
 }
 
 struct TestPatternSrc {
+    gen: FrameGenerator,
     video_encoder: Encoder,
     audio_encoder: Encoder,
-    scaler: Scaler,
-    muxer: Muxer,
     background: Pixmap,
-    font: [Font; 1],
-    frame_no: u64,
-    audio_sample_no: u64,
-    start: Instant,
+    muxer: Muxer,
     reader: HeapCons<u8>,
 }
 
 unsafe impl Send for TestPatternSrc {}
 
 const VIDEO_FPS: f32 = 30.0;
+const VIDEO_WIDTH: u16 = 1280;
+const VIDEO_HEIGHT: u16 = 720;
+const SAMPLE_RATE: u32 = 44100;
 
 impl TestPatternSrc {
     pub fn new() -> Result<Self> {
-        let scaler = Scaler::new();
         let video_encoder = unsafe {
             Encoder::new_with_name("libx264")?
                 .with_stream_index(0)
                 .with_framerate(VIDEO_FPS)?
                 .with_bitrate(1_000_000)
                 .with_pix_fmt(AV_PIX_FMT_YUV420P)
-                .with_width(1280)
-                .with_height(720)
+                .with_width(VIDEO_WIDTH as _)
+                .with_height(VIDEO_HEIGHT as _)
                 .with_level(51)
                 .with_profile(AV_PROFILE_H264_MAIN)
                 .open(None)?
@@ -80,22 +71,20 @@ impl TestPatternSrc {
                 .with_default_channel_layout(1)
                 .with_bitrate(128_000)
                 .with_sample_format(AV_SAMPLE_FMT_FLTP)
-                .with_sample_rate(44100)?
+                .with_sample_rate(SAMPLE_RATE as _)?
                 .open(None)?
         };
 
         let svg_data = include_bytes!("../../test.svg");
         let tree = usvg::Tree::from_data(svg_data, &Default::default())?;
-        let mut pixmap = Pixmap::new(1280, 720).unwrap();
+        let mut pixmap = Pixmap::new(VIDEO_WIDTH as _, VIDEO_HEIGHT as _).unwrap();
         let render_ts = tiny_skia::Transform::from_scale(
            pixmap.width() as f32 / tree.size().width(),
            pixmap.height() as f32 / tree.size().height(),
         );
         resvg::render(&tree, render_ts, &mut pixmap.as_mut());
 
-        let font = include_bytes!("../../SourceCodePro-Regular.ttf") as &[u8];
-        let font = Font::from_bytes(font, Default::default()).unwrap();
-
         let buf = HeapRb::new(1024 * 1024);
         let (writer, reader) = buf.split();
 
@@ -109,140 +98,51 @@ impl TestPatternSrc {
             m
         };
 
+        let frame_size = unsafe { (*audio_encoder.codec_context()).frame_size as _ };
         Ok(Self {
+            gen: FrameGenerator::new(
+                VIDEO_FPS,
+                VIDEO_WIDTH,
+                VIDEO_HEIGHT,
+                AV_PIX_FMT_YUV420P,
+                SAMPLE_RATE,
+                frame_size,
+                1,
+            )?,
             video_encoder,
             audio_encoder,
-            scaler,
             muxer,
             background: pixmap,
-            font: [font],
-            frame_no: 0,
-            audio_sample_no: 0,
-            start: Instant::now(),
             reader,
         })
     }
 
     pub unsafe fn next_pkt(&mut self) -> Result<()> {
-        let stream_time = Duration::from_secs_f64(self.frame_no as f64 / VIDEO_FPS as f64);
-        let real_time = Instant::now().duration_since(self.start);
-        let wait_time = if stream_time > real_time {
-            stream_time - real_time
-        } else {
-            Duration::new(0, 0)
-        };
-        if !wait_time.is_zero() && wait_time.as_secs_f32() > 1f32 / VIDEO_FPS {
-            std::thread::sleep(wait_time);
-        }
-
-        let mut src_frame = unsafe {
-            let src_frame = av_frame_alloc();
-
-            (*src_frame).width = 1280;
-            (*src_frame).height = 720;
-            (*src_frame).pict_type = AV_PICTURE_TYPE_NONE;
-            (*src_frame).key_frame = 1;
-            (*src_frame).colorspace = AVCOL_SPC_RGB;
-            (*src_frame).format = AV_PIX_FMT_RGBA as _;
-            (*src_frame).pts = self.frame_no as i64;
-            (*src_frame).duration = 1;
-            av_frame_get_buffer(src_frame, 0);
-
-            self.background
-                .data()
-                .as_ptr()
-                .copy_to((*src_frame).data[0] as *mut _, 1280 * 720 * 4);
-            src_frame
-        };
-        let mut layout = Layout::new(CoordinateSystem::PositiveYDown);
-        layout.clear();
-        layout.append(
-            &self.font,
-            &TextStyle::new(&format!("frame={}", self.frame_no), 40.0, 0),
-        );
-        for g in layout.glyphs() {
-            let (metrics, bitmap) = self.font[0].rasterize_config_subpixel(g.key);
-            for y in 0..metrics.height {
-                for x in 0..metrics.width {
-                    let dst_x = x + g.x as usize;
-                    let dst_y = y + g.y as usize;
-                    let offset_src = (x + y * metrics.width) * 3;
-                    unsafe {
-                        let offset_dst = 4 * dst_x + dst_y * (*src_frame).linesize[0] as usize;
-                        let pixel_dst = (*src_frame).data[0].add(offset_dst);
-                        *pixel_dst.offset(0) = bitmap[offset_src];
-                        *pixel_dst.offset(1) = bitmap[offset_src + 1];
-                        *pixel_dst.offset(2) = bitmap[offset_src + 2];
-                    }
-                }
-            }
-        }
-
-        // scale/encode video
-        let mut frame = self
-            .scaler
-            .process_frame(src_frame, 1280, 720, AV_PIX_FMT_YUV420P)?;
-        for mut pkt in self.video_encoder.encode_frame(frame)? {
-            self.muxer.write_packet(pkt)?;
-            av_packet_free(&mut pkt);
-        }
-        av_frame_free(&mut frame);
-        av_frame_free(&mut src_frame);
-
-        // Generate and encode audio (sine wave)
-        self.generate_audio_frame()?;
-
-        self.frame_no += 1;
-
-        Ok(())
-    }
-
-    /// Generate audio to stay synchronized with video frames
-    unsafe fn generate_audio_frame(&mut self) -> Result<()> {
-        const SAMPLE_RATE: f32 = 44100.0;
-        const FREQUENCY: f32 = 440.0; // A4 note
-        const SAMPLES_PER_FRAME: usize = 1024; // Fixed AAC frame size
-
-        // Calculate how many audio samples we should have by now
-        // At 30fps, each video frame = 1/30 sec = 1470 audio samples at 44.1kHz
-        let audio_samples_per_video_frame = (SAMPLE_RATE / VIDEO_FPS) as u64; // ~1470 samples
-        let target_audio_samples = self.frame_no * audio_samples_per_video_frame;
-
-        // Generate audio frames to catch up to the target
-        while self.audio_sample_no < target_audio_samples {
-            let mut audio_frame = av_frame_alloc();
-            (*audio_frame).format = AV_SAMPLE_FMT_FLTP as _;
-            (*audio_frame).nb_samples = SAMPLES_PER_FRAME as _;
-            (*audio_frame).ch_layout.nb_channels = 1;
-            (*audio_frame).sample_rate = SAMPLE_RATE as _;
-            (*audio_frame).pts = self.audio_sample_no as i64;
-            (*audio_frame).duration = 1;
-            (*audio_frame).time_base = AVRational {
-                num: 1,
-                den: SAMPLE_RATE as _,
-            };
-
-            av_frame_get_buffer(audio_frame, 0);
-
-            // Generate sine wave samples
-            let data = (*audio_frame).data[0] as *mut f32;
-            for i in 0..SAMPLES_PER_FRAME {
-                let sample_time = (self.audio_sample_no + i as u64) as f32 / SAMPLE_RATE;
-                let sample_value =
-                    (2.0 * std::f32::consts::PI * FREQUENCY * sample_time).sin() * 0.5;
-                *data.add(i) = sample_value;
-            }
-
-            // Encode audio frame
-            for mut pkt in self.audio_encoder.encode_frame(audio_frame)? {
-                self.muxer.write_packet(pkt)?;
-                av_packet_free(&mut pkt);
-            }
-
-            self.audio_sample_no += SAMPLES_PER_FRAME as u64;
-            av_frame_free(&mut audio_frame);
-        }
-
+        self.gen.begin()?;
+        self.gen.copy_frame_data(self.background.data())?;
+        self.gen
+            .write_text(&format!("frame={}", self.gen.frame_no()), 40.0, 5.0, 5.0)?;
+
+        let mut frame = self.gen.next()?;
+        if frame.is_null() {
+            return Ok(());
+        }
+
+        // if sample_rate is set this frame is audio
+        if (*frame).sample_rate > 0 {
+            for mut pkt in self.audio_encoder.encode_frame(frame)? {
+                self.muxer.write_packet(pkt)?;
+                av_packet_free(&mut pkt);
+            }
+        } else {
+            for mut pkt in self.video_encoder.encode_frame(frame)? {
+                self.muxer.write_packet(pkt)?;
+                av_packet_free(&mut pkt);
+            }
+        }
+
+        av_frame_free(&mut frame);
+
         Ok(())
     }
 }
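The unit tests added in generator.rs pin down the timing contract this refactor relies on: video PTS advances by 1 per frame in a 1/fps time base, audio PTS advances by the AAC frame size (1024) in a 1/sample_rate time base, and the generator keeps the audio clock at least one video frame ahead. A simplified, FFmpeg-free restatement of that arithmetic (the real generator emits chunks lazily rather than computing a count up front):

// How many 1024-sample audio chunks must exist before video frame `frame_idx + 1`.
fn required_audio_chunks(frame_idx: u64, fps: f32, sample_rate: u32, frame_size: u32) -> u64 {
    let samples_per_frame = (sample_rate as f32 / fps) as u64; // 44100 / 30 = 1470
    let needed = (frame_idx + 1) * samples_per_frame;          // samples needed so far
    (needed + frame_size as u64 - 1) / frame_size as u64       // round up to whole chunks
}

fn main() {
    // After 30 video frames (1 second at 30 fps) we need 44100 samples,
    // i.e. ceil(44100 / 1024) = 44 audio frames of 1024 samples each.
    assert_eq!(required_audio_chunks(29, 30.0, 44100, 1024), 44);
    println!("ok");
}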
@@ -5,3 +5,4 @@ pub mod overseer;
 pub mod pipeline;
 pub mod variant;
 pub mod viewer;
+mod generator;
@@ -8,12 +8,6 @@ use std::cmp::PartialEq;
 use std::path::PathBuf;
 use uuid::Uuid;
 
-#[cfg(feature = "local-overseer")]
-mod local;
-
-#[cfg(feature = "webhook-overseer")]
-mod webhook;
-
 /// A copy of [ffmpeg_rs_raw::DemuxerInfo] without internal ptr
 #[derive(PartialEq, Clone)]
 pub struct IngressInfo {
@@ -32,6 +26,7 @@ pub struct IngressStream {
     pub height: usize,
     pub fps: f32,
     pub sample_rate: usize,
+    pub channels: u8,
     pub language: String,
 }
 
@@ -7,7 +7,6 @@ use serde::{Deserialize, Serialize};
 use uuid::Uuid;
 
 pub mod runner;
-pub mod placeholder;
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub enum EgressType {
@@ -41,7 +40,7 @@ impl Display for EgressType {
     }
 }
 
-#[derive(Clone, Debug, Serialize, Deserialize, Default)]
+#[derive(Clone)]
 pub struct PipelineConfig {
     pub id: Uuid,
     /// Transcoded/Copied stream config
@@ -49,7 +48,11 @@ pub struct PipelineConfig {
     /// Output muxers
     pub egress: Vec<EgressType>,
     /// Source stream information for placeholder generation
-    pub ingress_info: Option<IngressInfo>,
+    pub ingress_info: IngressInfo,
+    /// Primary source video stream
+    pub video_src: usize,
+    /// Primary audio source stream
+    pub audio_src: Option<usize>,
 }
 
 impl Display for PipelineConfig {
@ -1,188 +0,0 @@
|
|||||||
use anyhow::{bail, Result};
|
|
||||||
use crate::variant::video::VideoVariant;
|
|
||||||
use crate::variant::audio::AudioVariant;
|
|
||||||
use crate::overseer::{IngressStream, IngressStreamType};
|
|
||||||
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
|
|
||||||
av_frame_alloc, av_frame_get_buffer, av_frame_free, av_get_sample_fmt, AVFrame,
|
|
||||||
AVPixelFormat, AVSampleFormat
|
|
||||||
};
|
|
||||||
use std::ffi::CString;
|
|
||||||
|
|
||||||
/// Placeholder frame generator for idle mode when stream disconnects
|
|
||||||
pub struct PlaceholderGenerator;
|
|
||||||
|
|
||||||
impl PlaceholderGenerator {
|
|
||||||
/// Generate a placeholder video frame based on ingress stream info
|
|
||||||
pub unsafe fn generate_video_frame_from_stream(
|
|
||||||
stream: &IngressStream,
|
|
||||||
stream_time_base: (i32, i32),
|
|
||||||
frame_index: u64
|
|
||||||
) -> Result<*mut AVFrame> {
|
|
||||||
let frame = av_frame_alloc();
|
|
||||||
if frame.is_null() {
|
|
||||||
bail!("Failed to allocate placeholder video frame");
|
|
||||||
}
|
|
||||||
|
|
||||||
(*frame).format = AVPixelFormat::AV_PIX_FMT_YUV420P as i32;
|
|
||||||
(*frame).width = stream.width as i32;
|
|
||||||
(*frame).height = stream.height as i32;
|
|
||||||
(*frame).time_base.num = stream_time_base.0;
|
|
||||||
(*frame).time_base.den = stream_time_base.1;
|
|
||||||
|
|
||||||
// Set PTS based on frame rate and total frame index
|
|
||||||
let fps = if stream.fps > 0.0 { stream.fps } else { 30.0 };
|
|
||||||
let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
|
|
||||||
(*frame).pts = (frame_index as f64 / fps / time_base_f64) as i64;
|
|
||||||
|
|
||||||
if av_frame_get_buffer(frame, 0) < 0 {
|
|
||||||
av_frame_free(&mut frame);
|
|
||||||
bail!("Failed to allocate buffer for placeholder video frame");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fill with black (Y=16, U=V=128 for limited range YUV420P)
|
|
||||||
let y_size = ((*frame).width * (*frame).height) as usize;
|
|
||||||
let uv_size = y_size / 4;
|
|
||||||
|
|
||||||
if !(*frame).data[0].is_null() {
|
|
||||||
std::ptr::write_bytes((*frame).data[0], 16, y_size);
|
|
||||||
}
|
|
||||||
if !(*frame).data[1].is_null() {
|
|
||||||
std::ptr::write_bytes((*frame).data[1], 128, uv_size);
|
|
||||||
}
|
|
||||||
if !(*frame).data[2].is_null() {
|
|
||||||
std::ptr::write_bytes((*frame).data[2], 128, uv_size);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(frame)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Generate a placeholder audio frame based on ingress stream info
|
|
||||||
pub unsafe fn generate_audio_frame_from_stream(
|
|
||||||
stream: &IngressStream,
|
|
||||||
stream_time_base: (i32, i32),
|
|
||||||
frame_index: u64,
|
|
||||||
sample_fmt: &str,
|
|
||||||
channels: u32
|
|
||||||
) -> Result<*mut AVFrame> {
|
|
||||||
let frame = av_frame_alloc();
|
|
||||||
if frame.is_null() {
|
|
||||||
bail!("Failed to allocate placeholder audio frame");
|
|
||||||
}
|
        // Use the provided sample format
        let sample_fmt_cstr = CString::new(sample_fmt)
            .map_err(|_| anyhow::anyhow!("Invalid sample format string"))?;
        let sample_fmt_int = av_get_sample_fmt(sample_fmt_cstr.as_ptr());
        (*frame).format = sample_fmt_int;
        (*frame).channels = channels as i32;
        (*frame).sample_rate = stream.sample_rate as i32;
        (*frame).nb_samples = 1024; // Standard audio frame size
        (*frame).time_base.num = stream_time_base.0;
        (*frame).time_base.den = stream_time_base.1;

        // Set PTS based on sample rate and frame index
        let samples_per_second = stream.sample_rate as f64;
        let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
        (*frame).pts = ((frame_index * 1024) as f64 / samples_per_second / time_base_f64) as i64;

        if av_frame_get_buffer(frame, 0) < 0 {
            av_frame_free(&mut frame);
            bail!("Failed to allocate buffer for placeholder audio frame");
        }

        // Fill with silence (zeros)
        for i in 0..8 {
            if !(*frame).data[i].is_null() && (*frame).linesize[i] > 0 {
                std::ptr::write_bytes((*frame).data[i], 0, (*frame).linesize[i] as usize);
            }
        }

        Ok(frame)
    }

    /// Generate a placeholder black video frame
    pub unsafe fn generate_video_frame(
        variant: &VideoVariant,
        stream_time_base: (i32, i32),
        frame_index: u64
    ) -> Result<*mut AVFrame> {
        let frame = av_frame_alloc();
        if frame.is_null() {
            bail!("Failed to allocate placeholder video frame");
        }

        (*frame).format = AVPixelFormat::AV_PIX_FMT_YUV420P as i32;
        (*frame).width = variant.width as i32;
        (*frame).height = variant.height as i32;
        (*frame).time_base.num = stream_time_base.0;
        (*frame).time_base.den = stream_time_base.1;

        // Set PTS based on frame rate and total frame index
        let fps = if variant.fps > 0.0 { variant.fps } else { 30.0 };
        let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
        (*frame).pts = (frame_index as f64 / fps / time_base_f64) as i64;

        if av_frame_get_buffer(frame, 0) < 0 {
            av_frame_free(&mut frame);
            bail!("Failed to allocate buffer for placeholder video frame");
        }

        // Fill with black (Y=16, U=V=128 for limited range YUV420P)
        let y_size = ((*frame).width * (*frame).height) as usize;
        let uv_size = y_size / 4;

        if !(*frame).data[0].is_null() {
            std::ptr::write_bytes((*frame).data[0], 16, y_size);
        }
        if !(*frame).data[1].is_null() {
            std::ptr::write_bytes((*frame).data[1], 128, uv_size);
        }
        if !(*frame).data[2].is_null() {
            std::ptr::write_bytes((*frame).data[2], 128, uv_size);
        }

        Ok(frame)
    }

    /// Generate a placeholder silent audio frame
    pub unsafe fn generate_audio_frame(
        variant: &AudioVariant,
        stream_time_base: (i32, i32),
        frame_index: u64
    ) -> Result<*mut AVFrame> {
        let frame = av_frame_alloc();
        if frame.is_null() {
            bail!("Failed to allocate placeholder audio frame");
        }

        // Use the sample format from the variant configuration
        let sample_fmt_cstr = CString::new(variant.sample_fmt.as_str())
            .map_err(|_| anyhow::anyhow!("Invalid sample format string"))?;
        let sample_fmt_int = av_get_sample_fmt(sample_fmt_cstr.as_ptr());
        (*frame).format = sample_fmt_int;
        (*frame).channels = variant.channels as i32;
        (*frame).sample_rate = variant.sample_rate as i32;
        (*frame).nb_samples = 1024; // Standard audio frame size
        (*frame).time_base.num = stream_time_base.0;
        (*frame).time_base.den = stream_time_base.1;

        // Set PTS based on sample rate and frame index
        let samples_per_second = variant.sample_rate as f64;
        let time_base_f64 = stream_time_base.0 as f64 / stream_time_base.1 as f64;
        (*frame).pts = ((frame_index * 1024) as f64 / samples_per_second / time_base_f64) as i64;

        if av_frame_get_buffer(frame, 0) < 0 {
            av_frame_free(&mut frame);
            bail!("Failed to allocate buffer for placeholder audio frame");
        }

        // Fill with silence (zeros)
        for i in 0..8 {
            if !(*frame).data[i].is_null() && (*frame).linesize[i] > 0 {
                std::ptr::write_bytes((*frame).data[i], 0, (*frame).linesize[i] as usize);
            }
        }

        Ok(frame)
    }
}
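A note on the PTS arithmetic above: with a 1/sample_rate (audio) or 1/fps (video) time base the division collapses to a fixed step per frame. A minimal, self-contained sketch of that calculation (helper names are illustrative only, not part of the codebase):

// Standalone sketch of the PTS math used by the placeholder generators above.
// For audio, a 1/sample_rate time base makes the PTS step equal the frame size in samples;
// for video, a 1/fps time base makes the PTS step equal 1 per frame.
fn audio_pts(frame_index: u64, nb_samples: u64, sample_rate: f64, time_base: (i32, i32)) -> i64 {
    let tb = time_base.0 as f64 / time_base.1 as f64;
    ((frame_index * nb_samples) as f64 / sample_rate / tb) as i64
}

fn video_pts(frame_index: u64, fps: f64, time_base: (i32, i32)) -> i64 {
    let tb = time_base.0 as f64 / time_base.1 as f64;
    (frame_index as f64 / fps / tb) as i64
}

fn main() {
    // 48 kHz audio, 1024-sample frames, time base 1/48000: PTS advances by 1024 per frame.
    assert_eq!(audio_pts(3, 1024, 48_000.0, (1, 48_000)), 3 * 1024);
    // 30 fps video, time base 1/30: PTS advances by 1 per frame.
    assert_eq!(video_pts(10, 30.0, (1, 30)), 10);
}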
@@ -10,18 +10,19 @@ use std::time::{Duration, Instant};
use crate::egress::hls::HlsEgress;
use crate::egress::recorder::RecorderEgress;
use crate::egress::{Egress, EgressResult};
+use crate::generator::FrameGenerator;
use crate::ingress::ConnectionInfo;
use crate::mux::SegmentType;
use crate::overseer::{IngressInfo, IngressStream, IngressStreamType, Overseer};
use crate::pipeline::{EgressType, PipelineConfig};
use crate::variant::{StreamMapping, VariantStream};
-use crate::pipeline::placeholder::PlaceholderGenerator;
-use anyhow::{bail, Result};
+use anyhow::{bail, Context, Result};
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVCodecID::AV_CODEC_ID_WEBP;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPictureType::AV_PICTURE_TYPE_NONE;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::AVPixelFormat::AV_PIX_FMT_YUV420P;
use ffmpeg_rs_raw::ffmpeg_sys_the_third::{
-    av_frame_free, av_get_sample_fmt, av_packet_free, av_q2d, av_rescale_q, AVMediaType,
+    av_frame_free, av_get_sample_fmt, av_packet_free, av_q2d, av_rescale_q, AVFrame, AVMediaType,
+    AVStream,
};
use ffmpeg_rs_raw::{
    cstr, get_frame_from_hw, AudioFifo, Decoder, Demuxer, DemuxerInfo, Encoder, Resample, Scaler,
@@ -32,15 +33,14 @@ use tokio::runtime::Handle;
use uuid::Uuid;

/// Runner state for handling normal vs idle modes
-#[derive(Debug, Clone)]
pub enum RunnerState {
    /// Normal operation - processing live stream
    Normal,
    /// Idle mode - generating placeholder content after disconnection
    Idle {
        start_time: Instant,
-        variant_index: usize,
        last_frame_time: Option<Instant>,
+        gen: FrameGenerator,
    },
}

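The removed process_single_idle_frame paced placeholder output by sleeping out the remainder of each frame interval; the Idle state keeps last_frame_time for that purpose. A minimal, self-contained sketch of that pacing logic (the function name is assumed for illustration):

use std::time::{Duration, Instant};

// Sketch of the idle-path frame pacing: sleep out the remainder of the target
// frame interval (~33 ms for ~30 fps) before producing the next placeholder frame.
fn pace_frame(last_frame_time: &mut Option<Instant>, target_interval: Duration) {
    let now = Instant::now();
    if let Some(last) = *last_frame_time {
        let elapsed = now.duration_since(last);
        if elapsed < target_interval {
            std::thread::sleep(target_interval - elapsed);
        }
    }
    *last_frame_time = Some(Instant::now());
}

fn main() {
    let mut last = None;
    for _ in 0..3 {
        pace_frame(&mut last, Duration::from_millis(33)); // ~30 fps
    }
}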
@@ -129,142 +129,131 @@ impl PipelineRunner {
        })
    }

-    /// Process a single idle frame - generates one source frame and processes it through all variants
-    unsafe fn process_single_idle_frame(&mut self, config: &PipelineConfig) -> Result<()> {
-        use std::time::{Duration, Instant};
-
-        if config.variants.is_empty() {
-            return Ok(());
-        }
-
-        // Extract timing info from current state
-        let (mut last_frame_time, variant_index) = match &mut self.state {
-            RunnerState::Idle { last_frame_time, variant_index, .. } => (last_frame_time, variant_index),
-            _ => return Ok(()), // Only process in idle state
-        };
-
-        // Time-based frame rate calculation
-        let now = Instant::now();
-        if let Some(last_time) = *last_frame_time {
-            // Calculate target frame interval (assume 30fps for now)
-            let target_interval = Duration::from_millis(33); // ~30fps
-            let elapsed = now.duration_since(last_time);
-            if elapsed < target_interval {
-                // Not time for next frame yet
-                std::thread::sleep(target_interval - elapsed);
+    /// process the frame in the pipeline
+    unsafe fn process_frame(
+        &mut self,
+        config: &PipelineConfig,
+        stream: *mut AVStream,
+        frame: *mut AVFrame,
+    ) -> Result<Vec<EgressResult>> {
+        // Copy frame from GPU if using hwaccel decoding
+        let mut frame = get_frame_from_hw(frame)?;
+        (*frame).time_base = (*stream).time_base;
+
+        let p = (*stream).codecpar;
+        if (*p).codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO {
+            // Conditionally generate thumbnails based on interval (0 = disabled)
+            if self.thumb_interval > 0 && (self.frame_ctr % self.thumb_interval) == 0 {
+                let thumb_start = Instant::now();
+                let dst_pic = PathBuf::from(&self.out_dir)
+                    .join(config.id.to_string())
+                    .join("thumb.webp");
+                {
+                    let mut sw = Scaler::new();
+                    let mut scaled_frame = sw.process_frame(
+                        frame,
+                        (*frame).width as _,
+                        (*frame).height as _,
+                        AV_PIX_FMT_YUV420P,
+                    )?;
+
+                    let encoder = Encoder::new(AV_CODEC_ID_WEBP)?
+                        .with_height((*scaled_frame).height)
+                        .with_width((*scaled_frame).width)
+                        .with_pix_fmt(transmute((*scaled_frame).format))
+                        .open(None)?;
+
+                    encoder.save_picture(scaled_frame, dst_pic.to_str().unwrap())?;
+                    av_frame_free(&mut scaled_frame);
+                }
+
+                let thumb_duration = thumb_start.elapsed();
+                info!(
+                    "Saved thumb ({:.2}ms) to: {}",
+                    thumb_duration.as_millis() as f32 / 1000.0,
+                    dst_pic.display(),
+                );
            }
-        }
-        *last_frame_time = Some(Instant::now());
-
-        // Get source video stream info from stored ingress info
-        let video_stream = config.ingress_info.as_ref()
-            .and_then(|info| info.streams.iter().find(|s| matches!(s.stream_type, crate::overseer::IngressStreamType::Video)));
-
-        let mut egress_results = vec![];
-
-        // Generate one source frame and process it through all relevant variants
-        if let Some(stream) = video_stream {
-            // Generate a single source placeholder video frame based on original stream properties
-            let fps = if stream.fps > 0.0 { stream.fps } else { 30.0 };
-            let time_base = (1, fps as i32);
-            let mut source_frame = PlaceholderGenerator::generate_video_frame_from_stream(stream, time_base, self.frame_ctr)?;
-
-            // Set the frame time_base
-            (*source_frame).time_base.num = time_base.0;
-            (*source_frame).time_base.den = time_base.1;
-
-            // Increment frame counter for all video processing
            self.frame_ctr += 1;
-
-            // Process this single frame through all video variants (like normal pipeline)
-            for variant in &config.variants {
-                if let VariantStream::Video(v) = variant {
-                    // Scale/encode the source frame for this variant
-                    if let Some(enc) = self.encoders.get_mut(&v.id()) {
-                        // Use scaler if needed for different resolutions
-                        let frame_to_encode = if v.width as i32 == (*source_frame).width &&
-                            v.height as i32 == (*source_frame).height {
-                            // Same resolution, use source frame directly
-                            source_frame
-                        } else {
-                            // Different resolution, need to scale
-                            if let Some(scaler) = self.scalers.get_mut(&v.id()) {
-                                scaler.process_frame(source_frame, v.width, v.height, AV_PIX_FMT_YUV420P)?
-                            } else {
-                                source_frame // Fallback to source frame
-                            }
-                        };
-
-                        let packets = enc.encode_frame(frame_to_encode)?;
-                        for mut pkt in packets {
-                            for eg in self.egress.iter_mut() {
-                                let er = eg.process_pkt(pkt, &v.id())?;
-                                egress_results.push(er);
-                            }
-                            av_packet_free(&mut pkt);
-                        }
-                    }
-                }
-            }
-
-            av_frame_free(&mut source_frame);
        }

-        // Generate and process audio frames separately (audio doesn't share like video)
-        let audio_stream = config.ingress_info.as_ref()
-            .and_then(|info| info.streams.iter().find(|s| matches!(s.stream_type, crate::overseer::IngressStreamType::Audio)));
-
-        for variant in &config.variants {
-            if let VariantStream::Audio(a) = variant {
-                let time_base = (1, a.sample_rate as i32);
-                let mut frame = if let Some(stream) = audio_stream {
-                    // Use original stream properties for placeholder generation
-                    PlaceholderGenerator::generate_audio_frame_from_stream(stream, time_base, self.frame_ctr, &a.sample_fmt, a.channels)?
-                } else {
-                    // Fallback to variant properties if no stream info available
-                    PlaceholderGenerator::generate_audio_frame(a, time_base, self.frame_ctr)?
-                };
-
-                // Set the frame time_base
-                (*frame).time_base.num = time_base.0;
-                (*frame).time_base.den = time_base.1;
-
-                // Process through the encoding pipeline
-                if let Some(enc) = self.encoders.get_mut(&a.id()) {
-                    let packets = enc.encode_frame(frame)?;
-                    for mut pkt in packets {
-                        for eg in self.egress.iter_mut() {
-                            let er = eg.process_pkt(pkt, &a.id())?;
-                            egress_results.push(er);
-                        }
-                        av_packet_free(&mut pkt);
+        let mut egress_results = Vec::new();
+        // Get the variants which want this pkt
+        let pkt_vars = config
+            .variants
+            .iter()
+            .filter(|v| v.src_index() == (*stream).index as usize);
+        for var in pkt_vars {
+            let enc = if let Some(enc) = self.encoders.get_mut(&var.id()) {
+                enc
+            } else {
+                //warn!("Frame had nowhere to go in {} :/", var.id());
+                continue;
+            };
+
+            // scaling / resampling
+            let mut new_frame = false;
+            let mut frame = match var {
+                VariantStream::Video(v) => {
+                    if let Some(s) = self.scalers.get_mut(&v.id()) {
+                        new_frame = true;
+                        s.process_frame(frame, v.width, v.height, transmute(v.pixel_format))?
+                    } else {
+                        frame
                    }
                }
+                VariantStream::Audio(a) => {
+                    if let Some((r, f)) = self.resampler.get_mut(&a.id()) {
+                        let frame_size = (*enc.codec_context()).frame_size;
+                        new_frame = true;
+                        let mut resampled_frame = r.process_frame(frame)?;
+                        if let Some(ret) = f.buffer_frame(resampled_frame, frame_size as usize)? {
+                            // Set correct timebase for audio (1/sample_rate)
+                            (*ret).time_base.num = 1;
+                            (*ret).time_base.den = a.sample_rate as i32;
+                            av_frame_free(&mut resampled_frame);
+                            ret
+                        } else {
+                            av_frame_free(&mut resampled_frame);
+                            continue;
+                        }
+                    } else {
+                        frame
+                    }
+                }
+                _ => frame,
+            };
+
+            // before encoding frame, rescale timestamps
+            if !frame.is_null() {
+                let enc_ctx = enc.codec_context();
+                (*frame).pict_type = AV_PICTURE_TYPE_NONE;
+                (*frame).pts = av_rescale_q((*frame).pts, (*frame).time_base, (*enc_ctx).time_base);
+                (*frame).pkt_dts =
+                    av_rescale_q((*frame).pkt_dts, (*frame).time_base, (*enc_ctx).time_base);
+                (*frame).duration =
+                    av_rescale_q((*frame).duration, (*frame).time_base, (*enc_ctx).time_base);
+                (*frame).time_base = (*enc_ctx).time_base;
+            }
+
+            let packets = enc.encode_frame(frame)?;
+            // pass new packets to egress
+            for mut pkt in packets {
+                for eg in self.egress.iter_mut() {
+                    let er = eg.process_pkt(pkt, &var.id())?;
+                    egress_results.push(er);
+                }
+                av_packet_free(&mut pkt);
+            }
+
+            if new_frame {
                av_frame_free(&mut frame);
            }
        }

-        // Handle egress results (same as normal processing)
-        if !egress_results.is_empty() {
-            self.handle.block_on(async {
-                for er in egress_results {
-                    if let EgressResult::Segments { created, deleted } = er {
-                        if let Err(e) = self
-                            .overseer
-                            .on_segments(&config.id, &created, &deleted)
-                            .await
-                        {
-                            bail!("Failed to process segment {}", e.to_string());
-                        }
-                    }
-                }
-                Ok(())
-            })?;
-        }
-
-        Ok(())
+        av_frame_free(&mut frame);
+        Ok(egress_results)
    }

    /// EOF, cleanup
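The rescale step in process_frame converts PTS, DTS and duration from the frame's time base to the encoder's time base via av_rescale_q. A self-contained sketch of the same arithmetic, using a pure-Rust stand-in (truncating division, unlike FFmpeg's rounded version) purely to illustrate the conversion:

// Pure-Rust stand-in for the av_rescale_q conversion used above (illustration only).
fn rescale_q(v: i64, from: (i64, i64), to: (i64, i64)) -> i64 {
    // v * (from.num / from.den) / (to.num / to.den), kept in integer math
    v * from.0 * to.1 / (from.1 * to.0)
}

fn main() {
    // A PTS of 3000 in a 1/90000 time base is 1/30 s, which is 1600 in a 1/48000 time base.
    assert_eq!(rescale_q(3000, (1, 90_000), (1, 48_000)), 1600);
    // The same instant expressed in a 1/30 time base (one tick per frame at 30 fps) is 1.
    assert_eq!(rescale_q(3000, (1, 90_000), (1, 30)), 1);
}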
@@ -297,23 +286,36 @@ impl PipelineRunner {
        self.setup()?;

        let config = if let Some(config) = &self.config {
-            config
+            config.clone()
        } else {
            bail!("Pipeline not configured, cannot run")
        };

        // run transcoder pipeline
-        let (mut pkt, stream_info) = self.demuxer.get_packet()?;
+        let (mut pkt, _) = self.demuxer.get_packet()?;
+
+        let src_video_stream = config
+            .ingress_info
+            .streams
+            .iter()
+            .find(|s| s.index == config.video_src)
+            .unwrap();
+        let src_audio_stream = config
+            .ingress_info
+            .streams
+            .iter()
+            .find(|s| Some(s.index) == config.audio_src);

        // Handle state transitions based on packet availability
        match (&self.state, pkt.is_null()) {
            (RunnerState::Normal, true) => {
                // First time entering idle mode
-                info!("Stream input disconnected, entering idle mode with placeholder content");
+                info!("Stream input disconnected, entering idle mode");

                self.state = RunnerState::Idle {
                    start_time: Instant::now(),
-                    variant_index: 0,
                    last_frame_time: None,
+                    gen: FrameGenerator::from_stream(src_video_stream, src_audio_stream)?,
                };
            }
            (RunnerState::Idle { start_time, .. }, true) => {
@@ -332,27 +334,23 @@ impl PipelineRunner {
                // Normal operation continues
            }
        }

        // Process based on current state
-        match &self.state {
-            RunnerState::Idle { .. } => {
-                // Process a single idle frame (rotating through variants)
-                self.process_single_idle_frame(config)?;
-
-                // Free the null packet if needed
-                if !pkt.is_null() {
-                    av_packet_free(&mut pkt);
-                }
-
-                return Ok(true); // Continue processing
+        let result = match &mut self.state {
+            RunnerState::Idle { gen, .. } => {
+                let frame = gen.next()?;
+                let stream = if (*frame).sample_rate > 0 {
+                    self.demuxer.get_stream(
+                        src_audio_stream
+                            .context("frame generator created an audio frame with no src stream")?
+                            .index,
+                    )?
+                } else {
+                    self.demuxer.get_stream(src_video_stream.index)?
+                };
+                self.process_frame(&config, stream, frame)?
            }
            RunnerState::Normal => {
-                // Normal packet processing
-                if pkt.is_null() {
-                    // This shouldn't happen in Normal state but handle gracefully
-                    return Ok(true);
-                }

                // TODO: For copy streams, skip decoder
                let frames = match self.decoder.decode_pkt(pkt) {
                    Ok(f) => f,
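In the Idle branch above the generated frame is routed to the audio or video source stream purely by checking whether the AVFrame carries a non-zero sample_rate. A tiny self-contained sketch of that dispatch rule, with plain structs standing in for the FFmpeg types:

// Minimal stand-ins for the FFmpeg types, used only to illustrate the dispatch rule above.
struct Frame {
    sample_rate: i32,
}

#[derive(Debug, PartialEq)]
enum SrcKind {
    Audio,
    Video,
}

// Audio frames carry a non-zero sample rate; everything else is treated as video.
fn src_for(frame: &Frame) -> SrcKind {
    if frame.sample_rate > 0 {
        SrcKind::Audio
    } else {
        SrcKind::Video
    }
}

fn main() {
    assert_eq!(src_for(&Frame { sample_rate: 48_000 }), SrcKind::Audio);
    assert_eq!(src_for(&Frame { sample_rate: 0 }), SrcKind::Video);
}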
@@ -364,133 +362,19 @@ impl PipelineRunner {

                let mut egress_results = vec![];
                for (frame, stream) in frames {
-                    // Copy frame from GPU if using hwaccel decoding
-                    let mut frame = get_frame_from_hw(frame)?;
-                    (*frame).time_base = (*stream).time_base;
-
-                    let p = (*stream).codecpar;
-                    if (*p).codec_type == AVMediaType::AVMEDIA_TYPE_VIDEO {
-                        // Conditionally generate thumbnails based on interval (0 = disabled)
-                        if self.thumb_interval > 0 && (self.frame_ctr % self.thumb_interval) == 0 {
-                            let thumb_start = Instant::now();
-                            let dst_pic = PathBuf::from(&self.out_dir)
-                                .join(config.id.to_string())
-                                .join("thumb.webp");
-                            {
-                                let mut sw = Scaler::new();
-                                let mut scaled_frame = sw.process_frame(
-                                    frame,
-                                    (*frame).width as _,
-                                    (*frame).height as _,
-                                    AV_PIX_FMT_YUV420P,
-                                )?;
-
-                                let mut encoder = Encoder::new(AV_CODEC_ID_WEBP)?
-                                    .with_height((*scaled_frame).height)
-                                    .with_width((*scaled_frame).width)
-                                    .with_pix_fmt(transmute((*scaled_frame).format))
-                                    .open(None)?;
-
-                                encoder.save_picture(scaled_frame, dst_pic.to_str().unwrap())?;
-                                av_frame_free(&mut scaled_frame);
-                            }
-
-                            let thumb_duration = thumb_start.elapsed();
-                            info!(
-                                "Saved thumb ({:.2}ms) to: {}",
-                                thumb_duration.as_millis() as f32 / 1000.0,
-                                dst_pic.display(),
-                            );
-                        }
+                    let results = self.process_frame(&config, stream, frame)?;
+                    egress_results.extend(results);
                    }

-                    self.frame_ctr += 1;
+                    av_packet_free(&mut pkt);
+                    egress_results
                }
+            };

-                    // Get the variants which want this pkt
-                    let pkt_vars = config
-                        .variants
-                        .iter()
-                        .filter(|v| v.src_index() == (*stream).index as usize);
-                    for var in pkt_vars {
-                        let enc = if let Some(enc) = self.encoders.get_mut(&var.id()) {
-                            enc
-                        } else {
-                            //warn!("Frame had nowhere to go in {} :/", var.id());
-                            continue;
-                        };
-
-                        // scaling / resampling
-                        let mut new_frame = false;
-                        let mut frame = match var {
-                            VariantStream::Video(v) => {
-                                if let Some(s) = self.scalers.get_mut(&v.id()) {
-                                    new_frame = true;
-                                    s.process_frame(frame, v.width, v.height, transmute(v.pixel_format))?
-                                } else {
-                                    frame
-                                }
-                            }
-                            VariantStream::Audio(a) => {
-                                if let Some((r, f)) = self.resampler.get_mut(&a.id()) {
-                                    let frame_size = (*enc.codec_context()).frame_size;
-                                    new_frame = true;
-                                    let mut resampled_frame = r.process_frame(frame)?;
-                                    if let Some(ret) =
-                                        f.buffer_frame(resampled_frame, frame_size as usize)?
-                                    {
-                                        // Set correct timebase for audio (1/sample_rate)
-                                        (*ret).time_base.num = 1;
-                                        (*ret).time_base.den = a.sample_rate as i32;
-                                        av_frame_free(&mut resampled_frame);
-                                        ret
-                                    } else {
-                                        av_frame_free(&mut resampled_frame);
-                                        continue;
-                                    }
-                                } else {
-                                    frame
-                                }
-                            }
-                            _ => frame,
-                        };
-
-                        // before encoding frame, rescale timestamps
-                        if !frame.is_null() {
-                            let enc_ctx = enc.codec_context();
-                            (*frame).pict_type = AV_PICTURE_TYPE_NONE;
-                            (*frame).pts =
-                                av_rescale_q((*frame).pts, (*frame).time_base, (*enc_ctx).time_base);
-                            (*frame).pkt_dts =
-                                av_rescale_q((*frame).pkt_dts, (*frame).time_base, (*enc_ctx).time_base);
-                            (*frame).duration =
-                                av_rescale_q((*frame).duration, (*frame).time_base, (*enc_ctx).time_base);
-                            (*frame).time_base = (*enc_ctx).time_base;
-                        }
-
-                        let packets = enc.encode_frame(frame)?;
-                        // pass new packets to egress
-                        for mut pkt in packets {
-                            for eg in self.egress.iter_mut() {
-                                let er = eg.process_pkt(pkt, &var.id())?;
-                                egress_results.push(er);
-                            }
-                            av_packet_free(&mut pkt);
-                        }
-
-                        if new_frame {
-                            av_frame_free(&mut frame);
-                        }
-                    }
-
-                    av_frame_free(&mut frame);
-                }
-
-                av_packet_free(&mut pkt);

                // egress results - process async operations without blocking if possible
-                if !egress_results.is_empty() {
+                if !result.is_empty() {
                    self.handle.block_on(async {
-                        for er in egress_results {
+                        for er in result {
                            if let EgressResult::Segments { created, deleted } = er {
                                if let Err(e) = self
                                    .overseer
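Thumbnail generation now happens inside process_frame, gated by thumb_interval (0 disables it). A small self-contained sketch of that gating and how the interval maps to wall-clock time at a fixed frame rate:

// Sketch of the thumbnail gating used by process_frame: emit a thumbnail every
// `thumb_interval` frames, with 0 meaning "disabled".
fn should_thumb(frame_ctr: u64, thumb_interval: u64) -> bool {
    thumb_interval > 0 && frame_ctr % thumb_interval == 0
}

fn main() {
    // At 30 fps, an interval of 900 frames is one thumbnail every 30 seconds.
    let hits = (0..1800u64).filter(|f| should_thumb(*f, 900)).count();
    assert_eq!(hits, 2); // frames 0 and 900
    // An interval of 0 never produces thumbnails.
    assert!((0..100u64).all(|f| !should_thumb(f, 0)));
}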
@@ -510,8 +394,6 @@ impl PipelineRunner {
                info!("Average fps: {:.2}", n_frames as f32 / elapsed);
                self.fps_counter_start = Instant::now();
                self.fps_last_frame_ctr = self.frame_ctr;
-            }
-        } // Close the RunnerState::Normal match arm
        }
        Ok(true)
    }
@@ -542,18 +424,16 @@ impl PipelineRunner {
                    height: s.height,
                    fps: s.fps,
                    sample_rate: s.sample_rate,
+                    channels: s.channels,
                    language: s.language.clone(),
                })
                .collect(),
        };

-        let mut cfg = self
+        let cfg = self
            .handle
            .block_on(async { self.overseer.start_stream(&self.connection, &i_info).await })?;

-        // Store ingress info in config for placeholder generation
-        cfg.ingress_info = Some(i_info.clone());

        self.config = Some(cfg);
        self.info = Some(i_info);

@@ -649,7 +529,10 @@ impl Drop for PipelineRunner {
        self.copy_stream.clear();
        self.egress.clear();

-        info!("PipelineRunner cleaned up resources for stream: {}", self.connection.key);
+        info!(
+            "PipelineRunner cleaned up resources for stream: {}",
+            self.connection.key
+        );
        }
    }
}
@@ -7,7 +7,7 @@ edition = "2021"
default = ["srt", "rtmp", "test-pattern"]
srt = ["zap-stream-core/srt"]
rtmp = ["zap-stream-core/rtmp"]
-test-pattern = ["zap-stream-core/test-pattern", "zap-stream-db/test-pattern"]
+test-pattern = ["zap-stream-db/test-pattern"]

[dependencies]
zap-stream-db = { path = "../zap-stream-db" }
@@ -9,45 +9,55 @@
    color: white;
    font-family: monospace;
}

.container {
    padding: 20px;
    max-width: 1200px;
    margin: 0 auto;
}

.stream-list {
    margin: 20px 0;
}

.stream-item {
    background: #333;
    margin: 10px 0;
    padding: 15px;
    border-radius: 5px;
}

.stream-title {
    font-size: 18px;
    font-weight: bold;
    margin-bottom: 5px;
}

.stream-link {
    color: #00ff00;
    text-decoration: none;
}

.stream-link:hover {
    text-decoration: underline;
}

.video-player {
    margin: 20px 0;
    max-width: 800px;
}

video {
    width: 100%;
    max-width: 800px;
    background: #000;
}

.no-streams {
    color: #999;
    font-style: italic;
}

.player-section {
    margin-top: 30px;
    border-top: 1px solid #555;
@@ -59,19 +69,24 @@
<body>
<div class="container">
    <h1>Welcome to {{public_url}}</h1>

    <h2>Active Streams</h2>
    {{#has_streams}}
    <div class="stream-list">
        {{#streams}}
        <div class="stream-item">
            <div class="stream-title">{{title}}</div>
-            {{#summary}}<div class="stream-summary">{{summary}}</div>{{/summary}}
+            {{#summary}}
+            <div class="stream-summary">{{summary}}</div>
+            {{/summary}}
            <div>
-                <a href="{{live_url}}" class="stream-link">📺 {{live_url}}</a>
-                {{#viewer_count}}<span style="margin-left: 15px;">👥 {{viewer_count}} viewers</span>{{/viewer_count}}
+                <a href="{{live_url}}" class="stream-link">{{live_url}}</a>
+                {{#viewer_count}}<span style="margin-left: 15px;">{{viewer_count}} viewers</span>{{/viewer_count}}
            </div>
-            <button onclick="playStream('{{live_url}}')" style="margin-top: 5px; background: #00ff00; color: black; border: none; padding: 5px 10px; cursor: pointer;">Play</button>
+            <button onclick="playStream('{{live_url}}')"
+                    style="margin-top: 5px; background: #00ff00; color: black; border: none; padding: 5px 10px; cursor: pointer;">
+                Play
+            </button>
        </div>
        {{/streams}}
    </div>
@@ -79,15 +94,19 @@
    {{^has_streams}}
    <div class="no-streams">No active streams</div>
    {{/has_streams}}

    <div class="player-section">
        <h2>Stream Player</h2>
        <div class="video-player">
            <video id="video-player" controls></video>
        </div>
        <div style="margin-top: 10px;">
-            <input type="text" id="stream-url" placeholder="Enter stream URL (e.g., /stream-id/live.m3u8)" style="width: 400px; padding: 5px; margin-right: 10px;">
-            <button onclick="playCustomStream()" style="background: #00ff00; color: black; border: none; padding: 5px 10px; cursor: pointer;">Play URL</button>
+            <input type="text" id="stream-url" placeholder="Enter stream URL (e.g., /stream-id/live.m3u8)"
+                   style="width: 400px; padding: 5px; margin-right: 10px;">
+            <button onclick="playCustomStream()"
+                    style="background: #00ff00; color: black; border: none; padding: 5px 10px; cursor: pointer;">Play
+                URL
+            </button>
        </div>
    </div>
</div>
@@ -104,12 +123,12 @@
            hls = new Hls();
            hls.loadSource(url);
            hls.attachMedia(video);
-            hls.on(Hls.Events.MANIFEST_PARSED, function() {
+            hls.on(Hls.Events.MANIFEST_PARSED, function () {
                video.play();
            });
        } else if (video.canPlayType('application/vnd.apple.mpegurl')) {
            video.src = url;
-            video.addEventListener('loadedmetadata', function() {
+            video.addEventListener('loadedmetadata', function () {
                video.play();
            });
        } else {
@@ -11,18 +11,18 @@ use hyper::service::Service;
use hyper::{Method, Request, Response};
use log::error;
use nostr_sdk::{serde_json, Alphabet, Event, Kind, PublicKey, SingleLetterTag, TagKind};
-use serde::{Serialize, Deserialize};
+use serde::Serialize;
use std::future::Future;
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::fs::File;
use tokio::sync::RwLock;
use tokio_util::io::ReaderStream;
use zap_stream_core::viewer::ViewerTracker;

-#[derive(Serialize)]
+#[derive(Serialize, Clone)]
struct StreamData {
    id: String,
    title: String,
@@ -33,7 +33,7 @@ struct StreamData {
    viewer_count: Option<u64>,
}

-#[derive(Serialize)]
+#[derive(Serialize, Clone)]
struct IndexTemplateData {
    public_url: String,
    has_streams: bool,
@@ -41,7 +41,7 @@ struct IndexTemplateData {
    streams: Vec<StreamData>,
}

-struct CachedStreams {
+pub struct CachedStreams {
    data: IndexTemplateData,
    cached_at: Instant,
}
@@ -57,7 +57,12 @@ pub struct HttpServer {
}

impl HttpServer {
-    pub fn new(index_template: String, files_dir: PathBuf, api: Api, stream_cache: StreamCache) -> Self {
+    pub fn new(
+        index_template: String,
+        files_dir: PathBuf,
+        api: Api,
+        stream_cache: StreamCache,
+    ) -> Self {
        Self {
            index_template,
            files_dir,
@@ -70,8 +75,11 @@ impl HttpServer {
        Self::get_cached_or_fetch_streams_static(&self.stream_cache, &self.api).await
    }

-    async fn get_cached_or_fetch_streams_static(stream_cache: &StreamCache, api: &Api) -> Result<IndexTemplateData> {
-        const CACHE_DURATION: Duration = Duration::from_secs(60); // 1 minute
+    async fn get_cached_or_fetch_streams_static(
+        stream_cache: &StreamCache,
+        api: &Api,
+    ) -> Result<IndexTemplateData> {
+        const CACHE_DURATION: Duration = Duration::from_secs(10);

        // Check if we have valid cached data
        {
@@ -86,7 +94,7 @@ impl HttpServer {
        // Cache is expired or missing, fetch new data
        let active_streams = api.get_active_streams().await?;
        let public_url = api.get_public_url();

        let template_data = if !active_streams.is_empty() {
            let streams: Vec<StreamData> = active_streams
                .into_iter()
@@ -94,10 +102,16 @@ impl HttpServer {
                    let viewer_count = api.get_viewer_count(&stream.id);
                    StreamData {
                        id: stream.id.clone(),
-                        title: stream.title.unwrap_or_else(|| format!("Stream {}", &stream.id[..8])),
+                        title: stream
+                            .title
+                            .unwrap_or_else(|| format!("Stream {}", &stream.id[..8])),
                        summary: stream.summary,
                        live_url: format!("/{}/live.m3u8", stream.id),
-                        viewer_count: if viewer_count > 0 { Some(viewer_count) } else { None },
+                        viewer_count: if viewer_count > 0 {
+                            Some(viewer_count as _)
+                        } else {
+                            None
+                        },
                    }
                })
                .collect();
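The stream-list cache TTL drops from 60 s to 10 s here; the expiry check itself is only a timestamp comparison. A minimal, generic sketch of that cache pattern (names assumed, not the server's actual types):

use std::time::{Duration, Instant};

// Generic sketch of "serve the cached value unless it is older than the TTL".
struct Cached<T> {
    data: T,
    cached_at: Instant,
}

fn get_or_refresh<T: Clone>(slot: &mut Option<Cached<T>>, ttl: Duration, fetch: impl FnOnce() -> T) -> T {
    if let Some(c) = slot {
        if c.cached_at.elapsed() < ttl {
            return c.data.clone();
        }
    }
    let fresh = fetch();
    *slot = Some(Cached { data: fresh.clone(), cached_at: Instant::now() });
    fresh
}

fn main() {
    let mut slot: Option<Cached<u32>> = None;
    let a = get_or_refresh(&mut slot, Duration::from_secs(10), || 1);
    let b = get_or_refresh(&mut slot, Duration::from_secs(10), || 2); // still cached
    assert_eq!((a, b), (1, 1));
}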
@@ -140,13 +154,18 @@ impl HttpServer {
        playlist_path: &PathBuf,
    ) -> Result<Response<BoxBody<Bytes, anyhow::Error>>, anyhow::Error> {
        // Extract stream ID from path (e.g., /uuid/live.m3u8 -> uuid)
-        let path_parts: Vec<&str> = req.uri().path().trim_start_matches('/').split('/').collect();
+        let path_parts: Vec<&str> = req
+            .uri()
+            .path()
+            .trim_start_matches('/')
+            .split('/')
+            .collect();
        if path_parts.len() < 2 {
            return Ok(Response::builder().status(404).body(BoxBody::default())?);
        }

        let stream_id = path_parts[0];

        // Get client IP and User-Agent for tracking
        let client_ip = Self::get_client_ip(req);
        let user_agent = req
@@ -179,9 +198,10 @@ impl HttpServer {

        // Read the playlist file
        let playlist_content = tokio::fs::read(playlist_path).await?;

        // Parse and modify playlist to add viewer token to URLs
-        let modified_content = Self::add_viewer_token_to_playlist(&playlist_content, &viewer_token)?;
+        let modified_content =
+            Self::add_viewer_token_to_playlist(&playlist_content, &viewer_token)?;

        Ok(Response::builder()
            .header("content-type", "application/vnd.apple.mpegurl")
@@ -205,7 +225,7 @@ impl HttpServer {
            }
        }
    }

    if let Some(real_ip) = req.headers().get("x-real-ip") {
        if let Ok(ip_str) = real_ip.to_str() {
            return ip_str.to_string();
@@ -220,17 +240,18 @@ impl HttpServer {
        // Parse the M3U8 playlist using the m3u8-rs crate
        let (_, playlist) = m3u8_rs::parse_playlist(content)
            .map_err(|e| anyhow::anyhow!("Failed to parse M3U8 playlist: {}", e))?;

        match playlist {
            m3u8_rs::Playlist::MasterPlaylist(mut master) => {
                // For master playlists, add viewer token to variant streams
                for variant in &mut master.variants {
                    variant.uri = Self::add_token_to_url(&variant.uri, viewer_token);
                }

                // Write the modified playlist back to string
                let mut output = Vec::new();
-                master.write_to(&mut output)
+                master
+                    .write_to(&mut output)
                    .map_err(|e| anyhow::anyhow!("Failed to write master playlist: {}", e))?;
                String::from_utf8(output)
                    .map_err(|e| anyhow::anyhow!("Failed to convert playlist to string: {}", e))
@@ -242,7 +263,7 @@ impl HttpServer {
            }
        }
    }

    fn add_token_to_url(url: &str, viewer_token: &str) -> String {
        if url.contains('?') {
            format!("{}&vt={}", url, viewer_token)
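add_token_to_url appends the viewer token as a vt query parameter, using & when a query string is already present. The else branch is outside this excerpt, so the ? separator below is an assumption; a standalone copy of the rule with a usage example:

// Standalone copy of the token-appending rule shown above.
// The '?' branch is assumed; only the '&' branch is visible in this diff excerpt.
fn add_token_to_url(url: &str, viewer_token: &str) -> String {
    if url.contains('?') {
        format!("{}&vt={}", url, viewer_token)
    } else {
        format!("{}?vt={}", url, viewer_token)
    }
}

fn main() {
    assert_eq!(add_token_to_url("/abc/live.m3u8", "tok1"), "/abc/live.m3u8?vt=tok1");
    assert_eq!(add_token_to_url("/abc/seg.ts?n=3", "tok1"), "/abc/seg.ts?n=3&vt=tok1");
}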
@@ -264,7 +285,7 @@ impl Service<Request<Incoming>> for HttpServer {
    {
        let stream_cache = self.stream_cache.clone();
        let api = self.api.clone();

        // Compile template outside async move for better performance
        let template = match mustache::compile_str(&self.index_template) {
            Ok(t) => t,
@@ -272,40 +293,36 @@ impl Service<Request<Incoming>> for HttpServer {
                error!("Failed to compile template: {}", e);
                return Box::pin(async move {
                    Ok(Response::builder()
                        .status(500)
-                        .body(BoxBody::default()).unwrap())
+                        .body(BoxBody::default())
+                        .unwrap())
                });
            }
        };

        return Box::pin(async move {
            // Use the existing method to get cached template data
-            let template_data = Self::get_cached_or_fetch_streams_static(&stream_cache, &api).await;
+            let template_data =
+                Self::get_cached_or_fetch_streams_static(&stream_cache, &api).await;

            match template_data {
-                Ok(data) => {
-                    match template.render_to_string(&data) {
-                        Ok(index_html) => Ok(Response::builder()
-                            .header("content-type", "text/html")
-                            .header("server", "zap-stream-core")
-                            .body(
-                                Full::new(Bytes::from(index_html))
-                                    .map_err(|e| match e {})
-                                    .boxed(),
-                            )?),
-                        Err(e) => {
-                            error!("Failed to render template: {}", e);
-                            Ok(Response::builder()
-                                .status(500)
-                                .body(BoxBody::default())?)
-                        }
+                Ok(data) => match template.render_to_string(&data) {
+                    Ok(index_html) => Ok(Response::builder()
+                        .header("content-type", "text/html")
+                        .header("server", "zap-stream-core")
+                        .body(
+                            Full::new(Bytes::from(index_html))
+                                .map_err(|e| match e {})
+                                .boxed(),
+                        )?),
+                    Err(e) => {
+                        error!("Failed to render template: {}", e);
+                        Ok(Response::builder().status(500).body(BoxBody::default())?)
                    }
-                }
+                },
                Err(e) => {
                    error!("Failed to fetch template data: {}", e);
-                    Ok(Response::builder()
-                        .status(500)
-                        .body(BoxBody::default())?)
+                    Ok(Response::builder().status(500).body(BoxBody::default())?)
                }
            }
        });
@@ -415,12 +432,21 @@ pub fn check_nip98_auth(req: &Request<Incoming>, public_url: &str) -> Result<Aut

    // Construct full URI using public_url + path + query
    let request_uri = match req.uri().query() {
-        Some(query) => format!("{}{}?{}", public_url.trim_end_matches('/'), req.uri().path(), query),
+        Some(query) => format!(
+            "{}{}?{}",
+            public_url.trim_end_matches('/'),
+            req.uri().path(),
+            query
+        ),
        None => format!("{}{}", public_url.trim_end_matches('/'), req.uri().path()),
    };

    if !url_tag.eq_ignore_ascii_case(&request_uri) {
-        bail!("Invalid nostr event, URL tag invalid. Expected: {}, Got: {}", request_uri, url_tag);
+        bail!(
+            "Invalid nostr event, URL tag invalid. Expected: {}, Got: {}",
+            request_uri,
+            url_tag
+        );
    }

    // Check method tag
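check_nip98_auth rebuilds the expected request URI from the configured public_url plus the request path and optional query, then compares it case-insensitively with the event's URL tag. A small self-contained sketch of that reconstruction (example.com is a placeholder):

// Sketch of the expected-URI reconstruction used by check_nip98_auth above.
fn expected_uri(public_url: &str, path: &str, query: Option<&str>) -> String {
    match query {
        Some(q) => format!("{}{}?{}", public_url.trim_end_matches('/'), path, q),
        None => format!("{}{}", public_url.trim_end_matches('/'), path),
    }
}

fn main() {
    let uri = expected_uri("https://example.com/", "/api/v1/stream", Some("id=1"));
    assert_eq!(uri, "https://example.com/api/v1/stream?id=1");
    // Comparison against the NIP-98 url tag is case-insensitive.
    assert!(uri.eq_ignore_ascii_case("HTTPS://EXAMPLE.COM/api/v1/stream?ID=1"));
}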
@@ -17,14 +17,14 @@ use url::Url;
use uuid::Uuid;
use zap_stream_core::egress::{EgressConfig, EgressSegment};
use zap_stream_core::ingress::ConnectionInfo;
-use zap_stream_core::overseer::{IngressInfo, IngressStreamType, Overseer};
+use zap_stream_core::overseer::{IngressInfo, IngressStream, IngressStreamType, Overseer};
use zap_stream_core::pipeline::{EgressType, PipelineConfig};
use zap_stream_core::variant::audio::AudioVariant;
use zap_stream_core::variant::mapping::VariantMapping;
use zap_stream_core::variant::video::VideoVariant;
use zap_stream_core::variant::{StreamMapping, VariantStream};
use zap_stream_core::viewer::ViewerTracker;
-use zap_stream_db::{UserStream, UserStreamState, ZapStreamDb};
+use zap_stream_db::{IngestEndpoint, UserStream, UserStreamState, ZapStreamDb};

const STREAM_EVENT_KIND: u16 = 30_311;

@@ -353,22 +353,18 @@ impl Overseer for ZapStreamOverseer {
        }

        // Get ingest endpoint configuration based on connection type
-        let endpoint_id = self
-            .detect_endpoint(&connection)
-            .await?
-            .ok_or_else(|| anyhow::anyhow!("No ingest endpoints configured"))?;
-        let endpoint = self
-            .db
-            .get_ingest_endpoint(endpoint_id)
-            .await?
-            .ok_or_else(|| anyhow::anyhow!("Ingest endpoint not found"))?;
+        let endpoint = self.detect_endpoint(&connection).await?;

-        let variants = get_variants_from_endpoint(&stream_info, &endpoint)?;
+        let cfg = get_variants_from_endpoint(&stream_info, &endpoint)?;

+        if cfg.video_src.is_none() || cfg.variants.is_empty() {
+            bail!("No video src found");
+        }

        let mut egress = vec![];
        egress.push(EgressType::HLS(EgressConfig {
            name: "hls".to_string(),
-            variants: variants.iter().map(|v| v.id()).collect(),
+            variants: cfg.variants.iter().map(|v| v.id()).collect(),
        }));

        let stream_id = Uuid::new_v4();
@@ -378,7 +374,7 @@ impl Overseer for ZapStreamOverseer {
            user_id: uid,
            starts: Utc::now(),
            state: UserStreamState::Live,
-            endpoint_id: Some(endpoint_id),
+            endpoint_id: Some(endpoint.id),
            ..Default::default()
        };
        let stream_event = self.publish_stream_event(&new_stream, &user.pubkey).await?;
@@ -399,8 +395,11 @@ impl Overseer for ZapStreamOverseer {

        Ok(PipelineConfig {
            id: stream_id,
-            variants,
+            variants: cfg.variants,
            egress,
+            ingress_info: stream_info.clone(),
+            video_src: cfg.video_src.unwrap().index,
+            audio_src: cfg.audio_src.map(|s| s.index),
        })
    }

@@ -525,25 +524,29 @@

impl ZapStreamOverseer {
    /// Detect which ingest endpoint should be used based on connection info
-    async fn detect_endpoint(&self, connection: &ConnectionInfo) -> Result<Option<u64>> {
-        // Get all ingest endpoints and match by name against connection endpoint
+    async fn detect_endpoint(&self, connection: &ConnectionInfo) -> Result<IngestEndpoint> {
        let endpoints = self.db.get_ingest_endpoints().await?;

-        for endpoint in &endpoints {
-            if endpoint.name == connection.endpoint {
-                return Ok(Some(endpoint.id));
-            }
-        }
-
-        // No matching endpoint found, use the most expensive one
-        Ok(endpoints.into_iter().max_by_key(|e| e.cost).map(|e| e.id))
+        let default = endpoints.iter().max_by_key(|e| e.cost);
+        Ok(endpoints
+            .iter()
+            .find(|e| e.name == connection.endpoint)
+            .or(default)
+            .unwrap()
+            .clone())
    }
}

-fn get_variants_from_endpoint(
-    info: &IngressInfo,
+struct EndpointConfig<'a> {
+    video_src: Option<&'a IngressStream>,
+    audio_src: Option<&'a IngressStream>,
+    variants: Vec<VariantStream>,
+}

+fn get_variants_from_endpoint<'a>(
+    info: &'a IngressInfo,
    endpoint: &zap_stream_db::IngestEndpoint,
-) -> Result<Vec<VariantStream>> {
+) -> Result<EndpointConfig<'a>> {
    let capabilities_str = endpoint.capabilities.as_deref().unwrap_or("");
    let capabilities: Vec<&str> = capabilities_str.split(',').collect();

@@ -658,5 +661,9 @@ fn get_variants_from_endpoint(
        // Handle other capabilities like dvr:720h here if needed
    }

-    Ok(vars)
+    Ok(EndpointConfig {
+        audio_src,
+        video_src,
+        variants: vars,
+    })
}
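detect_endpoint now always yields an endpoint: it prefers an exact name match and otherwise falls back to the most expensive one (the unwrap assumes at least one endpoint is configured). A self-contained sketch of that selection rule:

// Self-contained sketch of the endpoint selection rule used by detect_endpoint above.
#[derive(Clone, Debug)]
struct Endpoint {
    name: String,
    cost: u64,
}

fn select<'a>(endpoints: &'a [Endpoint], wanted: &str) -> Option<&'a Endpoint> {
    let default = endpoints.iter().max_by_key(|e| e.cost);
    endpoints.iter().find(|e| e.name == wanted).or(default)
}

fn main() {
    let eps = vec![
        Endpoint { name: "basic".into(), cost: 1 },
        Endpoint { name: "best".into(), cost: 10 },
    ];
    // Exact name match wins...
    assert_eq!(select(&eps, "basic").unwrap().name, "basic");
    // ...otherwise fall back to the highest-cost endpoint.
    assert_eq!(select(&eps, "missing").unwrap().name, "best");
}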