Implement payments system with default free allowance and quota enforcement (#17)

* Initial plan for issue

* Implement complete payments system with quota enforcement

- Add default free allowance configuration (100MB)
- Implement quota checking before uploads in both blossom and nip96 routes
- Add comprehensive quota checking functions in database module
- Enhance admin API to show quota information
- Add payment processing infrastructure
- Include all necessary database migrations

Users now get 100MB of free storage plus any valid paid storage.
Uploads are rejected when the quota would be exceeded.
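
A minimal sketch of the quota rule (simplified from the new Database::check_user_quota in src/db.rs below; not the exact signature):

fn upload_allowed(
    used_bytes: u64,       // total size of the user's existing uploads
    upload_size: u64,      // size of the incoming file
    free_quota_bytes: u64, // payments.free_quota_bytes, default 104857600 (100MB)
    paid_size: u64,        // bytes purchased via settled invoices
    paid_until_valid: bool // paid_until is still in the future
) -> bool {
    let mut available = free_quota_bytes;
    if paid_until_valid {
        available += paid_size;
    }
    used_bytes + upload_size <= available
}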

* Move free_quota_bytes to PaymentConfig and restore mime_type parameter

Co-authored-by: v0l <1172179+v0l@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: v0l <1172179+v0l@users.noreply.github.com>
Commit ca2d23508b (parent 71cb34eaee)
Author: Copilot
Date: 2025-06-10 11:37:17 +01:00
Committed by: GitHub
15 changed files with 619 additions and 82 deletions

Cargo.toml
@@ -3,44 +3,27 @@ name = "route96"
version = "0.4.0"
edition = "2021"
[[bin]]
name = "void_cat_migrate"
required-features = ["bin-void-cat-migrate"]
[[bin]]
name = "void_cat_forced_migrate"
required-features = ["bin-void-cat-force-migrate"]
[[bin]]
name = "route96"
path = "src/bin/main.rs"
[[bin]]
name = "r96util"
path = "src/bin/r96util.rs"
required-features = ["r96util"]
[lib]
name = "route96"
[features]
default = ["nip96", "blossom", "analytics", "react-ui", "r96util"]
default = ["nip96", "blossom", "analytics", "react-ui", "payments"]
media-compression = ["dep:ffmpeg-rs-raw", "dep:libc"]
labels = ["nip96", "dep:candle-core", "dep:candle-nn", "dep:candle-transformers"]
labels = ["media-compression", "dep:candle-core", "dep:candle-nn", "dep:candle-transformers"]
nip96 = ["media-compression"]
blossom = []
bin-void-cat-migrate = ["dep:sqlx-postgres"]
bin-void-cat-force-migrate = ["dep:regex", "dep:nostr-cursor"]
torrent-v2 = []
analytics = []
void-cat-redirects = ["dep:sqlx-postgres"]
react-ui = []
r96util = ["dep:walkdir", "dep:indicatif"]
payments = ["dep:fedimint-tonic-lnd"]
[dependencies]
log = "0.4.21"
nostr = "0.37.0"
env_logger = "0.11.7"
nostr = "0.39.0"
pretty_env_logger = "0.5.0"
rocket = { version = "0.5.1", features = ["json"] }
tokio = { version = "1.37.0", features = ["rt", "rt-multi-thread", "macros"] }
base64 = "0.22.1"
@@ -50,25 +33,19 @@ uuid = { version = "1.8.0", features = ["v4", "serde"] }
anyhow = "^1.0.82"
sha2 = "0.10.8"
sqlx = { version = "0.8.1", features = ["mysql", "runtime-tokio", "chrono", "uuid"] }
config = { version = "0.14.0", features = ["yaml"] }
config = { version = "0.15.7", features = ["yaml"] }
chrono = { version = "0.4.38", features = ["serde"] }
serde_with = { version = "3.8.1", features = ["hex"] }
reqwest = { version = "0.12.8", features = ["stream"] }
reqwest = { version = "0.12.8", features = ["stream", "http2"] }
clap = { version = "4.5.18", features = ["derive"] }
mime2ext = "0.1.53"
infer = "0.16.0"
http-range-header = { version = "0.4.2" }
infer = "0.19.0"
tokio-util = { version = "0.7.13", features = ["io", "io-util"] }
http-range-header = { version = "0.4.2" }
base58 = "0.2.0"
libc = { version = "0.2.153", optional = true }
ffmpeg-rs-raw = { git = "https://git.v0l.io/Kieran/ffmpeg-rs-raw.git", rev = "29ab0547478256c574766b4acc6fcda8ebf4cae6", optional = true }
ffmpeg-rs-raw = { git = "https://git.v0l.io/Kieran/ffmpeg-rs-raw.git", rev = "a63b88ef3c8f58c7c0ac57d361d06ff0bb3ed385", optional = true }
candle-core = { git = "https://git.v0l.io/huggingface/candle.git", tag = "0.8.1", optional = true }
candle-nn = { git = "https://git.v0l.io/huggingface/candle.git", tag = "0.8.1", optional = true }
candle-transformers = { git = "https://git.v0l.io/huggingface/candle.git", tag = "0.8.1", optional = true }
sqlx-postgres = { version = "0.8.2", optional = true, features = ["chrono", "uuid"] }
nostr-cursor = { git = "https://git.v0l.io/Kieran/nostr-backup-util.git", branch = "main", optional = true }
regex = { version = "1.11.1", optional = true }
walkdir = { version = "2.5.0", optional = true }
indicatif = { version = "0.17.11", optional = true }
fedimint-tonic-lnd = { version = "0.2.0", optional = true, default-features = false, features = ["invoicesrpc", "lightningrpc"] }

(example config, YAML)
@@ -13,19 +13,39 @@ max_upload_bytes: 5e+9
# Public facing url
public_url: "http://localhost:8000"
# Whitelisted pubkeys, leave out to disable
# (Optional) Whitelisted pubkeys, leave out to disable
# whitelist: ["63fe6318dc58583cfe16810f86dd09e18bfd76aabc24a0081ce2856f330504ed"]
# Path for ViT(224) image model (https://huggingface.co/google/vit-base-patch16-224)
vit_model:
model: "/home/kieran/Downloads/falcon_nsfw.safetensors"
config: "/home/kieran/Downloads/falcon_nsfw.json"
# (Optional) Path for ViT(224) image model (https://huggingface.co/google/vit-base-patch16-224)
# vit_model:
# model: "falcon_nsfw.safetensors"
# config: "falcon_nsfw.json"
# Analytics support
# (Optional) Analytics support
# plausible_url: "https://plausible.com/"
# Support legacy void
# void_cat_database: "postgres://postgres:postgres@localhost:41911/void"
# (Optional) Legacy file path for void.cat uploads
# void_cat_files: "/my/void.cat/data"
# Legacy file path for void.cat uploads
# void_cat_files: "/my/void.cat/data"
# (Optional) Payment system config
payments:
# (Optional) Free quota in bytes for users without payments (default: 100MB)
free_quota_bytes: 104857600
# (Optional) Fiat currency used to track exchange rate along with invoices
# If [cost] is using a fiat currency, exchange rates will always be stored
# in that currency, so this config is not needed
fiat: "USD"
# LND node config
lnd:
endpoint: "https://127.0.0.1:10001"
tls: "/home/kieran/.polar/networks/3/volumes/lnd/alice/tls.cert"
macaroon: "/home/kieran/.polar/networks/3/volumes/lnd/alice/data/chain/bitcoin/regtest/admin.macaroon"
# Cost per unit (BTC/USD/EUR/AUD/CAD/JPY/GBP)
cost:
currency: "BTC"
amount: 0.00000100
# Unit metric used to calculate quote (GBSpace, GBEgress)
unit: "GBSpace"
# Billing interval (day / month / year)
interval:
month: 1
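
As a worked example of the pricing above (illustrative numbers; the formulas are those used by the req_payment handler and PaymentUnit::to_size further down), buying 5 GBSpace units for one interval quotes:

amount = cost.amount * units * quantity = 0.00000100 * 5 * 1 = 0.000005 BTC
msat   = amount * 1e11                  = 500,000 msat (500 sats)
size   = units * 1000^3 bytes           = 5,000,000,000 bytes of paid space
days   = days in one month from today   = 28-31, added to paid_until once the invoice settles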

(new SQL migration, 22 lines)
@@ -0,0 +1,22 @@
-- Add migration script here
alter table users
add column paid_until timestamp,
add column paid_size integer unsigned not null;
create table payments
(
payment_hash binary(32) not null primary key,
user_id integer unsigned not null,
created timestamp default current_timestamp,
amount integer unsigned not null,
is_paid bit(1) not null default 0,
days_value integer unsigned not null,
size_value integer unsigned not null,
settle_index integer unsigned,
rate float,
constraint fk_payments_user_id
foreign key (user_id) references users (id)
on delete cascade
on update restrict
);

src/background.rs
@@ -1,23 +1,54 @@
use crate::db::Database;
use crate::filesystem::FileStore;
use anyhow::Result;
use log::{error, info, warn};
use tokio::sync::broadcast;
use tokio::task::JoinHandle;
#[cfg(feature = "media-compression")]
mod media_metadata;
pub fn start_background_tasks(db: Database, file_store: FileStore) -> Vec<JoinHandle<Result<()>>> {
#[cfg(feature = "payments")]
mod payments;
pub fn start_background_tasks(
db: Database,
file_store: FileStore,
shutdown_rx: broadcast::Receiver<()>,
#[cfg(feature = "payments")] client: Option<fedimint_tonic_lnd::Client>,
) -> Vec<JoinHandle<()>> {
let mut ret = vec![];
#[cfg(feature = "media-compression")]
{
let db = db.clone();
let rx = shutdown_rx.resubscribe();
ret.push(tokio::spawn(async move {
log::info!("Starting MediaMetadata background task");
let mut m = media_metadata::MediaMetadata::new(db.clone(), file_store.clone());
m.process().await?;
log::info!("MediaMetadata background task completed");
Ok(())
info!("Starting MediaMetadata background task");
let mut m = media_metadata::MediaMetadata::new(db, file_store.clone());
if let Err(e) = m.process(rx).await {
error!("MediaMetadata failed: {}", e);
} else {
info!("MediaMetadata background task completed");
}
}));
}
#[cfg(feature = "payments")]
{
if let Some(client) = client {
let db = db.clone();
let rx = shutdown_rx.resubscribe();
ret.push(tokio::spawn(async move {
info!("Starting PaymentsHandler background task");
let mut m = payments::PaymentsHandler::new(client, db);
if let Err(e) = m.process(rx).await {
error!("PaymentsHandler failed: {}", e);
} else {
info!("PaymentsHandler background task completed");
}
}));
} else {
warn!("Not starting PaymentsHandler, configuration missing")
}
}
ret
}

src/background/payments.rs (new file, 71 lines)
@@ -0,0 +1,71 @@
use crate::db::Database;
use anyhow::Result;
use fedimint_tonic_lnd::lnrpc::invoice::InvoiceState;
use fedimint_tonic_lnd::lnrpc::InvoiceSubscription;
use fedimint_tonic_lnd::Client;
use log::{error, info};
use rocket::futures::StreamExt;
use sqlx::Row;
use tokio::sync::broadcast;
pub struct PaymentsHandler {
client: Client,
database: Database,
}
impl PaymentsHandler {
pub fn new(client: Client, database: Database) -> Self {
PaymentsHandler { client, database }
}
pub async fn process(&mut self, mut rx: broadcast::Receiver<()>) -> Result<()> {
let start_idx = self.database.get_last_settle_index().await?;
let mut invoices = self
.client
.lightning()
.subscribe_invoices(InvoiceSubscription {
add_index: 0,
settle_index: start_idx,
})
.await?;
info!("Starting invoice subscription from {}", start_idx);
let invoices = invoices.get_mut();
loop {
tokio::select! {
Ok(_) = rx.recv() => {
break;
}
Some(Ok(msg)) = invoices.next() => {
if msg.state == InvoiceState::Settled as i32 {
if let Ok(Some(mut p)) = self.database.get_payment(&msg.r_hash).await {
p.settle_index = Some(msg.settle_index);
p.is_paid = true;
match self.database.complete_payment(&p).await {
Ok(()) => info!(
"Successfully completed payment: {}",
hex::encode(&msg.r_hash)
),
Err(e) => error!("Failed to complete payment: {}", e),
}
}
}
}
}
}
Ok(())
}
}
impl Database {
async fn get_last_settle_index(&self) -> Result<u64> {
Ok(
sqlx::query("select max(settle_index) from payments where is_paid = true")
.fetch_one(&self.pool)
.await?
.try_get(0)
.unwrap_or(0),
)
}
}

src/bin/main.rs
@@ -3,6 +3,8 @@ use std::net::{IpAddr, SocketAddr};
use anyhow::Error;
use clap::Parser;
use config::Config;
#[cfg(feature = "payments")]
use fedimint_tonic_lnd::lnrpc::GetInfoRequest;
use log::{error, info};
use rocket::config::Ident;
use rocket::data::{ByteUnit, Limits};
@@ -19,6 +21,7 @@ use route96::filesystem::FileStore;
use route96::routes;
use route96::routes::{get_blob, head_blob, root};
use route96::settings::Settings;
use tokio::sync::broadcast;
#[derive(Parser, Debug)]
#[command(version, about)]
@@ -29,7 +32,7 @@ struct Args {
#[rocket::main]
async fn main() -> Result<(), Error> {
env_logger::init();
pretty_env_logger::init();
let args: Args = Args::parse();
@@ -101,19 +104,47 @@ async fn main() -> Result<(), Error> {
{
rocket = rocket.mount("/", routes![routes::get_blob_thumb]);
}
#[cfg(feature = "payments")]
let lnd = {
if let Some(lnd) = settings.payments.as_ref().map(|p| &p.lnd) {
let lnd = fedimint_tonic_lnd::connect(
lnd.endpoint.clone(),
lnd.tls.clone(),
lnd.macaroon.clone(),
)
.await?;
let jh = start_background_tasks(db, fs);
let info = {
let mut lnd = lnd.clone();
lnd.lightning().get_info(GetInfoRequest::default()).await?
};
info!(
"LND connected: {} v{}",
info.get_ref().alias,
info.get_ref().version
);
rocket = rocket
.manage(lnd.clone())
.mount("/", routes::payment::routes());
Some(lnd)
} else {
None
}
};
let (shutdown_tx, shutdown_rx) = broadcast::channel(1);
let jh = start_background_tasks(db, fs, shutdown_rx, lnd);
if let Err(e) = rocket.launch().await {
error!("Rocker error {}", e);
for j in jh {
let _ = j.await?;
}
Err(Error::from(e))
} else {
for j in jh {
let _ = j.await?;
}
Ok(())
}
shutdown_tx
.send(())
.expect("Failed to send shutdown signal");
for j in jh {
j.await?;
}
Ok(())
}

src/db.rs
@@ -61,6 +61,10 @@ pub struct User {
pub pubkey: Vec<u8>,
pub created: DateTime<Utc>,
pub is_admin: bool,
#[cfg(feature = "payments")]
pub paid_until: Option<DateTime<Utc>>,
#[cfg(feature = "payments")]
pub paid_size: u64,
}
#[cfg(feature = "labels")]
@@ -90,6 +94,20 @@ pub struct UserStats {
pub total_size: u64,
}
#[cfg(feature = "payments")]
#[derive(Clone, FromRow, Serialize)]
pub struct Payment {
pub payment_hash: Vec<u8>,
pub user_id: u64,
pub created: DateTime<Utc>,
pub amount: u64,
pub is_paid: bool,
pub days_value: u64,
pub size_value: u64,
pub settle_index: Option<u64>,
pub rate: Option<f32>,
}
#[derive(Clone)]
pub struct Database {
pub(crate) pool: sqlx::pool::Pool<sqlx::mysql::MySql>,
@@ -148,7 +166,7 @@ impl Database {
.try_get(0)
}
pub async fn add_file(&self, file: &FileUpload, user_id: Option<u64>) -> Result<(), Error> {
pub async fn add_file(&self, file: &FileUpload, user_id: u64) -> Result<(), Error> {
let mut tx = self.pool.begin().await?;
let q = sqlx::query("insert ignore into \
uploads(id,name,size,mime_type,blur_hash,width,height,alt,created,duration,bitrate) values(?,?,?,?,?,?,?,?,?,?,?)")
@@ -165,13 +183,10 @@
.bind(file.bitrate);
tx.execute(q).await?;
if let Some(user_id) = user_id {
let q2 = sqlx::query("insert ignore into user_uploads(file,user_id) values(?,?)")
.bind(&file.id)
.bind(user_id);
tx.execute(q2).await?;
}
let q2 = sqlx::query("insert ignore into user_uploads(file,user_id) values(?,?)")
.bind(&file.id)
.bind(user_id);
tx.execute(q2).await?;
#[cfg(feature = "labels")]
for lbl in &file.labels {
@@ -273,3 +288,85 @@
Ok((results, count))
}
}
#[cfg(feature = "payments")]
impl Database {
pub async fn insert_payment(&self, payment: &Payment) -> Result<(), Error> {
sqlx::query("insert into payments(payment_hash,user_id,amount,days_value,size_value,rate) values(?,?,?,?,?,?)")
.bind(&payment.payment_hash)
.bind(payment.user_id)
.bind(payment.amount)
.bind(payment.days_value)
.bind(payment.size_value)
.bind(payment.rate)
.execute(&self.pool)
.await?;
Ok(())
}
pub async fn get_payment(&self, payment_hash: &Vec<u8>) -> Result<Option<Payment>, Error> {
sqlx::query_as("select * from payments where payment_hash = ?")
.bind(payment_hash)
.fetch_optional(&self.pool)
.await
}
pub async fn get_user_payments(&self, uid: u64) -> Result<Vec<Payment>, Error> {
sqlx::query_as("select * from payments where user_id = ?")
.bind(uid)
.fetch_all(&self.pool)
.await
}
pub async fn complete_payment(&self, payment: &Payment) -> Result<(), Error> {
let mut tx = self.pool.begin().await?;
sqlx::query("update payments set is_paid = true, settle_index = ? where payment_hash = ?")
.bind(payment.settle_index)
.bind(&payment.payment_hash)
.execute(&mut *tx)
.await?;
// TODO: check space is not downgraded
sqlx::query("update users set paid_until = TIMESTAMPADD(DAY, ?, IFNULL(paid_until, current_timestamp)), paid_size = ? where id = ?")
.bind(payment.days_value)
.bind(payment.size_value)
.bind(payment.user_id)
.execute(&mut *tx)
.await?;
tx.commit().await?;
Ok(())
}
/// Check if user has sufficient quota for an upload
pub async fn check_user_quota(&self, pubkey: &Vec<u8>, upload_size: u64, free_quota_bytes: u64) -> Result<bool, Error> {
// Get or create user
let user_id = self.upsert_user(pubkey).await?;
// Get user's current storage usage
let user_stats = self.get_user_stats(user_id).await.unwrap_or(UserStats {
file_count: 0,
total_size: 0
});
// Get user's paid quota
let user = self.get_user(pubkey).await?;
let (paid_size, paid_until) = (user.paid_size, user.paid_until);
// Calculate total available quota
let mut available_quota = free_quota_bytes;
// Add paid quota if still valid
if let Some(paid_until) = paid_until {
if paid_until > chrono::Utc::now() {
available_quota += paid_size;
}
}
// Check if upload would exceed quota
Ok(user_stats.total_size + upload_size <= available_quota)
}
}

src/lib.rs
@@ -5,14 +5,14 @@ pub mod background;
pub mod cors;
pub mod db;
pub mod filesystem;
#[cfg(feature = "payments")]
pub mod payments;
#[cfg(feature = "media-compression")]
pub mod processing;
pub mod routes;
pub mod settings;
#[cfg(any(feature = "void-cat-redirects", feature = "bin-void-cat-migrate"))]
pub mod void_db;
pub mod void_file;
pub fn can_compress(mime_type: &str) -> bool {
mime_type.starts_with("image/")
}
}

src/payments.rs (new file, 53 lines)
@@ -0,0 +1,53 @@
use serde::{Deserialize, Serialize};
use std::fmt::{Display, Formatter};
#[cfg(feature = "payments")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaymentAmount {
pub currency: Currency,
pub amount: f32,
}
#[cfg(feature = "payments")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum Currency {
BTC,
USD,
EUR,
GBP,
JPY,
CAD,
AUD,
}
#[cfg(feature = "payments")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PaymentUnit {
GBSpace,
GBEgress,
}
impl PaymentUnit {
/// Get the total size from a number of units
pub fn to_size(&self, units: f32) -> u64 {
(1000f32 * 1000f32 * 1000f32 * units) as u64
}
}
impl Display for PaymentUnit {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self {
PaymentUnit::GBSpace => write!(f, "GB Space"),
PaymentUnit::GBEgress => write!(f, "GB Egress"),
}
}
}
#[cfg(feature = "payments")]
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PaymentInterval {
Day(u16),
Month(u16),
Year(u16),
}
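
For reference, the example payment config earlier maps onto these types as plain externally-tagged serde enums (interval additionally uses rename_all = "lowercase"), roughly:

currency: "BTC"        -> Currency::BTC
unit: "GBSpace"        -> PaymentUnit::GBSpace
interval: { month: 1 } -> PaymentInterval::Month(1)
cost: { currency: "BTC", amount: 0.00000100 } -> PaymentAmount { currency: Currency::BTC, amount: 0.000001 }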

src/routes/admin.rs
@@ -53,6 +53,14 @@ pub struct SelfUser {
pub is_admin: bool,
pub file_count: u64,
pub total_size: u64,
#[cfg(feature = "payments")]
pub paid_until: u64,
#[cfg(feature = "payments")]
pub quota: u64,
#[cfg(feature = "payments")]
pub free_quota: u64,
#[cfg(feature = "payments")]
pub total_available_quota: u64,
}
#[derive(Serialize)]
@@ -63,7 +71,7 @@ pub struct AdminNip94File {
}
#[rocket::get("/self")]
async fn admin_get_self(auth: Nip98Auth, db: &State<Database>) -> AdminResponse<SelfUser> {
async fn admin_get_self(auth: Nip98Auth, db: &State<Database>, settings: &State<Settings>) -> AdminResponse<SelfUser> {
let pubkey_vec = auth.event.pubkey.to_bytes().to_vec();
match db.get_user(&pubkey_vec).await {
Ok(user) => {
@@ -73,10 +81,40 @@ async fn admin_get_self(auth: Nip98Auth, db: &State<Database>) -> AdminResponse<
return AdminResponse::error(&format!("Failed to load user stats: {}", e))
}
};
#[cfg(feature = "payments")]
let (free_quota, total_available_quota) = {
let free_quota = settings.payments.as_ref()
.and_then(|p| p.free_quota_bytes)
.unwrap_or(104857600);
let mut total_available = free_quota;
// Add paid quota if still valid
if let Some(paid_until) = &user.paid_until {
if *paid_until > chrono::Utc::now() {
total_available += user.paid_size;
}
}
(free_quota, total_available)
};
AdminResponse::success(SelfUser {
is_admin: user.is_admin,
file_count: s.file_count,
total_size: s.total_size,
#[cfg(feature = "payments")]
paid_until: if let Some(u) = &user.paid_until {
u.timestamp() as u64
} else {
0
},
#[cfg(feature = "payments")]
quota: user.paid_size,
#[cfg(feature = "payments")]
free_quota,
#[cfg(feature = "payments")]
total_available_quota,
})
}
Err(_) => AdminResponse::error("User not found"),

src/routes/blossom.rs
@@ -25,7 +25,7 @@ pub struct BlobDescriptor {
pub size: u64,
#[serde(rename = "type", skip_serializing_if = "Option::is_none")]
pub mime_type: Option<String>,
pub uploaded: u64,
pub created: u64,
#[serde(rename = "nip94", skip_serializing_if = "Option::is_none")]
pub nip94: Option<HashMap<String, String>>,
}
@@ -45,7 +45,7 @@ impl BlobDescriptor {
sha256: id_hex,
size: value.size,
mime_type: Some(value.mime_type.clone()),
uploaded: value.created.timestamp() as u64,
created: value.created.timestamp() as u64,
nip94: Some(
Nip94Event::from_upload(settings, value)
.tags
@@ -362,6 +362,21 @@ async fn process_upload(
return e;
}
// check quota
#[cfg(feature = "payments")]
if let Some(upload_size) = size {
let free_quota = settings.payments.as_ref()
.and_then(|p| p.free_quota_bytes)
.unwrap_or(104857600); // Default to 100MB
let pubkey_vec = auth.event.pubkey.to_bytes().to_vec();
match db.check_user_quota(&pubkey_vec, upload_size, free_quota).await {
Ok(false) => return BlossomResponse::error("Upload would exceed quota"),
Err(_) => return BlossomResponse::error("Failed to check quota"),
Ok(true) => {} // Quota check passed
}
}
process_stream(
data.open(ByteUnit::Byte(settings.max_upload_bytes)),
&auth
@@ -415,7 +430,7 @@
return BlossomResponse::error(format!("Failed to save file (db): {}", e));
}
};
if let Err(e) = db.add_file(&upload, Some(user_id)).await {
if let Err(e) = db.add_file(&upload, user_id).await {
error!("{}", e.to_string());
BlossomResponse::error(format!("Error saving file (db): {}", e))
} else {

src/routes/nip96.rs
@@ -207,6 +207,20 @@ async fn upload(
}
let pubkey_vec = auth.event.pubkey.to_bytes().to_vec();
// check quota
#[cfg(feature = "payments")]
{
let free_quota = settings.payments.as_ref()
.and_then(|p| p.free_quota_bytes)
.unwrap_or(104857600); // Default to 100MB
match db.check_user_quota(&pubkey_vec, form.size, free_quota).await {
Ok(false) => return Nip96Response::error("Upload would exceed quota"),
Err(_) => return Nip96Response::error("Failed to check quota"),
Ok(true) => {} // Quota check passed
}
}
let upload = match fs
.put(file, content_type, !form.no_transform.unwrap_or(false))
.await
@@ -232,7 +246,7 @@
Err(e) => return Nip96Response::error(&format!("Could not save user: {}", e)),
};
if let Err(e) = db.add_file(&upload, Some(user_id)).await {
if let Err(e) = db.add_file(&upload, user_id).await {
error!("{}", e.to_string());
return Nip96Response::error(&format!("Could not save file (db): {}", e));
}

src/routes/payment.rs (new file, 131 lines)
@@ -0,0 +1,131 @@
use crate::auth::nip98::Nip98Auth;
use crate::db::{Database, Payment};
use crate::payments::{Currency, PaymentAmount, PaymentInterval, PaymentUnit};
use crate::settings::Settings;
use chrono::{Months, Utc};
use fedimint_tonic_lnd::lnrpc::Invoice;
use fedimint_tonic_lnd::Client;
use log::{error, info};
use rocket::serde::json::Json;
use rocket::{routes, Route, State};
use serde::{Deserialize, Serialize};
use std::ops::{Add, Deref};
pub fn routes() -> Vec<Route> {
routes![get_payment, req_payment]
}
#[derive(Deserialize, Serialize)]
struct PaymentInfo {
/// Billing quota metric
pub unit: PaymentUnit,
/// Amount of time to bill units (GB/mo, Gb Egress/day etc.)
pub interval: PaymentInterval,
/// Value amount of payment
pub cost: PaymentAmount,
}
#[derive(Deserialize, Serialize)]
struct PaymentRequest {
/// Number of units requested to make payment
pub units: f32,
/// Quantity of orders to make
pub quantity: u16,
}
#[derive(Deserialize, Serialize)]
struct PaymentResponse {
pub pr: String,
}
#[rocket::get("/payment")]
async fn get_payment(settings: &State<Settings>) -> Option<Json<PaymentInfo>> {
settings.payments.as_ref().map(|p| {
Json::from(PaymentInfo {
unit: p.unit.clone(),
interval: p.interval.clone(),
cost: p.cost.clone(),
})
})
}
#[rocket::post("/payment", data = "<req>", format = "json")]
async fn req_payment(
auth: Nip98Auth,
db: &State<Database>,
settings: &State<Settings>,
lnd: &State<Client>,
req: Json<PaymentRequest>,
) -> Result<Json<PaymentResponse>, String> {
let cfg = if let Some(p) = &settings.payments {
p
} else {
return Err("Payment not enabled, missing configuration option(s)".to_string());
};
let btc_amount = match cfg.cost.currency {
Currency::BTC => cfg.cost.amount,
_ => return Err("Currency not supported".to_string()),
};
let amount = btc_amount * req.units * req.quantity as f32;
let pubkey_vec = auth.event.pubkey.to_bytes().to_vec();
let uid = db
.upsert_user(&pubkey_vec)
.await
.map_err(|_| "Failed to get user account".to_string())?;
let mut lnd = lnd.deref().clone();
let c = lnd.lightning();
let msat = (amount * 1e11f32) as u64;
let memo = format!(
"{}x {} {} for {}",
req.quantity, req.units, cfg.unit, auth.event.pubkey
);
info!("Requesting {} msats: {}", msat, memo);
let invoice = c
.add_invoice(Invoice {
value_msat: msat as i64,
memo,
..Default::default()
})
.await
.map_err(|e| e.message().to_string())?;
let days_value = match cfg.interval {
PaymentInterval::Day(d) => d as u64,
PaymentInterval::Month(m) => {
let now = Utc::now();
(now.add(Months::new(m as u32)) - now).num_days() as u64
}
PaymentInterval::Year(y) => {
let now = Utc::now();
(now.add(Months::new(12 * y as u32)) - now).num_days() as u64
}
};
let record = Payment {
payment_hash: invoice.get_ref().r_hash.clone(),
user_id: uid,
created: Default::default(),
amount: msat,
is_paid: false,
days_value,
size_value: cfg.unit.to_size(req.units),
settle_index: None,
rate: None,
};
if let Err(e) = db.insert_payment(&record).await {
error!("Failed to insert payment: {}", e);
return Err("Failed to insert payment".to_string());
}
Ok(Json(PaymentResponse {
pr: invoice.get_ref().payment_request.clone(),
}))
}
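
Illustrative request/response shapes for the two routes, derived from the structs above (the invoice string is truncated; POST /payment requires a NIP-98 Authorization header via the Nip98Auth guard, GET /payment does not):

GET  /payment
  -> { "unit": "GBSpace", "interval": { "month": 1 }, "cost": { "currency": "BTC", "amount": 0.000001 } }

POST /payment  with body { "units": 5, "quantity": 1 }
  -> { "pr": "lnbcrt1..." }

Once the invoice settles, the background PaymentsHandler marks the payment as paid and complete_payment credits the purchased days and size to the user.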

src/settings.rs
@@ -1,3 +1,5 @@
#[cfg(feature = "payments")]
use crate::payments::{Currency, PaymentAmount, PaymentInterval, PaymentUnit};
use serde::{Deserialize, Serialize};
use std::path::PathBuf;
@@ -30,11 +32,12 @@ pub struct Settings {
/// Analytics tracking
pub plausible_url: Option<String>,
#[cfg(feature = "void-cat-redirects")]
pub void_cat_database: Option<String>,
/// Path to void.cat uploads (files-v2)
pub void_cat_files: Option<PathBuf>,
#[cfg(feature = "payments")]
/// Payment options for paid storage
pub payments: Option<PaymentConfig>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -42,3 +45,33 @@ pub struct VitModelConfig {
pub model: PathBuf,
pub config: PathBuf,
}
#[cfg(feature = "payments")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PaymentConfig {
/// LND connection details
pub lnd: LndConfig,
/// Pricing per unit
pub cost: PaymentAmount,
/// What metric to bill payments on
pub unit: PaymentUnit,
/// Billing interval time per unit
pub interval: PaymentInterval,
/// Fiat base currency to store exchange rates along with invoice
pub fiat: Option<Currency>,
/// Free quota in bytes for users without payments (default: 100MB)
pub free_quota_bytes: Option<u64>,
}
#[cfg(feature = "payments")]
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LndConfig {
pub endpoint: String,
pub tls: PathBuf,
pub macaroon: PathBuf,
}

(UI TypeScript API client)
@@ -6,6 +6,10 @@ export interface AdminSelf {
is_admin: boolean;
file_count: number;
total_size: number;
paid_until?: number;
quota?: number;
free_quota?: number;
total_available_quota?: number;
}
export class Route96 {