feat: allow whitelisting of pubkeys for new events

This adds a configuration option, `authorization.pubkey_whitelist`
which is an array of pubkeys that are allowed to publish events on
this relay.
This commit is contained in:
Greg Heartsfield 2022-01-26 21:39:03 -06:00
parent 452bbbb0e5
commit 98c6fa6f39
5 changed files with 62 additions and 11 deletions

View File

@ -10,13 +10,14 @@ mirrored on [GitHub](https://github.com/scsibug/nostr-rs-relay).
## Features
NIPs with relay-specific implementation requirements are listed here.
NIPs with a relay-specific implementation are listed here.
- [x] NIP-01: Core event model
- [x] NIP-01: Hide old metadata events
- [x] NIP-01: Id/Author prefix search (_experimental_)
- [x] NIP-02: Hide old contact list events
- [ ] NIP-03: OpenTimestamps
- [ ] NIP-05: Mapping Nostr keys to DNS identifiers
- [ ] NIP-09: Event deletion
- [x] NIP-11: Relay information document
- [x] NIP-12: Generic tag search (_experimental_)

View File

@ -65,3 +65,11 @@ reject_future_seconds = 1800
# Event persistence buffer size, in number of events. This provides
# backpressure to senders if writes are slow. Defaults to 16.
#event_persist_buffer = 16
[authorization]
# Pubkey addresses in this array are whitelisted for event publishing.
# Only valid events by these authors will be accepted.
#pubkey_whitelist = [
# "35d26e4690cbe1a898af61cc3515661eb5fa763b57bd0b42e45099c8b32fd50f",
# "887645fef0ce0c3c1218d2f5d8e6132a19304cdc57cd20281d082f38cfea0072",
#]

View File

@ -61,6 +61,12 @@ pub struct Limits {
pub event_persist_buffer: usize, // events to buffer for database commits (block senders if database writes are too slow)
}
/// Authorization settings controlling which authors may publish events.
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct Authorization {
    // When `Some`, only events whose author pubkey appears in this list are
    // accepted for writing; `None` means any author may publish.
    // NOTE(review): entries appear to be hex-encoded pubkey strings, per the
    // example config — confirm expected encoding.
    pub pubkey_whitelist: Option<Vec<String>>, // If present, only allow these pubkeys to publish events
}
#[derive(Debug, Serialize, Deserialize)]
#[allow(unused)]
pub struct Settings {
@ -68,6 +74,7 @@ pub struct Settings {
pub database: Database,
pub network: Network,
pub limits: Limits,
pub authorization: Authorization,
pub retention: Retention,
pub options: Options,
}
@ -133,6 +140,9 @@ impl Default for Settings {
broadcast_buffer: 4096,
event_persist_buffer: 16,
},
authorization: Authorization {
pubkey_whitelist: None, // Allow any address to publish
},
retention: Retention {
max_events: None, // max events
max_bytes: None, // max size

View File

@ -18,6 +18,7 @@ use rusqlite::limits::Limit;
use rusqlite::types::ToSql;
use std::path::Path;
use std::thread;
use std::time::Duration;
use std::time::Instant;
use tokio::task;
@ -103,6 +104,13 @@ pub fn build_read_pool() -> SqlitePool {
let config = config::SETTINGS.read().unwrap();
let db_dir = &config.database.data_directory;
let full_path = Path::new(db_dir).join(DB_FILE);
// small hack; if the database doesn't exist yet, that means the
// writer thread hasn't finished. Give it a chance to work. This
// is only an issue with the first time we run.
while !full_path.exists() {
debug!("Database reader pool is waiting on the database to be created...");
thread::sleep(Duration::from_millis(500));
}
let manager = SqliteConnectionManager::file(&full_path)
.with_flags(OpenFlags::SQLITE_OPEN_READ_ONLY)
.with_init(|c| c.execute_batch(STARTUP_SQL));
@ -248,6 +256,9 @@ pub async fn db_writer(
info!("opened database {:?} for writing", full_path);
upgrade_db(&mut conn)?;
// Make a copy of the whitelist
let whitelist = &config.authorization.pubkey_whitelist.clone();
// get rate limit settings
let rps_setting = config.limits.messages_per_sec;
let mut most_recent_rate_limit = Instant::now();
@ -273,6 +284,21 @@ pub async fn db_writer(
}
let mut event_write = false;
let event = next_event.unwrap();
// check if this event is authorized.
if let Some(allowed_addrs) = whitelist {
debug!("Checking against whitelist");
// reject the event if its author's pubkey is not in allowed_addrs.
if !allowed_addrs.contains(&event.pubkey) {
info!(
"Rejecting event {}, unauthorized author",
event.get_event_id_prefix()
);
// TODO: define a channel that can send NOTICEs back to the client.
continue;
}
}
let start = Instant::now();
match write_event(&mut conn, &event) {
Ok(updated) => {

View File

@ -177,9 +177,15 @@ fn main() -> Result<(), Error> {
error!("Database directory does not exist");
return Err(Error::DatabaseDirError);
}
debug!("config: {:?}", config);
trace!("config: {:?}", config);
let addr = format!("{}:{}", config.network.address.trim(), config.network.port);
let socket_addr = addr.parse().expect("listening address not valid");
if let Some(addr_whitelist) = &config.authorization.pubkey_whitelist {
info!(
"Event publishing restricted to {} pubkey(s)",
addr_whitelist.len()
);
}
// configure tokio runtime
let rt = Builder::new_multi_thread()
.enable_all()
@ -190,8 +196,6 @@ fn main() -> Result<(), Error> {
rt.block_on(async {
let settings = config::SETTINGS.read().unwrap();
info!("listening on: {}", socket_addr);
// build a connection pool for sqlite connections
let pool = db::build_read_pool();
// all client-submitted valid events are broadcast to every
// other client on this channel. This should be large enough
// to accommodate slower readers (messages are dropped if
@ -203,18 +207,20 @@ fn main() -> Result<(), Error> {
// establish a channel for letting all threads know about a
// requested server shutdown.
let (invoke_shutdown, _) = broadcast::channel::<()>(1);
let ctrl_c_shutdown = invoke_shutdown.clone();
// listen for ctrl-c interrupts
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("shutting down due to SIGINT");
ctrl_c_shutdown.send(()).ok();
});
// start the database writer thread. Give it a channel for
// writing events, and for publishing events that have been
// written (to all connected clients).
db::db_writer(event_rx, bcast_tx.clone(), invoke_shutdown.subscribe()).await;
info!("db writer created");
// listen for ctrl-c interrupts
let ctrl_c_shutdown = invoke_shutdown.clone();
tokio::spawn(async move {
tokio::signal::ctrl_c().await.unwrap();
info!("shutting down due to SIGINT");
ctrl_c_shutdown.send(()).ok();
});
// build a connection pool for sqlite connections
let pool = db::build_read_pool();
// A `Service` is needed for every connection, so this
// creates one from our `handle_request` function.
let make_svc = make_service_fn(|conn: &AddrStream| {