Merge branch 'lists' into unstable

This commit is contained in:
Mike Dilger 2023-09-07 10:30:12 +12:00
commit 7bb9f8d1d3
23 changed files with 1860 additions and 1259 deletions

View File

@ -14,112 +14,7 @@ use std::time::Duration;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tokio::task; use tokio::task;
#[derive(Debug, Clone, Serialize, Deserialize)] pub type Person = crate::storage::types::Person1;
/// A person we track locally: their key, our local annotations
/// (petname, follow/mute state) and cached copies of data they published.
pub struct Person {
    // The person's public key — their identity (see PartialEq below)
    pub pubkey: PublicKey,
    // Local nickname we gave them; takes precedence in display_name()
    pub petname: Option<String>,
    // Whether we follow them
    pub followed: bool,
    // When `followed` last changed — presumably unixtime seconds; confirm
    pub followed_last_updated: i64,
    // Whether we muted them
    pub muted: bool,
    // Their published metadata, if we have received it
    pub metadata: Option<Metadata>,
    // created_at of the metadata event we stored
    pub metadata_created_at: Option<i64>,
    // When we last received any metadata event for them
    pub metadata_last_received: i64,
    // Whether their NIP-05 identifier validated
    pub nip05_valid: bool,
    // When we last attempted NIP-05 validation
    pub nip05_last_checked: Option<u64>,
    // created_at of the relay-list event we stored for them
    pub relay_list_created_at: Option<i64>,
    // When we last received a relay-list event for them
    pub relay_list_last_received: i64,
}
impl Person {
    /// A fresh `Person` for `pubkey`: no metadata, all flags false,
    /// all timestamps zeroed.
    pub fn new(pubkey: PublicKey) -> Person {
        Person {
            pubkey,
            petname: None,
            metadata: None,
            metadata_created_at: None,
            nip05_last_checked: None,
            relay_list_created_at: None,
            followed: false,
            muted: false,
            nip05_valid: false,
            followed_last_updated: 0,
            metadata_last_received: 0,
            relay_list_last_received: 0,
        }
    }

    /// Best name to show for this person: the local petname if set,
    /// else the metadata "display_name" when it is a non-empty string,
    /// else the metadata "name", else `None`.
    pub fn display_name(&self) -> Option<&str> {
        if let Some(petname) = self.petname.as_deref() {
            return Some(petname);
        }
        let md = self.metadata.as_ref()?;
        if let Some(serde_json::Value::String(s)) = md.other.get("display_name") {
            if !s.is_empty() {
                return Some(s);
            }
        }
        md.name.as_deref()
    }

    /// The "name" field from their metadata, if any.
    pub fn name(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.name.as_deref())
    }

    /// The "about" field from their metadata, if any.
    pub fn about(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.about.as_deref())
    }

    /// The "picture" field from their metadata, if any.
    pub fn picture(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.picture.as_deref())
    }

    /// The "nip05" identifier from their metadata, if any.
    pub fn nip05(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.nip05.as_deref())
    }
}
impl PartialEq for Person {
    fn eq(&self, other: &Self) -> bool {
        // Identity is the public key alone; differing metadata or local
        // annotations do not make two records different people.
        self.pubkey == other.pubkey
    }
}

impl Eq for Person {}
impl PartialOrd for Person {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        // Canonical form: delegate to `Ord` so the two impls can never
        // disagree (the previous duplicated logic risked drift;
        // see clippy::non_canonical_partial_ord_impl).
        Some(self.cmp(other))
    }
}

impl Ord for Person {
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        // Sort by case-folded display name when both have one, otherwise
        // fall back to pubkey order.
        // NOTE(review): mixing two orderings like this is not transitive
        // when only some people have display names — fine for UI sorting,
        // but don't rely on it for ordered collections.
        match (self.display_name(), other.display_name()) {
            (Some(a), Some(b)) => a.to_lowercase().cmp(&b.to_lowercase()),
            _ => self.pubkey.cmp(&other.pubkey),
        }
    }
}
pub struct People { pub struct People {
// active person's relays (pull from db as needed) // active person's relays (pull from db as needed)

View File

@ -1,166 +1 @@
use nostr_types::{PublicKey, RelayUrl, Unixtime}; pub type PersonRelay = crate::storage::types::PersonRelay1;
use speedy::{Readable, Writable};
/// Evidence linking a person to a relay, used by the ranking functions
/// below to pick the best relays to reach them on.
#[derive(Debug, Readable, Writable)]
pub struct PersonRelay {
    // The person
    pub pubkey: PublicKey,
    // The relay associated with that person
    pub url: RelayUrl,
    // The last time we fetched one of the person's events from this relay
    pub last_fetched: Option<u64>,
    // When we follow someone at a relay
    pub last_suggested_kind3: Option<u64>,
    // When we get their nip05 and it specifies this relay
    pub last_suggested_nip05: Option<u64>,
    // Updated when a 'p' tag on any event associates this person and relay via the
    // recommended_relay_url field
    pub last_suggested_bytag: Option<u64>,
    // Author-signed claims that they read/write at this relay
    // (see write_rank/read_rank: "an author-signed explicit claim")
    pub read: bool,
    pub write: bool,
    // Manually-paired read relay for this person
    // NOTE(review): original comment ("When we follow someone at a relay,
    // this is set true") was duplicated from last_suggested_kind3 — verify.
    pub manually_paired_read: bool,
    // Manually-paired write relay for this person — same caveat as above
    pub manually_paired_write: bool,
}
impl PersonRelay {
    /// A fresh association between `pubkey` and `url` with no evidence yet.
    pub fn new(pubkey: PublicKey, url: RelayUrl) -> PersonRelay {
        PersonRelay {
            pubkey,
            url,
            last_fetched: None,
            last_suggested_kind3: None,
            last_suggested_nip05: None,
            last_suggested_bytag: None,
            read: false,
            write: false,
            manually_paired_read: false,
            manually_paired_write: false,
        }
    }

    /// Shared scoring behind `write_rank`/`read_rank` (previously two
    /// near-identical copies that differed only in which flags earned the
    /// flat 20 points; `explicit_claim` now selects those flags).
    ///
    /// Time-based evidence decays by dividing its base score by the number
    /// of elapsed fade periods (minimum one, so fresh evidence scores the
    /// full base). Returns (url, score) pairs sorted best-first, dropping
    /// zero scores, then pruning scores below 20 after the first 6 entries.
    fn rank(
        dbprs: Vec<PersonRelay>,
        explicit_claim: impl Fn(&PersonRelay) -> bool,
    ) -> Vec<(RelayUrl, u64)> {
        let now = Unixtime::now().unwrap().0 as u64;

        let scorefn = |when: u64, fade_period: u64, base: u64| -> u64 {
            let dur = now.saturating_sub(when); // seconds since
            let periods = (dur / fade_period) + 1; // minimum one period
            base / periods
        };

        let mut output: Vec<(RelayUrl, u64)> = Vec::new();
        for dbpr in dbprs {
            let mut score = 0;

            // An author-signed explicit claim of the relevant direction
            if explicit_claim(&dbpr) {
                score += 20;
            }
            // kind3 is our memory of where we are following someone
            if let Some(when) = dbpr.last_suggested_kind3 {
                score += scorefn(when, 60 * 60 * 24 * 30, 7);
            }
            // nip05 is an unsigned dns-based author claim of using this relay
            if let Some(when) = dbpr.last_suggested_nip05 {
                score += scorefn(when, 60 * 60 * 24 * 15, 4);
            }
            // last_fetched is gossip verified happened-to-work-before
            if let Some(when) = dbpr.last_fetched {
                score += scorefn(when, 60 * 60 * 24 * 3, 3);
            }
            // last_suggested_bytag is an anybody-signed suggestion
            if let Some(when) = dbpr.last_suggested_bytag {
                score += scorefn(when, 60 * 60 * 24 * 2, 1);
            }

            // Prune score=0 associations
            if score == 0 {
                continue;
            }
            output.push((dbpr.url, score));
        }

        output.sort_by(|(_, score1), (_, score2)| score2.cmp(score1));

        // Prune everything below a score of 20, but only after the first 6 entries
        while output.len() > 6 && output[output.len() - 1].1 < 20 {
            let _ = output.pop();
        }

        output
    }

    // This ranks the relays that a person writes to, but does not consider local
    // factors such as our relay rank or the success rate of the relay.
    pub fn write_rank(dbprs: Vec<PersonRelay>) -> Vec<(RelayUrl, u64)> {
        // 'write' is an author-signed explicit claim of where they write
        Self::rank(dbprs, |pr| pr.write || pr.manually_paired_write)
    }

    // This ranks the relays that a person reads from, but does not consider local
    // factors such as our relay rank or the success rate of the relay.
    pub fn read_rank(dbprs: Vec<PersonRelay>) -> Vec<(RelayUrl, u64)> {
        // 'read' is an author-signed explicit claim of where they read
        Self::rank(dbprs, |pr| pr.read || pr.manually_paired_read)
    }
}

View File

@ -1,115 +1 @@
use crate::error::Error; pub type Relay = crate::storage::types::Relay1;
use crate::globals::GLOBALS;
use gossip_relay_picker::Direction;
use nostr_types::{Id, RelayInformationDocument, RelayUrl, Unixtime};
use serde::{Deserialize, Serialize};
/// A relay we know about: its url, local statistics and usage flags,
/// and a cached NIP-11 information document.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Relay {
    // The relay's url (its identity)
    pub url: RelayUrl,
    // Connection attempt outcomes — see attempts()/success_rate()
    pub success_count: u64,
    pub failure_count: u64,
    // Presumably unixtime seconds — TODO confirm
    pub last_connected_at: Option<u64>,
    pub last_general_eose_at: Option<u64>,
    // Local rank; new relays start at 3 (see `Relay::new`)
    pub rank: u64,
    // Whether this relay is hidden locally
    pub hidden: bool,
    // Bitflags — see the READ/WRITE/ADVERTISE/... constants on `Relay`
    pub usage_bits: u64,
    // Cached NIP-11 relay information document, if fetched
    pub nip11: Option<RelayInformationDocument>,
    // When we last tried to fetch the NIP-11 document
    pub last_attempt_nip11: Option<u64>,
}
impl Relay {
    pub const READ: u64 = 1 << 0; // 1
    pub const WRITE: u64 = 1 << 1; // 2
    pub const ADVERTISE: u64 = 1 << 2; // 4
    pub const INBOX: u64 = 1 << 3; // 8 this is 'read' of kind 10002
    pub const OUTBOX: u64 = 1 << 4; // 16 this is 'write' of kind 10002
    pub const DISCOVER: u64 = 1 << 5; // 32

    /// A new relay record: zeroed counters, default rank of 3, not hidden,
    /// no usage bits, and no NIP-11 document fetched yet.
    pub fn new(url: RelayUrl) -> Relay {
        Relay {
            url,
            nip11: None,
            last_attempt_nip11: None,
            last_connected_at: None,
            last_general_eose_at: None,
            success_count: 0,
            failure_count: 0,
            usage_bits: 0,
            rank: 3,
            hidden: false,
        }
    }

    /// Turn on every bit of `bits` in `usage_bits`.
    #[inline]
    pub fn set_usage_bits(&mut self, bits: u64) {
        self.usage_bits |= bits;
    }

    /// Turn off every bit of `bits` in `usage_bits`.
    #[inline]
    pub fn clear_usage_bits(&mut self, bits: u64) {
        self.usage_bits &= !bits;
    }

    /// Set or clear `bit` according to `value`.
    #[inline]
    pub fn adjust_usage_bit(&mut self, bit: u64, value: bool) {
        match value {
            true => self.set_usage_bits(bit),
            false => self.clear_usage_bits(bit),
        }
    }

    /// True when every bit of `bits` is set.
    #[inline]
    pub fn has_usage_bits(&self, bits: u64) -> bool {
        (self.usage_bits & bits) == bits
    }

    /// Total connection attempts (successes plus failures).
    #[inline]
    pub fn attempts(&self) -> u64 {
        self.success_count + self.failure_count
    }

    /// Fraction of attempts that succeeded; 0.5 when untried
    /// (unknown, so we put it in the middle).
    #[inline]
    pub fn success_rate(&self) -> f32 {
        match self.attempts() {
            0 => 0.5,
            attempts => self.success_count as f32 / attempts as f32,
        }
    }

    /// This generates a "recommended_relay_url" for an 'e' tag.
    ///
    /// Preference order: the first of our inbox relays the replied-to event
    /// was seen on; else our first inbox relay; else the first relay the
    /// event was seen on; else `None`.
    pub async fn recommended_relay_for_reply(reply_to: Id) -> Result<Option<RelayUrl>, Error> {
        let seen_on_relays: Vec<(RelayUrl, Unixtime)> =
            GLOBALS.storage.get_event_seen_on_relay(reply_to)?;

        if let Some(pubkey) = GLOBALS.storage.read_setting_public_key() {
            let my_inbox_relays: Vec<(RelayUrl, u64)> =
                GLOBALS.storage.get_best_relays(pubkey, Direction::Read)?;

            // Find the first-best intersection
            let intersection = my_inbox_relays
                .iter()
                .find(|(url, _)| seen_on_relays.iter().any(|(seen, _)| seen == url));
            if let Some((url, _)) = intersection {
                return Ok(Some(url.clone()));
            }

            // Else use my first inbox
            if let Some((url, _)) = my_inbox_relays.first() {
                return Ok(Some(url.clone()));
            }

            // Else fall through to seen-on relays only
        }

        Ok(seen_on_relays.first().map(|(url, _)| url.clone()))
    }
}

View File

@ -0,0 +1,42 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use std::sync::Mutex;
// EventKind::ReverseUnixtime -> Id
// (dup keys, so multiple Ids per key)
// val: id.as_slice() | Id(val[0..32].try_into()?)
// Guards creation so the database is only created/opened once.
static EVENT_EK_C_INDEX1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// Cached handle, set exactly once under the lock above.
static EVENT_EK_C_INDEX1_DB: std::sync::OnceLock<RawDatabase> = std::sync::OnceLock::new();

impl Storage {
    /// Get (opening/creating on first use) the "event_ek_c_index" database.
    ///
    /// Double-checked locking around a `OnceLock` instead of the previous
    /// `static mut`: reading a `static mut` outside the lock was a data
    /// race (undefined behavior). `RawDatabase` must be `Copy` — the old
    /// code already copied it out of the static.
    pub(super) fn db_event_ek_c_index1(&self) -> Result<RawDatabase, Error> {
        if let Some(db) = EVENT_EK_C_INDEX1_DB.get() {
            return Ok(*db);
        }
        // Lock. This drops when anything returns.
        let _lock = EVENT_EK_C_INDEX1_DB_CREATE_LOCK.lock();
        // In case of a race, check again
        if let Some(db) = EVENT_EK_C_INDEX1_DB.get() {
            return Ok(*db);
        }
        // Create it. We know that nobody else is doing this and that
        // it cannot happen twice.
        let mut txn = self.env.write_txn()?;
        let db = self
            .env
            .database_options()
            .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
            .name("event_ek_c_index")
            .create(&mut txn)?;
        txn.commit()?;
        let _ = EVENT_EK_C_INDEX1_DB.set(db);
        Ok(db)
    }
}

View File

@ -0,0 +1,43 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use std::sync::Mutex;
// EventKind:PublicKey -> Id
// (pubkey is event author)
// (dup keys, so multiple Ids per key)
// val: id.as_slice() | Id(val[0..32].try_into()?)
// Guards creation so the database is only created/opened once.
static EVENT_EK_PK_INDEX1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// Cached handle, set exactly once under the lock above.
static EVENT_EK_PK_INDEX1_DB: std::sync::OnceLock<RawDatabase> = std::sync::OnceLock::new();

impl Storage {
    /// Get (opening/creating on first use) the "event_ek_pk_index" database.
    ///
    /// Double-checked locking around a `OnceLock` instead of the previous
    /// `static mut`: reading a `static mut` outside the lock was a data
    /// race (undefined behavior). `RawDatabase` must be `Copy` — the old
    /// code already copied it out of the static.
    pub(super) fn db_event_ek_pk_index1(&self) -> Result<RawDatabase, Error> {
        if let Some(db) = EVENT_EK_PK_INDEX1_DB.get() {
            return Ok(*db);
        }
        // Lock. This drops when anything returns.
        let _lock = EVENT_EK_PK_INDEX1_DB_CREATE_LOCK.lock();
        // In case of a race, check again
        if let Some(db) = EVENT_EK_PK_INDEX1_DB.get() {
            return Ok(*db);
        }
        // Create it. We know that nobody else is doing this and that
        // it cannot happen twice.
        let mut txn = self.env.write_txn()?;
        let db = self
            .env
            .database_options()
            .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
            .name("event_ek_pk_index")
            .create(&mut txn)?;
        txn.commit()?;
        let _ = EVENT_EK_PK_INDEX1_DB.set(db);
        Ok(db)
    }
}

View File

@ -0,0 +1,154 @@
use crate::error::{Error, ErrorKind};
use crate::globals::GLOBALS;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::{Event, EventKind, Id, PublicKey, Unixtime};
use speedy::Readable;
use std::collections::HashSet;
use std::ops::Bound;
use std::sync::Mutex;
// PublicKey:ReverseUnixtime -> Id
// (pubkey is referenced by the event somehow)
// (only feed-displayable events are included)
// (dup keys, so multiple Ids per key)
// NOTE: this may be far too much data. Maybe we should only build this for the
// user's pubkey as their inbox.
static EVENT_REFERENCES_PERSON1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut EVENT_REFERENCES_PERSON1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "event_references_person" database.
    pub(super) fn db_event_references_person1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = EVENT_REFERENCES_PERSON1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = EVENT_REFERENCES_PERSON1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = EVENT_REFERENCES_PERSON1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("event_references_person")
                    .create(&mut txn)?;
                txn.commit()?;
                EVENT_REFERENCES_PERSON1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Index `event` under every person it references (p-tags plus pubkeys
    /// mentioned in content), keyed pubkey + reverse-created_at so range
    /// scans yield newest first. Giftwraps are indexed by their inner rumor
    /// (but under the giftwrap's own id); if unwrapping fails for lack of a
    /// private key, the giftwrap is parked in the unindexed_giftwraps table.
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    pub fn write_event_references_person1<'a>(
        &'a self,
        event: &Event,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            let mut event = event;
            // If giftwrap, index the inner rumor instead
            let mut rumor_event: Event;
            if event.kind == EventKind::GiftWrap {
                match GLOBALS.signer.unwrap_giftwrap(event) {
                    Ok(rumor) => {
                        rumor_event = rumor.into_event_with_bad_signature();
                        rumor_event.id = event.id; // lie, so it indexes it under the giftwrap
                        event = &rumor_event;
                    }
                    Err(e) => {
                        if matches!(e.kind, ErrorKind::NoPrivateKey) {
                            // Store as unindexed for later indexing
                            let bytes = vec![];
                            self.db_unindexed_giftwraps()?
                                .put(txn, event.id.as_slice(), &bytes)?;
                        }
                    }
                }
            }
            // Only feed-displayable events get indexed here
            if !event.kind.is_feed_displayable() {
                return Ok(());
            }
            let bytes = event.id.as_slice();
            // Collect every referenced pubkey exactly once
            let mut pubkeys: HashSet<PublicKey> = HashSet::new();
            for (pubkeyhex, _, _) in event.people() {
                let pubkey = match PublicKey::try_from_hex_string(pubkeyhex.as_str(), false) {
                    Ok(pk) => pk,
                    // Skip unparseable p-tags rather than failing the write
                    Err(_) => continue,
                };
                pubkeys.insert(pubkey);
            }
            for pubkey in event.people_referenced_in_content() {
                pubkeys.insert(pubkey);
            }
            if !pubkeys.is_empty() {
                for pubkey in pubkeys.drain() {
                    let mut key: Vec<u8> = pubkey.to_bytes();
                    key.extend((i64::MAX - event.created_at.0).to_be_bytes().as_slice()); // reverse created_at
                    self.db_event_references_person1()?.put(txn, &key, bytes)?;
                }
            }
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    // Read all events referencing a given person in reverse time order
    /// Scans from `now` back to `since`, returning events that reference
    /// `pubkey` and pass the caller's filter `f`, newest first.
    pub fn read_events_referencing_person1<F>(
        &self,
        pubkey: &PublicKey,
        since: Unixtime,
        f: F,
    ) -> Result<Vec<Event>, Error>
    where
        F: Fn(&Event) -> bool,
    {
        let txn = self.env.read_txn()?;
        let now = Unixtime::now().unwrap();
        let mut start_key: Vec<u8> = pubkey.to_bytes();
        let mut end_key: Vec<u8> = start_key.clone();
        start_key.extend((i64::MAX - now.0).to_be_bytes().as_slice()); // work back from now
        end_key.extend((i64::MAX - since.0).to_be_bytes().as_slice()); // until since
        let range = (Bound::Included(&*start_key), Bound::Excluded(&*end_key));
        let iter = self.db_event_references_person1()?.range(&txn, &range)?;
        let mut events: Vec<Event> = Vec::new();
        for result in iter {
            let (_key, val) = result?;
            // Take the event id from the first 32 bytes of the value
            let id = Id(val[0..32].try_into()?);
            // (like read_event, but we supply our own transaction)
            if let Some(bytes) = self.db_events1()?.get(&txn, id.as_slice())? {
                let event = Event::read_from_buffer(bytes)?;
                if f(&event) {
                    events.push(event);
                }
            }
        }
        Ok(events)
    }
}

View File

@ -0,0 +1,96 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage, MAX_LMDB_KEY};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::{Id, RelayUrl, Unixtime};
use std::sync::Mutex;
// Id:Url -> Unixtime
// key: key!(id.as_slice(), url.0.as_bytes())
// val: unixtime.0.to_be_bytes()
static EVENT_SEEN_ON_RELAY1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut EVENT_SEEN_ON_RELAY1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "event_seen_on_relay" database.
    pub(super) fn db_event_seen_on_relay1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = EVENT_SEEN_ON_RELAY1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = EVENT_SEEN_ON_RELAY1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = EVENT_SEEN_ON_RELAY1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("event_seen_on_relay")
                    .create(&mut txn)?;
                txn.commit()?;
                EVENT_SEEN_ON_RELAY1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Number of (event, relay) sightings recorded.
    pub fn get_event_seen_on_relay1_len(&self) -> Result<u64, Error> {
        let txn = self.env.read_txn()?;
        Ok(self.db_event_seen_on_relay1()?.len(&txn)?)
    }

    /// Record that event `id` was seen on relay `url` at time `when`.
    ///
    /// Key is the 32-byte id followed by the url bytes, truncated to
    /// MAX_LMDB_KEY — NOTE(review): very long urls may therefore collide
    /// and will read back truncated in get_event_seen_on_relay1().
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    pub fn add_event_seen_on_relay1<'a>(
        &'a self,
        id: Id,
        url: &RelayUrl,
        when: Unixtime,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let mut key: Vec<u8> = id.as_slice().to_owned();
        key.extend(url.0.as_bytes());
        key.truncate(MAX_LMDB_KEY);
        let bytes = when.0.to_be_bytes();
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_event_seen_on_relay1()?.put(txn, &key, &bytes)?;
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    /// All (relay, time) sightings of event `id`.
    pub fn get_event_seen_on_relay1(&self, id: Id) -> Result<Vec<(RelayUrl, Unixtime)>, Error> {
        let start_key: Vec<u8> = id.as_slice().to_owned();
        let txn = self.env.read_txn()?;
        let mut output: Vec<(RelayUrl, Unixtime)> = Vec::new();
        for result in self
            .db_event_seen_on_relay1()?
            .prefix_iter(&txn, &start_key)?
        {
            let (key, val) = result?;
            // Extract off the Url (everything after the 32-byte id)
            let url = RelayUrl(std::str::from_utf8(&key[32..])?.to_owned());
            let time = Unixtime(i64::from_be_bytes(val[..8].try_into()?));
            output.push((url, time));
        }
        Ok(output)
    }
}

View File

@ -0,0 +1,78 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::Id;
use std::sync::Mutex;
// Id -> ()
// key: id.as_slice()
// val: vec![]
static EVENT_VIEWED1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut EVENT_VIEWED1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "event_viewed" database.
    pub(super) fn db_event_viewed1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = EVENT_VIEWED1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = EVENT_VIEWED1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = EVENT_VIEWED1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("event_viewed")
                    .create(&mut txn)?;
                txn.commit()?;
                EVENT_VIEWED1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Number of events marked as viewed.
    pub fn get_event_viewed1_len(&self) -> Result<u64, Error> {
        let txn = self.env.read_txn()?;
        Ok(self.db_event_viewed1()?.len(&txn)?)
    }

    /// Mark event `id` as viewed. The stored value is empty; key presence
    /// alone is the flag (see is_event_viewed1).
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    pub fn mark_event_viewed1<'a>(
        &'a self,
        id: Id,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let bytes = vec![];
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_event_viewed1()?.put(txn, id.as_slice(), &bytes)?;
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    /// True when event `id` has been marked viewed.
    pub fn is_event_viewed1(&self, id: Id) -> Result<bool, Error> {
        let txn = self.env.read_txn()?;
        Ok(self.db_event_viewed1()?.get(&txn, id.as_slice())?.is_some())
    }
}

113
src/storage/events1.rs Normal file
View File

@ -0,0 +1,113 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::{Event, Id};
use speedy::{Readable, Writable};
use std::sync::Mutex;
// Id -> Event
// key: id.as_slice() | Id(val[0..32].try_into()?)
// val: event.write_to_vec() | Event::read_from_buffer(val)
static EVENTS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut EVENTS1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "events" database.
    pub(super) fn db_events1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = EVENTS1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = EVENTS1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = EVENTS1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("events")
                    .create(&mut txn)?;
                txn.commit()?;
                EVENTS1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Write `event` (speedy-serialized) keyed by its id, and update the
    /// three secondary indexes (ek:pk, ek:created_at, referenced-person)
    /// within the same transaction.
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    pub fn write_event1<'a>(
        &'a self,
        event: &Event,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        // write to lmdb 'events'
        let bytes = event.write_to_vec()?;
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_events1()?.put(txn, event.id.as_slice(), &bytes)?;
            // also index the event
            self.write_event_ek_pk_index(event, Some(txn))?;
            self.write_event_ek_c_index(event, Some(txn))?;
            self.write_event_references_person(event, Some(txn))?;
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    /// Read the event stored under `id`, if any.
    pub fn read_event1(&self, id: Id) -> Result<Option<Event>, Error> {
        let txn = self.env.read_txn()?;
        match self.db_events1()?.get(&txn, id.as_slice())? {
            None => Ok(None),
            Some(bytes) => Ok(Some(Event::read_from_buffer(bytes)?)),
        }
    }

    /// True when an event with `id` is stored.
    pub fn has_event1(&self, id: Id) -> Result<bool, Error> {
        let txn = self.env.read_txn()?;
        match self.db_events1()?.get(&txn, id.as_slice())? {
            None => Ok(false),
            Some(_) => Ok(true),
        }
    }

    /// Delete the event under `id`; a missing key is deliberately ignored.
    ///
    /// NOTE(review): the secondary indexes written by write_event1 are not
    /// cleaned up here — verify callers handle stale index entries.
    pub fn delete_event1<'a>(
        &'a self,
        id: Id,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            let _ = self.db_events1()?.delete(txn, id.as_slice());
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }
}

94
src/storage/hashtags1.rs Normal file
View File

@ -0,0 +1,94 @@
use crate::error::{Error, ErrorKind};
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::Id;
use std::sync::Mutex;
// Hashtag -> Id
// (dup keys, so multiple Ids per hashtag)
// key: key!(hashtag.as_bytes())
// val: id.as_slice() | Id(val[0..32].try_into()?)
static HASHTAGS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut HASHTAGS1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "hashtags" database.
    pub(super) fn db_hashtags1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = HASHTAGS1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = HASHTAGS1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = HASHTAGS1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("hashtags")
                    .create(&mut txn)?;
                txn.commit()?;
                HASHTAGS1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Associate event `id` with `hashtag`. Duplicate keys are allowed, so
    /// one hashtag maps to many event ids. Errors on an empty hashtag.
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    pub fn add_hashtag1<'a>(
        &'a self,
        hashtag: &String,
        id: Id,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        // key! is a project macro — presumably clamps to the LMDB max key
        // size; confirm at its definition
        let key = key!(hashtag.as_bytes());
        if key.is_empty() {
            return Err(ErrorKind::Empty("hashtag".to_owned()).into());
        }
        let bytes = id.as_slice();
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_hashtags1()?.put(txn, key, bytes)?;
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    /// All event ids filed under `hashtag`; empty when the hashtag is unknown.
    #[allow(dead_code)]
    pub fn get_event_ids_with_hashtag1(&self, hashtag: &String) -> Result<Vec<Id>, Error> {
        let key = key!(hashtag.as_bytes());
        if key.is_empty() {
            return Err(ErrorKind::Empty("hashtag".to_owned()).into());
        }
        let txn = self.env.read_txn()?;
        let mut output: Vec<Id> = Vec::new();
        let iter = match self.db_hashtags1()?.get_duplicates(&txn, key)? {
            Some(i) => i,
            None => return Ok(vec![]),
        };
        for result in iter {
            let (_key, val) = result?;
            // The value is the 32-byte event id
            let id = Id(val[0..32].try_into()?);
            output.push(id);
        }
        Ok(output)
    }
}

View File

@ -73,7 +73,7 @@ impl Storage {
let mut count = 0; let mut count = 0;
let event_txn = self.env.read_txn()?; let event_txn = self.env.read_txn()?;
for result in self.events.iter(&event_txn)? { for result in self.db_events1()?.iter(&event_txn)? {
let pair = result?; let pair = result?;
let event = Event::read_from_buffer(pair.1)?; let event = Event::read_from_buffer(pair.1)?;
let _ = self.process_relationships_of_event(&event, Some(txn))?; let _ = self.process_relationships_of_event(&event, Some(txn))?;
@ -309,7 +309,7 @@ impl Storage {
pub fn delete_rumors<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> { pub fn delete_rumors<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let mut ids: Vec<Id> = Vec::new(); let mut ids: Vec<Id> = Vec::new();
let iter = self.events.iter(txn)?; let iter = self.db_events1()?.iter(txn)?;
for result in iter { for result in iter {
let (_key, val) = result?; let (_key, val) = result?;
let event = Event::read_from_buffer(val)?; let event = Event::read_from_buffer(val)?;
@ -319,7 +319,7 @@ impl Storage {
} }
for id in ids { for id in ids {
self.events.delete(txn, id.as_slice())?; self.db_events1()?.delete(txn, id.as_slice())?;
} }
Ok(()) Ok(())

File diff suppressed because it is too large Load Diff

108
src/storage/people1.rs Normal file
View File

@ -0,0 +1,108 @@
use crate::error::Error;
use crate::storage::types::Person1;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::PublicKey;
use std::sync::Mutex;
// PublicKey -> Person
// key: pubkey.as_bytes()
// val: serde_json::to_vec(person) | serde_json::from_slice(bytes)
static PEOPLE1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut PEOPLE1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "people" database.
    pub(super) fn db_people1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = PEOPLE1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = PEOPLE1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = PEOPLE1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("people")
                    .create(&mut txn)?;
                txn.commit()?;
                PEOPLE1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Number of people stored.
    pub fn get_people1_len(&self) -> Result<u64, Error> {
        let txn = self.env.read_txn()?;
        Ok(self.db_people1()?.len(&txn)?)
    }

    /// Write (insert or overwrite) `person`, keyed by their public key.
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    #[allow(dead_code)]
    pub fn write_person1<'a>(
        &'a self,
        person: &Person1,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        // Note that we use serde instead of speedy because the complexity of the
        // serde_json::Value type makes it difficult. Any other serde serialization
        // should work though: Consider bincode.
        let key: Vec<u8> = person.pubkey.to_bytes();
        let bytes = serde_json::to_vec(person)?;
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_people1()?.put(txn, &key, &bytes)?;
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    /// Read the person stored under `pubkey`, if any.
    pub fn read_person1(&self, pubkey: &PublicKey) -> Result<Option<Person1>, Error> {
        // Note that we use serde instead of speedy because the complexity of the
        // serde_json::Value type makes it difficult. Any other serde serialization
        // should work though: Consider bincode.
        let key: Vec<u8> = pubkey.to_bytes();
        let txn = self.env.read_txn()?;
        Ok(match self.db_people1()?.get(&txn, &key)? {
            Some(bytes) => Some(serde_json::from_slice(bytes)?),
            None => None,
        })
    }

    /// Full scan returning every person for whom `f` returns true.
    pub fn filter_people1<F>(&self, f: F) -> Result<Vec<Person1>, Error>
    where
        F: Fn(&Person1) -> bool,
    {
        let txn = self.env.read_txn()?;
        let iter = self.db_people1()?.iter(&txn)?;
        let mut output: Vec<Person1> = Vec::new();
        for result in iter {
            let (_key, val) = result?;
            let person: Person1 = serde_json::from_slice(val)?;
            if f(&person) {
                output.push(person);
            }
        }
        Ok(output)
    }
}

View File

@ -0,0 +1,147 @@
use crate::error::Error;
use crate::storage::types::PersonRelay1;
use crate::storage::{RawDatabase, Storage, MAX_LMDB_KEY};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::{PublicKey, RelayUrl};
use speedy::{Readable, Writable};
use std::sync::Mutex;
// PublicKey:Url -> PersonRelay
// key: key!(pubkey.as_bytes + url.0.as_bytes)
// val: person_relay.write_to_vec() | PersonRelay::read_from_buffer(bytes)
static PERSON_RELAYS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
// NOTE(review): reading this `static mut` outside the lock below is a data
// race (UB) under the Rust memory model; a `OnceLock<RawDatabase>` would be sound.
static mut PERSON_RELAYS1_DB: Option<RawDatabase> = None;

impl Storage {
    /// Get (opening/creating on first use) the "person_relays" database.
    pub(super) fn db_person_relays1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = PERSON_RELAYS1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = PERSON_RELAYS1_DB_CREATE_LOCK.lock();
                // In case of a race, check again
                if let Some(db) = PERSON_RELAYS1_DB {
                    return Ok(db);
                }
                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                    .name("person_relays")
                    .create(&mut txn)?;
                txn.commit()?;
                PERSON_RELAYS1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Number of person-relay associations stored.
    pub fn get_person_relays1_len(&self) -> Result<u64, Error> {
        let txn = self.env.read_txn()?;
        Ok(self.db_person_relays1()?.len(&txn)?)
    }

    /// Write (insert or overwrite) a person-relay association, keyed by
    /// pubkey bytes + url bytes, truncated to MAX_LMDB_KEY — NOTE(review):
    /// very long urls may therefore collide.
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    #[allow(dead_code)]
    pub fn write_person_relay1<'a>(
        &'a self,
        person_relay: &PersonRelay1,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let mut key = person_relay.pubkey.to_bytes();
        key.extend(person_relay.url.0.as_bytes());
        key.truncate(MAX_LMDB_KEY);
        let bytes = person_relay.write_to_vec()?;
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_person_relays1()?.put(txn, &key, &bytes)?;
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }

    /// Read the association for (`pubkey`, `url`), if any.
    pub fn read_person_relay1(
        &self,
        pubkey: PublicKey,
        url: &RelayUrl,
    ) -> Result<Option<PersonRelay1>, Error> {
        let mut key = pubkey.to_bytes();
        key.extend(url.0.as_bytes());
        key.truncate(MAX_LMDB_KEY);
        let txn = self.env.read_txn()?;
        Ok(match self.db_person_relays1()?.get(&txn, &key)? {
            Some(bytes) => Some(PersonRelay1::read_from_buffer(bytes)?),
            None => None,
        })
    }

    /// All relay associations for `pubkey` (prefix scan on the pubkey bytes).
    pub fn get_person_relays1(&self, pubkey: PublicKey) -> Result<Vec<PersonRelay1>, Error> {
        let start_key = pubkey.to_bytes();
        let txn = self.env.read_txn()?;
        let iter = self.db_person_relays1()?.prefix_iter(&txn, &start_key)?;
        let mut output: Vec<PersonRelay1> = Vec::new();
        for result in iter {
            let (_key, val) = result?;
            let person_relay = PersonRelay1::read_from_buffer(val)?;
            output.push(person_relay);
        }
        Ok(output)
    }

    /// Delete every association for which `filter` returns true.
    /// Unparseable records are skipped (and therefore kept).
    ///
    /// Runs inside `rw_txn` when given, else in its own committed txn.
    pub fn delete_person_relays1<'a, F>(
        &'a self,
        filter: F,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error>
    where
        F: Fn(&PersonRelay1) -> bool,
    {
        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            // Delete any person_relay with this relay
            // (collect keys first; we can't delete while iterating)
            let mut deletions: Vec<Vec<u8>> = Vec::new();
            {
                for result in self.db_person_relays1()?.iter(txn)? {
                    let (key, val) = result?;
                    if let Ok(person_relay) = PersonRelay1::read_from_buffer(val) {
                        if filter(&person_relay) {
                            deletions.push(key.to_owned());
                        }
                    }
                }
            }
            for deletion in deletions.drain(..) {
                self.db_person_relays1()?.delete(txn, &deletion)?;
            }
            Ok(())
        };
        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };
        Ok(())
    }
}

View File

@ -0,0 +1,88 @@
use crate::error::Error;
use crate::relationship::Relationship;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::Id;
use speedy::{Readable, Writable};
use std::sync::Mutex;
// Id:Id -> Relationship
// key: id.as_slice(), id.as_slice() | Id(val[32..64].try_into()?)
// val: relationship.write_to_vec() | Relationship::read_from_buffer(val)
static RELATIONSHIPS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut RELATIONSHIPS1_DB: Option<RawDatabase> = None;
impl Storage {
/// Get a handle to the LMDB "relationships" database, opening/creating it
/// on first use.
///
/// The handle is cached in a `static mut`, with creation serialized by
/// `RELATIONSHIPS1_DB_CREATE_LOCK` (double-checked: fast path reads the
/// cache, slow path re-checks under the lock before creating).
pub(super) fn db_relationships1(&self) -> Result<RawDatabase, Error> {
    // NOTE(review): the fast-path read of the static mut happens outside the
    // lock; soundness relies on the value being written once under the lock
    // and only read thereafter — worth confirming against threading model.
    unsafe {
        if let Some(db) = RELATIONSHIPS1_DB {
            Ok(db)
        } else {
            // Lock. This drops when anything returns.
            let _lock = RELATIONSHIPS1_DB_CREATE_LOCK.lock();
            // In case of a race, check again
            if let Some(db) = RELATIONSHIPS1_DB {
                return Ok(db);
            }
            // Create it. We know that nobody else is doing this and that
            // it cannot happen twice.
            let mut txn = self.env.write_txn()?;
            let db = self
                .env
                .database_options()
                .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                .name("relationships")
                .create(&mut txn)?;
            txn.commit()?;
            RELATIONSHIPS1_DB = Some(db);
            Ok(db)
        }
    }
}
pub fn write_relationship1<'a>(
&'a self,
id: Id,
related: Id,
relationship: Relationship,
rw_txn: Option<&mut RwTxn<'a>>,
) -> Result<(), Error> {
let mut key = id.as_ref().as_slice().to_owned();
key.extend(related.as_ref());
let value = relationship.write_to_vec()?;
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
self.db_relationships1()?.put(txn, &key, &value)?;
Ok(())
};
match rw_txn {
Some(txn) => f(txn)?,
None => {
let mut txn = self.env.write_txn()?;
f(&mut txn)?;
txn.commit()?;
}
};
Ok(())
}
/// Return every (related id, relationship) pair recorded for `id`.
///
/// Scans all keys sharing the 32-byte `id` prefix; the related id occupies
/// key bytes 32..64 (writers always store 64-byte keys, so the slice and
/// conversion cannot fail).
pub fn find_relationships1(&self, id: Id) -> Result<Vec<(Id, Relationship)>, Error> {
    let prefix = id.as_slice();
    let txn = self.env.read_txn()?;
    let mut pairs: Vec<(Id, Relationship)> = Vec::new();
    for entry in self.db_relationships1()?.prefix_iter(&txn, prefix)? {
        let (key, bytes) = entry?;
        let related = Id(key[32..64].try_into().unwrap());
        pairs.push((related, Relationship::read_from_buffer(bytes)?));
    }
    Ok(pairs)
}
}

221
src/storage/relays1.rs Normal file
View File

@ -0,0 +1,221 @@
use crate::error::{Error, ErrorKind};
use crate::storage::types::Relay1;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use heed::RwTxn;
use nostr_types::RelayUrl;
use std::sync::Mutex;
// Url -> Relay
// key: key!(url.0.as_bytes())
// val: serde_json::to_vec(relay) | serde_json::from_slice(bytes)
static RELAYS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut RELAYS1_DB: Option<RawDatabase> = None;
impl Storage {
/// Get a handle to the LMDB "relays" database, opening/creating it on
/// first use.
///
/// The handle is cached in a `static mut`, with creation serialized by
/// `RELAYS1_DB_CREATE_LOCK` (double-checked: fast path reads the cache,
/// slow path re-checks under the lock before creating).
pub(super) fn db_relays1(&self) -> Result<RawDatabase, Error> {
    // NOTE(review): the fast-path read of the static mut happens outside the
    // lock; soundness relies on the value being written once under the lock
    // and only read thereafter — worth confirming against threading model.
    unsafe {
        if let Some(db) = RELAYS1_DB {
            Ok(db)
        } else {
            // Lock. This drops when anything returns.
            let _lock = RELAYS1_DB_CREATE_LOCK.lock();
            // In case of a race, check again
            if let Some(db) = RELAYS1_DB {
                return Ok(db);
            }
            // Create it. We know that nobody else is doing this and that
            // it cannot happen twice.
            let mut txn = self.env.write_txn()?;
            let db = self
                .env
                .database_options()
                .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                .name("relays")
                .create(&mut txn)?;
            txn.commit()?;
            RELAYS1_DB = Some(db);
            Ok(db)
        }
    }
}
/// Number of relay records currently stored.
pub fn get_relays1_len(&self) -> Result<u64, Error> {
    let txn = self.env.read_txn()?;
    let count = self.db_relays1()?.len(&txn)?;
    Ok(count)
}
#[allow(dead_code)]
pub fn write_relay1<'a>(
&'a self,
relay: &Relay1,
rw_txn: Option<&mut RwTxn<'a>>,
) -> Result<(), Error> {
// Note that we use serde instead of speedy because the complexity of the
// serde_json::Value type makes it difficult. Any other serde serialization
// should work though: Consider bincode.
let key = key!(relay.url.0.as_bytes());
if key.is_empty() {
return Err(ErrorKind::Empty("relay url".to_owned()).into());
}
let bytes = serde_json::to_vec(relay)?;
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
self.db_relays1()?.put(txn, key, &bytes)?;
Ok(())
};
match rw_txn {
Some(txn) => f(txn)?,
None => {
let mut txn = self.env.write_txn()?;
f(&mut txn)?;
txn.commit()?;
}
};
Ok(())
}
pub fn delete_relay1<'a>(
&'a self,
url: &RelayUrl,
rw_txn: Option<&mut RwTxn<'a>>,
) -> Result<(), Error> {
// Note that we use serde instead of speedy because the complexity of the
// serde_json::Value type makes it difficult. Any other serde serialization
// should work though: Consider bincode.
let key = key!(url.0.as_bytes());
if key.is_empty() {
return Err(ErrorKind::Empty("relay url".to_owned()).into());
}
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
// Delete any PersonRelay with this url
self.delete_person_relays(|f| f.url == *url, Some(txn))?;
// Delete the relay
self.db_relays1()?.delete(txn, key)?;
Ok(())
};
match rw_txn {
Some(txn) => f(txn)?,
None => {
let mut txn = self.env.write_txn()?;
f(&mut txn)?;
txn.commit()?;
}
};
Ok(())
}
pub fn modify_relay1<'a, M>(
&'a self,
url: &RelayUrl,
mut modify: M,
rw_txn: Option<&mut RwTxn<'a>>,
) -> Result<(), Error>
where
M: FnMut(&mut Relay1),
{
let key = key!(url.0.as_bytes());
if key.is_empty() {
return Err(ErrorKind::Empty("relay url".to_owned()).into());
}
let mut f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
let bytes = self.db_relays1()?.get(txn, key)?;
if let Some(bytes) = bytes {
let mut relay = serde_json::from_slice(bytes)?;
modify(&mut relay);
let bytes = serde_json::to_vec(&relay)?;
self.db_relays1()?.put(txn, key, &bytes)?;
}
Ok(())
};
match rw_txn {
Some(txn) => f(txn)?,
None => {
let mut txn = self.env.write_txn()?;
f(&mut txn)?;
txn.commit()?;
}
};
Ok(())
}
/// Apply `modify` to every relay record in the relays database, writing
/// each modified record back in place.
///
/// Joins `rw_txn` when given, otherwise creates and commits its own
/// write transaction.
pub fn modify_all_relays1<'a, M>(
    &'a self,
    mut modify: M,
    rw_txn: Option<&mut RwTxn<'a>>,
) -> Result<(), Error>
where
    M: FnMut(&mut Relay1),
{
    let mut f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
        let mut iter = self.db_relays1()?.iter_mut(txn)?;
        while let Some(result) = iter.next() {
            let (key, val) = result?;
            // Records are serde_json (see write_relay1's serialization note).
            let mut dbrelay: Relay1 = serde_json::from_slice(val)?;
            modify(&mut dbrelay);
            let bytes = serde_json::to_vec(&dbrelay)?;
            // to deal with the unsafety of put_current
            let key = key.to_owned();
            // NOTE(review): put_current is unsafe because writing can
            // invalidate data borrowed from the cursor; we pass an owned
            // copy of the key and do not use `key`/`val` borrows afterwards.
            unsafe {
                iter.put_current(&key, &bytes)?;
            }
        }
        Ok(())
    };
    match rw_txn {
        Some(txn) => f(txn)?,
        None => {
            let mut txn = self.env.write_txn()?;
            f(&mut txn)?;
            txn.commit()?;
        }
    };
    Ok(())
}
/// Look up one relay record by url; `Ok(None)` when absent. Errors on an
/// empty url.
///
/// Records are serde_json-encoded (see write_relay1's serialization note).
pub fn read_relay1(&self, url: &RelayUrl) -> Result<Option<Relay1>, Error> {
    let key = key!(url.0.as_bytes());
    if key.is_empty() {
        return Err(ErrorKind::Empty("relay url".to_owned()).into());
    }
    let txn = self.env.read_txn()?;
    let maybe = self.db_relays1()?.get(&txn, key)?;
    match maybe {
        Some(bytes) => Ok(Some(serde_json::from_slice(bytes)?)),
        None => Ok(None),
    }
}
/// Return every relay record for which predicate `f` is true.
pub fn filter_relays1<F>(&self, f: F) -> Result<Vec<Relay1>, Error>
where
    F: Fn(&Relay1) -> bool,
{
    let txn = self.env.read_txn()?;
    let mut matching: Vec<Relay1> = Vec::new();
    for entry in self.db_relays1()?.iter(&txn)? {
        let (_key, bytes) = entry?;
        let relay: Relay1 = serde_json::from_slice(bytes)?;
        if f(&relay) {
            matching.push(relay);
        }
    }
    Ok(matching)
}
}

View File

@ -1,6 +1,3 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{Metadata, PublicKey}; use nostr_types::{Metadata, PublicKey};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@ -20,33 +17,93 @@ pub struct Person1 {
pub relay_list_last_received: i64, pub relay_list_last_received: i64,
} }
impl Storage { impl Person1 {
#[allow(dead_code)] pub fn new(pubkey: PublicKey) -> Person1 {
pub fn write_person1<'a>( Person1 {
&'a self, pubkey,
person: &Person1, petname: None,
rw_txn: Option<&mut RwTxn<'a>>, followed: false,
) -> Result<(), Error> { followed_last_updated: 0,
// Note that we use serde instead of speedy because the complexity of the muted: false,
// serde_json::Value type makes it difficult. Any other serde serialization metadata: None,
// should work though: Consider bincode. metadata_created_at: None,
let key: Vec<u8> = person.pubkey.to_bytes(); metadata_last_received: 0,
let bytes = serde_json::to_vec(person)?; nip05_valid: false,
nip05_last_checked: None,
relay_list_created_at: None,
relay_list_last_received: 0,
}
}
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> { pub fn display_name(&self) -> Option<&str> {
self.people.put(txn, &key, &bytes)?; if let Some(pn) = &self.petname {
Ok(()) Some(pn)
}; } else if let Some(md) = &self.metadata {
if md.other.contains_key("display_name") {
match rw_txn { if let Some(serde_json::Value::String(s)) = md.other.get("display_name") {
Some(txn) => f(txn)?, if !s.is_empty() {
None => { return Some(s);
let mut txn = self.env.write_txn()?; }
f(&mut txn)?; }
txn.commit()?;
} }
}; md.name.as_deref()
} else {
None
}
}
Ok(()) pub fn name(&self) -> Option<&str> {
if let Some(md) = &self.metadata {
md.name.as_deref()
} else {
None
}
}
pub fn about(&self) -> Option<&str> {
if let Some(md) = &self.metadata {
md.about.as_deref()
} else {
None
}
}
pub fn picture(&self) -> Option<&str> {
if let Some(md) = &self.metadata {
md.picture.as_deref()
} else {
None
}
}
pub fn nip05(&self) -> Option<&str> {
if let Some(md) = &self.metadata {
md.nip05.as_deref()
} else {
None
}
}
}
impl PartialEq for Person1 {
fn eq(&self, other: &Self) -> bool {
self.pubkey.eq(&other.pubkey)
}
}
impl Eq for Person1 {}
impl PartialOrd for Person1 {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
match (self.display_name(), other.display_name()) {
(Some(a), Some(b)) => a.to_lowercase().partial_cmp(&b.to_lowercase()),
_ => self.pubkey.partial_cmp(&other.pubkey),
}
}
}
impl Ord for Person1 {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
match (self.display_name(), other.display_name()) {
(Some(a), Some(b)) => a.to_lowercase().cmp(&b.to_lowercase()),
_ => self.pubkey.cmp(&other.pubkey),
}
} }
} }

View File

@ -1,7 +1,4 @@
use crate::error::Error; use nostr_types::{PublicKey, RelayUrl, Unixtime};
use crate::storage::{Storage, MAX_LMDB_KEY};
use heed::RwTxn;
use nostr_types::{PublicKey, RelayUrl};
use speedy::{Readable, Writable}; use speedy::{Readable, Writable};
#[derive(Debug, Readable, Writable)] #[derive(Debug, Readable, Writable)]
@ -36,32 +33,134 @@ pub struct PersonRelay1 {
pub manually_paired_write: bool, pub manually_paired_write: bool,
} }
impl Storage { impl PersonRelay1 {
#[allow(dead_code)] pub fn new(pubkey: PublicKey, url: RelayUrl) -> PersonRelay1 {
pub fn write_person_relay1<'a>( PersonRelay1 {
&'a self, pubkey,
person_relay: &PersonRelay1, url,
rw_txn: Option<&mut RwTxn<'a>>, last_fetched: None,
) -> Result<(), Error> { last_suggested_kind3: None,
let mut key = person_relay.pubkey.to_bytes(); last_suggested_nip05: None,
key.extend(person_relay.url.0.as_bytes()); last_suggested_bytag: None,
key.truncate(MAX_LMDB_KEY); read: false,
let bytes = person_relay.write_to_vec()?; write: false,
manually_paired_read: false,
manually_paired_write: false,
}
}
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> { // This ranks the relays that a person writes to, but does not consider local
self.person_relays.put(txn, &key, &bytes)?; // factors such as our relay rank or the success rate of the relay.
Ok(()) pub fn write_rank(mut dbprs: Vec<PersonRelay1>) -> Vec<(RelayUrl, u64)> {
let now = Unixtime::now().unwrap().0 as u64;
let mut output: Vec<(RelayUrl, u64)> = Vec::new();
let scorefn = |when: u64, fade_period: u64, base: u64| -> u64 {
let dur = now.saturating_sub(when); // seconds since
let periods = (dur / fade_period) + 1; // minimum one period
base / periods
}; };
match rw_txn { for dbpr in dbprs.drain(..) {
Some(txn) => f(txn)?, let mut score = 0;
None => {
let mut txn = self.env.write_txn()?; // 'write' is an author-signed explicit claim of where they write
f(&mut txn)?; if dbpr.write || dbpr.manually_paired_write {
txn.commit()?; score += 20;
} }
// kind3 is our memory of where we are following someone
if let Some(when) = dbpr.last_suggested_kind3 {
score += scorefn(when, 60 * 60 * 24 * 30, 7);
}
// nip05 is an unsigned dns-based author claim of using this relay
if let Some(when) = dbpr.last_suggested_nip05 {
score += scorefn(when, 60 * 60 * 24 * 15, 4);
}
// last_fetched is gossip verified happened-to-work-before
if let Some(when) = dbpr.last_fetched {
score += scorefn(when, 60 * 60 * 24 * 3, 3);
}
// last_suggested_bytag is an anybody-signed suggestion
if let Some(when) = dbpr.last_suggested_bytag {
score += scorefn(when, 60 * 60 * 24 * 2, 1);
}
// Prune score=0 associations
if score == 0 {
continue;
}
output.push((dbpr.url, score));
}
output.sort_by(|(_, score1), (_, score2)| score2.cmp(score1));
// prune everything below a score of 20, but only after the first 6 entries
while output.len() > 6 && output[output.len() - 1].1 < 20 {
let _ = output.pop();
}
output
}
// This ranks the relays that a person reads from, but does not consider local
// factors such as our relay rank or the success rate of the relay.
pub fn read_rank(mut dbprs: Vec<PersonRelay1>) -> Vec<(RelayUrl, u64)> {
let now = Unixtime::now().unwrap().0 as u64;
let mut output: Vec<(RelayUrl, u64)> = Vec::new();
let scorefn = |when: u64, fade_period: u64, base: u64| -> u64 {
let dur = now.saturating_sub(when); // seconds since
let periods = (dur / fade_period) + 1; // minimum one period
base / periods
}; };
Ok(()) for dbpr in dbprs.drain(..) {
let mut score = 0;
// 'read' is an author-signed explicit claim of where they read
if dbpr.read || dbpr.manually_paired_read {
score += 20;
}
// kind3 is our memory of where we are following someone
if let Some(when) = dbpr.last_suggested_kind3 {
score += scorefn(when, 60 * 60 * 24 * 30, 7);
}
// nip05 is an unsigned dns-based author claim of using this relay
if let Some(when) = dbpr.last_suggested_nip05 {
score += scorefn(when, 60 * 60 * 24 * 15, 4);
}
// last_fetched is gossip verified happened-to-work-before
if let Some(when) = dbpr.last_fetched {
score += scorefn(when, 60 * 60 * 24 * 3, 3);
}
// last_suggested_bytag is an anybody-signed suggestion
if let Some(when) = dbpr.last_suggested_bytag {
score += scorefn(when, 60 * 60 * 24 * 2, 1);
}
// Prune score=0 associations
if score == 0 {
continue;
}
output.push((dbpr.url, score));
}
output.sort_by(|(_, score1), (_, score2)| score2.cmp(score1));
// prune everything below a score 20, but only after the first 6 entries
while output.len() > 6 && output[output.len() - 1].1 < 20 {
let _ = output.pop();
}
output
} }
} }

View File

@ -1,7 +1,7 @@
use crate::error::{Error, ErrorKind}; use crate::error::Error;
use crate::storage::Storage; use crate::globals::GLOBALS;
use heed::RwTxn; use gossip_relay_picker::Direction;
use nostr_types::{RelayInformationDocument, RelayUrl}; use nostr_types::{Id, RelayInformationDocument, RelayUrl, Unixtime};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@ -18,36 +18,98 @@ pub struct Relay1 {
pub last_attempt_nip11: Option<u64>, pub last_attempt_nip11: Option<u64>,
} }
impl Storage { impl Relay1 {
#[allow(dead_code)] pub const READ: u64 = 1 << 0; // 1
pub fn write_relay1<'a>( pub const WRITE: u64 = 1 << 1; // 2
&'a self, pub const ADVERTISE: u64 = 1 << 2; // 4
relay: &Relay1, pub const INBOX: u64 = 1 << 3; // 8 this is 'read' of kind 10002
rw_txn: Option<&mut RwTxn<'a>>, pub const OUTBOX: u64 = 1 << 4; // 16 this is 'write' of kind 10002
) -> Result<(), Error> { pub const DISCOVER: u64 = 1 << 5; // 32
// Note that we use serde instead of speedy because the complexity of the
// serde_json::Value type makes it difficult. Any other serde serialization pub fn new(url: RelayUrl) -> Relay1 {
// should work though: Consider bincode. Relay1 {
let key = key!(relay.url.0.as_bytes()); url,
if key.is_empty() { success_count: 0,
return Err(ErrorKind::Empty("relay url".to_owned()).into()); failure_count: 0,
last_connected_at: None,
last_general_eose_at: None,
rank: 3,
hidden: false,
usage_bits: 0,
nip11: None,
last_attempt_nip11: None,
} }
let bytes = serde_json::to_vec(relay)?; }
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> { #[inline]
self.relays.put(txn, key, &bytes)?; pub fn set_usage_bits(&mut self, bits: u64) {
Ok(()) self.usage_bits |= bits;
}; }
match rw_txn { #[inline]
Some(txn) => f(txn)?, pub fn clear_usage_bits(&mut self, bits: u64) {
None => { self.usage_bits &= !bits;
let mut txn = self.env.write_txn()?; }
f(&mut txn)?;
txn.commit()?; #[inline]
pub fn adjust_usage_bit(&mut self, bit: u64, value: bool) {
if value {
self.set_usage_bits(bit);
} else {
self.clear_usage_bits(bit);
}
}
#[inline]
pub fn has_usage_bits(&self, bits: u64) -> bool {
self.usage_bits & bits == bits
}
#[inline]
pub fn attempts(&self) -> u64 {
self.success_count + self.failure_count
}
#[inline]
pub fn success_rate(&self) -> f32 {
let attempts = self.attempts();
if attempts == 0 {
return 0.5;
} // unknown, so we put it in the middle
self.success_count as f32 / attempts as f32
}
/// This generates a "recommended_relay_url" for an 'e' tag.
pub async fn recommended_relay_for_reply(reply_to: Id) -> Result<Option<RelayUrl>, Error> {
let seen_on_relays: Vec<(RelayUrl, Unixtime)> =
GLOBALS.storage.get_event_seen_on_relay(reply_to)?;
let maybepubkey = GLOBALS.storage.read_setting_public_key();
if let Some(pubkey) = maybepubkey {
let my_inbox_relays: Vec<(RelayUrl, u64)> =
GLOBALS.storage.get_best_relays(pubkey, Direction::Read)?;
// Find the first-best intersection
for mir in &my_inbox_relays {
for sor in &seen_on_relays {
if mir.0 == sor.0 {
return Ok(Some(mir.0.clone()));
}
}
} }
};
Ok(()) // Else use my first inbox
if let Some(mir) = my_inbox_relays.first() {
return Ok(Some(mir.0.clone()));
}
// Else fall through to seen on relays only
}
if let Some(sor) = seen_on_relays.first() {
return Ok(Some(sor.0.clone()));
}
Ok(None)
} }
} }

View File

@ -6,6 +6,7 @@ use nostr_types::PublicKey;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use speedy::{Readable, Writable}; use speedy::{Readable, Writable};
// THIS IS HISTORICAL FOR MIGRATIONS. DO NOT EDIT.
#[derive(Clone, Debug, Serialize, Deserialize, Readable, Writable)] #[derive(Clone, Debug, Serialize, Deserialize, Readable, Writable)]
pub struct Settings1 { pub struct Settings1 {
pub feed_chunk: u64, pub feed_chunk: u64,

View File

@ -7,6 +7,7 @@ use nostr_types::PublicKey;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use speedy::{Readable, Writable}; use speedy::{Readable, Writable};
// THIS IS HISTORICAL FOR MIGRATIONS. DO NOT EDIT.
#[derive(Clone, Debug, Serialize, Deserialize, Readable, Writable)] #[derive(Clone, Debug, Serialize, Deserialize, Readable, Writable)]
pub struct Settings2 { pub struct Settings2 {
// ID settings // ID settings

View File

@ -4,6 +4,8 @@ use heed::RwTxn;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use speedy::{Readable, Writable}; use speedy::{Readable, Writable};
// THIS IS HISTORICAL FOR MIGRATIONS AND SHOULD NOT BE EDITED
// note: if we store anything inside the variants, we can't use macro_rules. // note: if we store anything inside the variants, we can't use macro_rules.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, Readable, Writable)] #[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, Readable, Writable)]
pub enum ThemeVariant1 { pub enum ThemeVariant1 {

View File

@ -0,0 +1,75 @@
use crate::error::{Error, ErrorKind};
use crate::globals::GLOBALS;
use crate::storage::{RawDatabase, Storage};
use heed::types::UnalignedSlice;
use nostr_types::Id;
use std::sync::Mutex;
// Id -> ()
// key: id.as_slice()
// val: vec![]
static UNINDEXED_GIFTWRAPS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut UNINDEXED_GIFTWRAPS1_DB: Option<RawDatabase> = None;
impl Storage {
/// Get a handle to the LMDB "unindexed_giftwraps" database (maps event Id
/// -> empty value), opening/creating it on first use.
///
/// The handle is cached in a `static mut`, with creation serialized by
/// `UNINDEXED_GIFTWRAPS1_DB_CREATE_LOCK` (double-checked: fast path reads
/// the cache, slow path re-checks under the lock before creating).
pub(super) fn db_unindexed_giftwraps1(&self) -> Result<RawDatabase, Error> {
    // NOTE(review): the fast-path read of the static mut happens outside the
    // lock; soundness relies on the value being written once under the lock
    // and only read thereafter — worth confirming against threading model.
    unsafe {
        if let Some(db) = UNINDEXED_GIFTWRAPS1_DB {
            Ok(db)
        } else {
            // Lock. This drops when anything returns.
            let _lock = UNINDEXED_GIFTWRAPS1_DB_CREATE_LOCK.lock();
            // In case of a race, check again
            if let Some(db) = UNINDEXED_GIFTWRAPS1_DB {
                return Ok(db);
            }
            // Create it. We know that nobody else is doing this and that
            // it cannot happen twice.
            let mut txn = self.env.write_txn()?;
            let db = self
                .env
                .database_options()
                .types::<UnalignedSlice<u8>, UnalignedSlice<u8>>()
                .name("unindexed_giftwraps")
                .create(&mut txn)?;
            txn.commit()?;
            UNINDEXED_GIFTWRAPS1_DB = Some(db);
            Ok(db)
        }
    }
}
/// Index every event queued in the unindexed_giftwraps database, then
/// remove each processed id from the queue.
///
/// Returns `ErrorKind::NoPrivateKey` when the signer is not ready —
/// presumably giftwraps must be unwrapped with our key to be indexed
/// (TODO confirm against the index writers).
pub fn index_unindexed_giftwraps1(&self) -> Result<(), Error> {
    if !GLOBALS.signer.is_ready() {
        return Err(ErrorKind::NoPrivateKey.into());
    }

    // Phase 1: collect the queued ids under a short-lived read transaction.
    // Scoped so the reader drops BEFORE we open the write transaction below;
    // a long-lived LMDB reader pins old pages for the duration of the write.
    let mut ids: Vec<Id> = Vec::new();
    {
        let txn = self.env.read_txn()?;
        let iter = self.db_unindexed_giftwraps1()?.iter(&txn)?;
        for result in iter {
            let (key, _val) = result?;
            // Keys are raw 32-byte event ids.
            let a: [u8; 32] = key.try_into()?;
            ids.push(Id(a));
        }
    }

    // Phase 2: index each event and dequeue it, all in one write transaction.
    let mut txn = self.env.write_txn()?;
    for id in ids {
        if let Some(event) = self.read_event(id)? {
            self.write_event_ek_pk_index(&event, Some(&mut txn))?;
            self.write_event_ek_c_index(&event, Some(&mut txn))?;
            self.write_event_references_person(&event, Some(&mut txn))?;
        }
        // Dequeue even if the event is missing, so it is not retried forever.
        self.db_unindexed_giftwraps1()?
            .delete(&mut txn, id.as_slice())?;
    }
    txn.commit()?;
    Ok(())
}
}