Strip out old storage code for databases prior to m19. IF YOU HAVE AN OLD DATABASE:

If you have an old database you must run gossip 0.9 or 0.10 at least once in order
to upgrade your database to something new enough for gossip 0.11 to recognize.

We intend to expire code only from 3 or more versions back.
This commit is contained in:
Mike Dilger 2024-06-21 10:59:51 +12:00
parent 8bb4ea0aaf
commit afece133e8
32 changed files with 27 additions and 2149 deletions

View File

@ -1,53 +0,0 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::types::Bytes;
use nostr_types::{EventV1, Id};
use speedy::Readable;
use std::sync::{Mutex, OnceLock};
// Id -> Event
// key: id.as_slice() | Id(val[0..32].try_into()?)
// val: event.write_to_vec() | Event::read_from_buffer(val)
static EVENTS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut EVENTS1_DB: Option<RawDatabase> = None;
impl Storage {
pub(super) fn db_events1(&self) -> Result<RawDatabase, Error> {
unsafe {
if let Some(db) = EVENTS1_DB {
Ok(db)
} else {
// Lock. This drops when anything returns.
let _lock = EVENTS1_DB_CREATE_LOCK.lock();
// In case of a race, check again
if let Some(db) = EVENTS1_DB {
return Ok(db);
}
// Create it. We know that nobody else is doing this and that
// it cannot happen twice.
let mut txn = self.env.write_txn()?;
let db = self
.env
.database_options()
.types::<Bytes, Bytes>()
// no .flags needed
.name("events")
.create(&mut txn)?;
txn.commit()?;
EVENTS1_DB = Some(db);
Ok(db)
}
}
}
pub(crate) fn read_event1(&self, id: Id) -> Result<Option<EventV1>, Error> {
let txn = self.env.read_txn()?;
match self.db_events1()?.get(&txn, id.as_slice())? {
None => Ok(None),
Some(bytes) => Ok(Some(EventV1::read_from_buffer(bytes)?)),
}
}
}

View File

@ -1,9 +1,6 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::types::Bytes;
use heed::RwTxn;
use nostr_types::EventV2;
use speedy::Writable;
use std::sync::Mutex;
// Id -> Event
@ -43,45 +40,4 @@ impl Storage {
}
}
}
/// Write an EventV2 into the 'events' table and update all of its
/// secondary indices (akci, kci, tag index, hashtags).
///
/// If `rw_txn` is Some, the caller's transaction is used; otherwise
/// `write_transact!` creates and commits one around the closure.
pub(crate) fn write_event2<'a>(
&'a self,
event: &EventV2,
rw_txn: Option<&mut RwTxn<'a>>,
) -> Result<(), Error> {
// write to lmdb 'events'
let bytes = event.write_to_vec()?;
let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
self.db_events2()?.put(txn, event.id.as_slice(), &bytes)?;
// If giftwrap, index the inner rumor instead
let mut eventptr: &EventV2 = event;
let rumor: EventV2;
if let Some(r) = self.switch_to_rumor2(event, txn)? {
rumor = r;
eventptr = &rumor;
}
// also index the event
self.write_event_akci_index(
eventptr.pubkey,
eventptr.kind,
eventptr.created_at,
eventptr.id,
Some(txn),
)?;
self.write_event_kci_index(eventptr.kind, eventptr.created_at, eventptr.id, Some(txn))?;
self.write_event2_tag_index1(eventptr, Some(txn))?;
// NOTE(review): hashtags are read from the outer `event`, not from
// `eventptr` (the rumor) as the indices above are — confirm this
// asymmetry is intended.
for hashtag in event.hashtags() {
if hashtag.is_empty() {
continue;
} // upstream bug
self.add_hashtag(&hashtag, event.id, Some(txn))?;
}
Ok(())
};
write_transact!(self, rw_txn, f)
}
}

View File

@ -1,33 +0,0 @@
use super::Storage;
use crate::error::Error;
use nostr_types::Unixtime;
impl Storage {
/// Read the user's last ContactList edit time
/// DEPRECATED - use get_person_list_last_edit_time instead
///
/// Returns the stored edit time, or the current time when nothing is
/// stored yet.
pub(in crate::storage) fn read_last_contact_list_edit(&self) -> Result<i64, Error> {
let txn = self.env.read_txn()?;
match self.general.get(&txn, b"last_contact_list_edit")? {
None => {
// No stored value: fall back to "now". The unwrap panics only
// if the system clock precedes the Unix epoch.
let now = Unixtime::now().unwrap();
Ok(now.0)
}
// NOTE(review): panics if fewer than 8 bytes were stored; assumes
// the value was written as a big-endian i64 — confirm the writer.
Some(bytes) => Ok(i64::from_be_bytes(bytes[..8].try_into().unwrap())),
}
}
/// Read the user's last MuteList edit time
/// DEPRECATED - use get_person_list_last_edit_time instead
///
/// Same fallback behavior as `read_last_contact_list_edit`.
pub(in crate::storage) fn read_last_mute_list_edit(&self) -> Result<i64, Error> {
let txn = self.env.read_txn()?;
match self.general.get(&txn, b"last_mute_list_edit")? {
None => {
let now = Unixtime::now().unwrap();
Ok(now.0)
}
Some(bytes) => Ok(i64::from_be_bytes(bytes[..8].try_into().unwrap())),
}
}
}

View File

@ -1,129 +0,0 @@
use crate::error::Error;
use crate::storage::types::Relationship1;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{EventReference, EventV1};
use speedy::Readable;
impl Storage {
/// Ensure the events1 database exists before migration 1 runs.
pub(super) fn m1_trigger(&self) -> Result<(), Error> {
let _ = self.db_events1()?;
Ok(())
}
/// Migration 1: walk every stored EventV1 and record its relationships
/// (replies, reactions, deletions, zaps) into the relationships1 table.
pub(super) fn m1_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let read_txn = self.env.read_txn()?;
let total = self.db_events1()?.len(&read_txn)?;
// Info message
tracing::info!("{prefix}: Computing and storing event relationships for {total} events...");
// Migrate
let mut count = 0;
// Iterate under a separate read txn while writing under the caller's
// write txn.
let event_txn = self.env.read_txn()?;
for result in self.db_events1()?.iter(&event_txn)? {
let pair = result?;
let event = EventV1::read_from_buffer(pair.1)?;
self.m1_process_relationships_of_event(&event, txn)?;
count += 1;
// Progress logging at every 10% checkpoint.
for checkpoint in &[10, 20, 30, 40, 50, 60, 70, 80, 90] {
if count == checkpoint * total / 100 {
tracing::info!("{}% done", checkpoint);
}
}
}
tracing::info!("syncing...");
Ok(())
}
/// Process relationships of an eventv1.
fn m1_process_relationships_of_event<'a>(
&'a self,
event: &EventV1,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
// replies to
match event.replies_to() {
Some(EventReference::Id { id, .. }) => {
self.write_relationship1(id, event.id, Relationship1::Reply, Some(txn))?;
}
Some(EventReference::Addr(_ea)) => {
// will only work if we already have it... yuck.
// We need a new relationships database for EventAddrs
// FIXME
}
None => (),
}
// reacts to
if let Some((reacted_to_id, reaction, _maybe_url)) = event.reacts_to() {
if let Some(reacted_to_event) = self.read_event1(reacted_to_id)? {
// Only if they are different people (no liking your own posts)
if reacted_to_event.pubkey != event.pubkey {
self.write_relationship1(
reacted_to_id, // event reacted to
event.id,      // the reaction event id
Relationship1::Reaction(event.pubkey, reaction),
Some(txn),
)?;
}
} else {
// Store the reaction to the event we don't have yet.
// We filter bad ones when reading them back too, so even if this
// turns out to be a reaction by the author, they can't like
// their own post
self.write_relationship1(
reacted_to_id, // event reacted to
event.id,      // the reaction event id
Relationship1::Reaction(event.pubkey, reaction),
Some(txn),
)?;
}
}
// deletes
if let Some((deleted_event_ids, reason)) = event.deletes() {
for deleted_event_id in deleted_event_ids {
// since it is a delete, we don't actually desire the event.
if let Some(deleted_event) = self.read_event1(deleted_event_id)? {
// Only if it is the same author
if deleted_event.pubkey == event.pubkey {
self.write_relationship1(
deleted_event_id,
event.id,
Relationship1::Deletion(reason.clone()),
Some(txn),
)?;
}
} else {
// We don't have the deleted event. Presume it is okay. We check again
// when we read these back
self.write_relationship1(
deleted_event_id,
event.id,
Relationship1::Deletion(reason.clone()),
Some(txn),
)?;
}
}
}
// zaps
match event.zaps() {
Ok(Some(zapdata)) => {
self.write_relationship1(
zapdata.id,
event.id,
Relationship1::ZapReceipt(event.pubkey, zapdata.amount),
Some(txn),
)?;
}
// Invalid receipts are logged and skipped, not fatal.
Err(e) => tracing::error!("Invalid zap receipt: {}", e),
_ => {}
}
Ok(())
}
}

View File

@ -1,49 +0,0 @@
use crate::error::Error;
use crate::storage::types::{Theme1, ThemeVariant1};
use crate::storage::Storage;
use heed::RwTxn;
use speedy::Readable;
impl Storage {
/// Ensure the relays1 database exists before migration 10 runs.
pub(super) fn m10_trigger(&self) -> Result<(), Error> {
let _ = self.db_relays1()?;
Ok(())
}
/// Migration 10: split the single serialized Theme1 setting into the
/// separate theme_variant / dark_mode / follow_os_dark_mode settings.
pub(super) fn m10_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: rewriting theme settings...");
// Migrate
self.m10_rewrite_theme_settings(txn)?;
Ok(())
}
fn m10_rewrite_theme_settings<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Fallback used when the old setting is absent or unreadable.
const DEF: Theme1 = Theme1 {
variant: ThemeVariant1::Default,
dark_mode: false,
follow_os_dark_mode: true,
};
// Any read or deserialization failure silently falls back to DEF.
let theme = match self.general.get(txn, b"theme") {
Err(_) => DEF,
Ok(None) => DEF,
Ok(Some(bytes)) => match Theme1::read_from_buffer(bytes) {
Ok(val) => val,
Err(_) => DEF,
},
};
// Write the three new individual settings.
self.write_setting_theme_variant(&theme.variant.name().to_owned(), Some(txn))?;
self.write_setting_dark_mode(&theme.dark_mode, Some(txn))?;
self.write_setting_follow_os_dark_mode(&theme.follow_os_dark_mode, Some(txn))?;
Ok(())
}
}

View File

@ -1,105 +0,0 @@
use crate::error::{Error, ErrorKind};
use crate::globals::GLOBALS;
use crate::storage::event_tag_index1::INDEXED_TAGS;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{EventKind, EventV1, PublicKeyHex};
use speedy::Readable;
impl Storage {
/// Ensure the databases used by migration 11 exist.
pub(super) fn m11_trigger(&self) -> Result<(), Error> {
let _ = self.db_events1()?;
let _ = self.db_event_tag_index1()?;
Ok(())
}
/// Migration 11: build the event tag index from every stored EventV1.
pub(super) fn m11_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: populating event tag index...");
// Migrate
self.m11_populate_event_tag_index(txn)?;
Ok(())
}
/// Iterate all events (read txn) and index each one (write txn).
fn m11_populate_event_tag_index<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let loop_txn = self.env.read_txn()?;
for result in self.db_events1()?.iter(&loop_txn)? {
let (_key, val) = result?;
let event = EventV1::read_from_buffer(val)?;
self.m11_write_event_tag_index1_event1(&event, txn)?;
}
Ok(())
}
// We had to copy this from event_tag_index1 which uses an unversioned Event
/// Index the tags of one EventV1 into event_tag_index1. For giftwraps,
/// the inner rumor's tags are indexed (under the giftwrap's id).
pub fn m11_write_event_tag_index1_event1<'a>(
&'a self,
event: &EventV1,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
let mut event = event;
let mut rumor_event: EventV1;
if event.kind == EventKind::GiftWrap {
match GLOBALS.identity.unwrap_giftwrap1(event) {
Ok(rumor) => {
rumor_event = rumor.into_event_with_bad_signature();
rumor_event.id = event.id; // lie, so it indexes it under the giftwrap
event = &rumor_event;
}
Err(e) => {
if matches!(e.kind, ErrorKind::NoPrivateKey) {
// Store as unindexed for later indexing
let bytes = vec![];
self.db_unindexed_giftwraps1()?
.put(txn, event.id.as_slice(), &bytes)?;
}
// NOTE(review): on failure we fall through and index the
// outer giftwrap's own tags below — confirm intended.
}
}
}
// our user's public key
let pk: Option<PublicKeyHex> = self.read_setting_public_key().map(|p| p.into());
for tag in &event.tags {
let tagname = tag.tagname();
let value = match tag.value(1) {
Ok(v) => v,
Err(_) => continue, // no tag value, not indexable.
};
// Only index tags we intend to lookup later by tag.
// If that set changes, (1) add to this code and (2) do a reindex migration
if !INDEXED_TAGS.contains(&&*tagname) {
continue;
}
// For 'p' tags, only index them if 'p' is our user
if tagname == "p" {
match &pk {
None => continue,
Some(pk) => {
if value != pk.as_str() {
continue;
}
}
}
}
// Index key is: tagname + '"' + tag value (size-limited by key!).
let mut key: Vec<u8> = tagname.as_bytes().to_owned();
key.push(b'\"'); // double quote separator, unlikely to be inside of a tagname
key.extend(value.as_bytes());
let key = key!(&key); // limit the size
let bytes = event.id.as_slice();
self.db_event_tag_index1()?.put(txn, key, bytes)?;
}
Ok(())
}
}

View File

@ -1,42 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::types::Bytes;
use heed::{DatabaseFlags, RwTxn};
impl Storage {
/// Migration 12 requires no databases to pre-exist.
pub(super) fn m12_trigger(&self) -> Result<(), Error> {
Ok(())
}
/// Migration 12: empty the obsolete event_references_person index.
pub(super) fn m12_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
tracing::info!("{prefix}: removing now unused event_references_person index...");
// Open (creating if needed) the old dup-sort index, then empty it.
// heed doesn't expose mdb_drop(1) yet, so the database itself cannot
// actually be removed — clearing its contents is the best we can do.
let obsolete = self
.env
.database_options()
.types::<Bytes, Bytes>()
.flags(DatabaseFlags::DUP_SORT | DatabaseFlags::DUP_FIXED)
.name("event_references_person")
.create(txn)?;
obsolete.clear(txn)?;
Ok(())
}
}

View File

@ -1,31 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
impl Storage {
/// Migration 14 requires no databases to pre-exist.
pub(super) fn m14_trigger(&self) -> Result<(), Error> {
Ok(())
}
/// Migration 14: drop the retired "custom_person_list_names" setting
/// from the general table.
pub(super) fn m14_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
tracing::info!("{prefix}: removing a retired setting...");
self.general.delete(txn, b"custom_person_list_names")?;
Ok(())
}
}

View File

@ -1,41 +0,0 @@
use crate::error::Error;
use crate::storage::types::PersonList1;
use crate::storage::Storage;
use heed::RwTxn;
use speedy::Writable;
use std::collections::HashMap;
impl Storage {
/// Migration 15 requires no databases to pre-exist.
pub(super) fn m15_trigger(&self) -> Result<(), Error> {
Ok(())
}
/// Migration 15: combine the deprecated per-list last-edit times into
/// the single person_lists_last_edit_times map.
pub(super) fn m15_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: moving person list last edit times...");
// Migrate
self.m15_move_person_list_last_edit_times(txn)?;
Ok(())
}
fn m15_move_person_list_last_edit_times<'a>(
&'a self,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
// Read the old per-list values (these fall back to "now" if absent)
// and store them under the new combined key.
let mut edit_times: HashMap<PersonList1, i64> = HashMap::new();
edit_times.insert(PersonList1::Followed, self.read_last_contact_list_edit()?);
edit_times.insert(PersonList1::Muted, self.read_last_mute_list_edit()?);
let bytes = edit_times.write_to_vec()?;
self.general
.put(txn, b"person_lists_last_edit_times", bytes.as_slice())?;
Ok(())
}
}

View File

@ -1,56 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{EventV1, EventV2, TagV2};
use speedy::Readable;
impl Storage {
/// Ensure both event tables exist before migration 16 runs.
pub(super) fn m16_trigger(&self) -> Result<(), Error> {
let _ = self.db_events1()?;
let _ = self.db_events2()?;
Ok(())
}
/// Migration 16: convert every stored EventV1 to EventV2 and clear the
/// old events1 table.
pub(super) fn m16_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: migrating events...");
// Migrate
self.m16_migrate_to_events2(txn)?;
Ok(())
}
fn m16_migrate_to_events2<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let loop_txn = self.env.read_txn()?;
let mut count: usize = 0;
for result in self.db_events1()?.iter(&loop_txn)? {
let (_key, val) = result?;
let event1 = EventV1::read_from_buffer(val)?;
// Tags are converted V1 -> V2 via a JSON round-trip.
let tags_json: String = serde_json::to_string(&event1.tags)?;
let tags2: Vec<TagV2> = serde_json::from_str(&tags_json)?;
let event2 = EventV2 {
id: event1.id,
pubkey: event1.pubkey,
created_at: event1.created_at,
kind: event1.kind,
sig: event1.sig,
content: event1.content,
tags: tags2,
};
// write_event2 also rebuilds all secondary indices for the event.
self.write_event2(&event2, Some(txn))?;
count += 1;
}
tracing::info!("Migrated {} events", count);
// clear events1 database (we don't have an interface to delete it)
self.db_events1()?.clear(txn)?;
Ok(())
}
}

View File

@ -1,30 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
impl Storage {
/// Ensure the relationship databases exist. Creation errors are
/// deliberately ignored here (no `?`), matching prior behavior.
pub(super) fn m17_trigger(&self) -> Result<(), Error> {
let _ = self.db_relationships1();
let _ = self.db_reprel1();
Ok(())
}
/// Migration 17: request a full rebuild of event relationships by
/// raising the rebuild flag, rather than reindexing inline.
pub(super) fn m17_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
tracing::info!("{prefix}: reindexing event relationships...");
self.set_flag_rebuild_relationships_needed(true, Some(txn))?;
Ok(())
}
}

View File

@ -1,39 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
impl Storage {
/// Ensure all four relationship databases exist. Creation errors are
/// deliberately ignored here (no `?`), matching prior behavior.
pub(super) fn m18_trigger(&self) -> Result<(), Error> {
let _ = self.db_relationships1();
let _ = self.db_reprel1();
let _ = self.db_relationships_by_id1();
let _ = self.db_relationships_by_addr1();
Ok(())
}
/// Migration 18: empty the old relationship tables and raise the flag
/// so relationships get rebuilt into the new storage.
pub(super) fn m18_migrate<'a>(
&'a self,
prefix: &str,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
tracing::info!("{prefix}: moving to new relationships storage...");
// Clear old relationships tables (we don't have an interface to delete it)
self.db_relationships1()?.clear(txn)?;
self.db_reprel1()?.clear(txn)?;
self.set_flag_rebuild_relationships_needed(true, Some(txn))?;
Ok(())
}
}

View File

@ -1,49 +0,0 @@
use crate::error::{Error, ErrorKind};
use crate::storage::types::{Settings1, Settings2};
use crate::storage::Storage;
use heed::RwTxn;
use speedy::{Readable, Writable};
impl Storage {
/// Ensure the events1 database exists before migration 2 runs.
pub(super) fn m2_trigger(&self) -> Result<(), Error> {
let _ = self.db_events1()?;
Ok(())
}
/// Migration 2: convert the serialized Settings1 blob into Settings2.
pub(super) fn m2_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: Updating Settings...");
// Migrate
self.m2_try_migrate_settings1_settings2(txn)?;
Ok(())
}
/// Move "settings" -> "settings2", converting the structure. Errors if
/// the old key is entirely missing.
fn m2_try_migrate_settings1_settings2<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// If something is under the old "settings" key
if let Ok(Some(bytes)) = self.general.get(txn, b"settings") {
// Unreadable settings are reset to defaults rather than aborting.
let settings1 = match Settings1::read_from_buffer(bytes) {
Ok(s1) => s1,
Err(_) => {
tracing::error!("Settings are not deserializing. This is probably a code issue (although I have not found the bug yet). The best I can do is reset your settings to the default. This is better than the other option of wiping your entire database and starting over.");
Settings1::default()
}
};
// Convert it to the new Settings2 structure
let settings2: Settings2 = settings1.into();
let bytes = settings2.write_to_vec()?;
// And store it under the new "settings2" key
self.general.put(txn, b"settings2", &bytes)?;
// Then delete the old "settings" key
self.general.delete(txn, b"settings")?;
} else {
// NOTE(review): a genuinely fresh database with no "settings" key
// would hit this error path — confirm migration 2 can only run on
// databases that already have settings.
return Err(ErrorKind::General("Settings missing.".to_string()).into());
}
Ok(())
}
}

View File

@ -1,36 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::RelayUrl;
impl Storage {
pub(super) fn m3_trigger(&self) -> Result<(), Error> {
let _ = self.db_relays1()?;
Ok(())
}
pub(super) fn m3_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: Removing invalid relays...");
// Migrate
self.m3_remove_invalid_relays(txn)?;
Ok(())
}
fn m3_remove_invalid_relays<'a>(&'a self, rw_txn: &mut RwTxn<'a>) -> Result<(), Error> {
let bad_relays =
self.filter_relays1(|relay| RelayUrl::try_from_str(relay.url.as_str()).is_err())?;
for relay in &bad_relays {
tracing::info!("Deleting bad relay: {}", relay.url);
self.delete_relay1(&relay.url, Some(rw_txn))?;
}
tracing::info!("Deleted {} bad relays", bad_relays.len());
Ok(())
}
}

View File

@ -1,162 +0,0 @@
use crate::error::{Error, ErrorKind};
use crate::storage::Storage;
use heed::RwTxn;
impl Storage {
/// Migration 4 requires no databases to pre-exist.
pub(super) fn m4_trigger(&self) -> Result<(), Error> {
Ok(())
}
/// Migration 4: explode the monolithic Settings2 blob into individual
/// key/value settings, then delete the blob.
pub(super) fn m4_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: Using kv for settings...");
// Migrate
self.m4_use_kv_for_settings(txn)?;
Ok(())
}
fn m4_use_kv_for_settings<'a>(&'a self, rw_txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Look in the correct key first, then in a known wrong key that an
// earlier version used.
let settings = match self.read_settings2()? {
Some(settings) => settings,
None => match self.read_settings2_from_wrong_key()? {
Some(settings) => settings,
None => {
// NOTE(review): this condition reads as "MAX_MIGRATION_LEVEL
// is at most 4", not "current level < 4" as the comment
// suggests — with a larger MAX it never fires; confirm intent.
if 4 >= Self::MAX_MIGRATION_LEVEL {
// At migration level < 4 we know this is safe to do:
crate::globals::GLOBALS.status_queue.write().write(
"Settings missing or corrupted. We had to reset to defaults. Sorry about that."
.to_owned(),
);
return Ok(());
} else {
return Err(ErrorKind::General("Settings missing.".to_string()).into());
}
}
},
};
// Write every field out as its own setting. Note the *_hours fields
// converted to minutes below are multiplied by 60.
self.write_setting_public_key(&settings.public_key, Some(rw_txn))?;
self.write_setting_log_n(&settings.log_n, Some(rw_txn))?;
self.write_setting_offline(&settings.offline, Some(rw_txn))?;
self.write_setting_load_avatars(&settings.load_avatars, Some(rw_txn))?;
self.write_setting_load_media(&settings.load_media, Some(rw_txn))?;
self.write_setting_check_nip05(&settings.check_nip05, Some(rw_txn))?;
self.write_setting_automatically_fetch_metadata(
&settings.automatically_fetch_metadata,
Some(rw_txn),
)?;
self.write_setting_num_relays_per_person(&settings.num_relays_per_person, Some(rw_txn))?;
self.write_setting_max_relays(&settings.max_relays, Some(rw_txn))?;
self.write_setting_reposts(&settings.reposts, Some(rw_txn))?;
self.write_setting_show_long_form(&settings.show_long_form, Some(rw_txn))?;
self.write_setting_show_mentions(&settings.show_mentions, Some(rw_txn))?;
self.write_setting_direct_messages(&settings.direct_messages, Some(rw_txn))?;
self.write_setting_future_allowance_secs(&settings.future_allowance_secs, Some(rw_txn))?;
self.write_setting_reactions(&settings.reactions, Some(rw_txn))?;
self.write_setting_enable_zap_receipts(&settings.enable_zap_receipts, Some(rw_txn))?;
self.write_setting_show_media(&settings.show_media, Some(rw_txn))?;
self.write_setting_pow(&settings.pow, Some(rw_txn))?;
self.write_setting_set_client_tag(&settings.set_client_tag, Some(rw_txn))?;
self.write_setting_set_user_agent(&settings.set_user_agent, Some(rw_txn))?;
self.write_setting_delegatee_tag(&settings.delegatee_tag, Some(rw_txn))?;
self.write_setting_max_fps(&settings.max_fps, Some(rw_txn))?;
self.write_setting_recompute_feed_periodically(
&settings.recompute_feed_periodically,
Some(rw_txn),
)?;
self.write_setting_feed_recompute_interval_ms(
&settings.feed_recompute_interval_ms,
Some(rw_txn),
)?;
self.write_setting_theme1(&settings.theme, Some(rw_txn))?;
self.write_setting_override_dpi(&settings.override_dpi, Some(rw_txn))?;
self.write_setting_highlight_unread_events(
&settings.highlight_unread_events,
Some(rw_txn),
)?;
self.write_setting_posting_area_at_top(&settings.posting_area_at_top, Some(rw_txn))?;
self.write_setting_status_bar(&settings.status_bar, Some(rw_txn))?;
self.write_setting_image_resize_algorithm(&settings.image_resize_algorithm, Some(rw_txn))?;
self.write_setting_relay_list_becomes_stale_minutes(
&(settings.relay_list_becomes_stale_hours * 60),
Some(rw_txn),
)?;
self.write_setting_metadata_becomes_stale_minutes(
&(settings.metadata_becomes_stale_hours * 60),
Some(rw_txn),
)?;
self.write_setting_nip05_becomes_stale_if_valid_hours(
&settings.nip05_becomes_stale_if_valid_hours,
Some(rw_txn),
)?;
self.write_setting_nip05_becomes_stale_if_invalid_minutes(
&settings.nip05_becomes_stale_if_invalid_minutes,
Some(rw_txn),
)?;
self.write_setting_avatar_becomes_stale_hours(
&settings.avatar_becomes_stale_hours,
Some(rw_txn),
)?;
self.write_setting_media_becomes_stale_hours(
&settings.media_becomes_stale_hours,
Some(rw_txn),
)?;
self.write_setting_max_websocket_message_size_kb(
&settings.max_websocket_message_size_kb,
Some(rw_txn),
)?;
self.write_setting_max_websocket_frame_size_kb(
&settings.max_websocket_frame_size_kb,
Some(rw_txn),
)?;
self.write_setting_websocket_accept_unmasked_frames(
&settings.websocket_accept_unmasked_frames,
Some(rw_txn),
)?;
self.write_setting_websocket_connect_timeout_sec(
&settings.websocket_connect_timeout_sec,
Some(rw_txn),
)?;
self.write_setting_websocket_ping_frequency_sec(
&settings.websocket_ping_frequency_sec,
Some(rw_txn),
)?;
self.write_setting_fetcher_metadata_looptime_ms(
&settings.fetcher_metadata_looptime_ms,
Some(rw_txn),
)?;
self.write_setting_fetcher_looptime_ms(&settings.fetcher_looptime_ms, Some(rw_txn))?;
self.write_setting_fetcher_connect_timeout_sec(
&settings.fetcher_connect_timeout_sec,
Some(rw_txn),
)?;
self.write_setting_fetcher_timeout_sec(&settings.fetcher_timeout_sec, Some(rw_txn))?;
self.write_setting_fetcher_max_requests_per_host(
&settings.fetcher_max_requests_per_host,
Some(rw_txn),
)?;
self.write_setting_fetcher_host_exclusion_on_low_error_secs(
&settings.fetcher_host_exclusion_on_low_error_secs,
Some(rw_txn),
)?;
self.write_setting_fetcher_host_exclusion_on_med_error_secs(
&settings.fetcher_host_exclusion_on_med_error_secs,
Some(rw_txn),
)?;
self.write_setting_fetcher_host_exclusion_on_high_error_secs(
&settings.fetcher_host_exclusion_on_high_error_secs,
Some(rw_txn),
)?;
self.write_setting_nip11_lines_to_output_on_error(
&settings.nip11_lines_to_output_on_error,
Some(rw_txn),
)?;
self.write_setting_prune_period_days(&settings.prune_period_days, Some(rw_txn))?;
// Finally remove the old monolithic blob.
self.general.delete(rw_txn, b"settings2")?;
Ok(())
}
}

View File

@ -1,40 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{EventV1, Id, Signature};
use speedy::Readable;
impl Storage {
/// Ensure the events1 database exists before migration 5 runs.
pub(super) fn m5_trigger(&self) -> Result<(), Error> {
let _ = self.db_events1()?;
Ok(())
}
/// Migration 5: delete decrypted rumors (events stored with an
/// all-zero signature) from the events table.
pub(super) fn m5_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
tracing::info!("{prefix}: deleting decrypted rumors...");
self.m5_delete_rumors(txn)?;
Ok(())
}
fn m5_delete_rumors<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Pass 1: collect ids of zero-signature events. We can't delete
// while iterating the table.
let mut doomed: Vec<Id> = Vec::new();
for entry in self.db_events1()?.iter(txn)? {
let (_key, bytes) = entry?;
let event = EventV1::read_from_buffer(bytes)?;
if event.sig == Signature::zeroes() {
doomed.push(event.id);
}
}
// Pass 2: delete them.
for id in doomed.into_iter() {
self.db_events1()?.delete(txn, id.as_slice())?;
}
Ok(())
}
}

View File

@ -1,50 +0,0 @@
use crate::error::Error;
use crate::storage::types::PersonList1;
use crate::storage::Storage;
use heed::RwTxn;
impl Storage {
/// Ensure the databases used by migration 6 exist.
pub(super) fn m6_trigger(&self) -> Result<(), Error> {
let _ = self.db_people1()?;
let _ = self.db_person_lists1()?;
Ok(())
}
/// Migration 6: translate the old per-person followed/muted booleans
/// into PersonList1 memberships.
pub(super) fn m6_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: populating new lists...");
// Migrate
self.m6_populate_new_lists(txn)?;
Ok(())
}
fn m6_populate_new_lists<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let mut count: usize = 0;
let mut followed_count: usize = 0;
for person1 in self.filter_people1(|_| true)?.iter() {
let mut lists: Vec<PersonList1> = Vec::new();
if person1.followed {
lists.push(PersonList1::Followed);
followed_count += 1;
}
if person1.muted {
lists.push(PersonList1::Muted);
}
// Only write a record for people in at least one list.
if !lists.is_empty() {
self.write_person_lists1(&person1.pubkey, lists, Some(txn))?;
count += 1;
}
}
tracing::info!(
"{} people added to new lists, {} followed",
count,
followed_count
);
// This migration does not remove the old data. The next one will.
Ok(())
}
}

View File

@ -1,49 +0,0 @@
use crate::error::Error;
use crate::storage::types::Person2;
use crate::storage::Storage;
use heed::RwTxn;
impl Storage {
/// Ensure both people tables exist before migration 7 runs.
pub(super) fn m7_trigger(&self) -> Result<(), Error> {
let _ = self.db_people1()?;
let _ = self.db_people2()?;
Ok(())
}
/// Migration 7: convert every Person1 record to Person2 and clear the
/// old people1 table.
pub(super) fn m7_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: migrating person records...");
// Migrate
self.m7_migrate_people(txn)?;
Ok(())
}
fn m7_migrate_people<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let mut count: usize = 0;
for person1 in self.filter_people1(|_| true)?.drain(..) {
let person2 = Person2 {
pubkey: person1.pubkey,
petname: person1.petname,
metadata: person1.metadata,
metadata_created_at: person1.metadata_created_at,
metadata_last_received: person1.metadata_last_received,
nip05_valid: person1.nip05_valid,
nip05_last_checked: person1.nip05_last_checked,
relay_list_created_at: person1.relay_list_created_at,
// The field was renamed: old last_received seeds new last_sought.
relay_list_last_sought: person1.relay_list_last_received,
};
self.write_person2(&person2, Some(txn))?;
count += 1;
}
tracing::info!("Migrated {} people", count);
// delete people1 database
self.db_people1()?.clear(txn)?;
// self.general.delete(txn, b"people")?; // LMDB doesn't allow this.
Ok(())
}
}

View File

@ -1,73 +0,0 @@
use crate::error::Error;
use crate::storage::types::PersonRelay1;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{Id, RelayUrl};
impl Storage {
/// Ensure the relays1 database exists before migration 8 runs.
pub(super) fn m8_trigger(&self) -> Result<(), Error> {
let _ = self.db_relays1()?;
Ok(())
}
/// Migration 8: reconstruct person_relay.last_fetched from the
/// event-seen-on-relay records.
pub(super) fn m8_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: populating missing last_fetched data...");
// Migrate
self.m8_populate_last_fetched(txn)?;
Ok(())
}
fn m8_populate_last_fetched<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let total = self.get_event_seen_on_relay1_len()?;
let mut count = 0;
// Since we failed to properly collect person_relay.last_fetched, we will
// use seen_on data to reconstruct it
let loop_txn = self.env.read_txn()?;
for result in self.db_event_seen_on_relay1()?.iter(&loop_txn)? {
let (key, val) = result?;
// Extract out the data
// Key layout: 32 bytes of event id followed by the relay URL.
let id = Id(key[..32].try_into().unwrap());
let url = match RelayUrl::try_from_str(std::str::from_utf8(&key[32..])?) {
Ok(url) => url,
Err(_) => continue, // skip if relay url is bad. We will prune these in the future.
};
let time = u64::from_be_bytes(val[..8].try_into()?);
// Read event to get the person
if let Some(event) = self.read_event(id)? {
// Read (or create) person_relay
// `update` is true when this seen-time is newer than what's stored.
let (mut pr, update) = match self.read_person_relay1(event.pubkey, &url)? {
Some(pr) => match pr.last_fetched {
Some(lf) => (pr, lf < time),
None => (pr, true),
},
None => {
let pr = PersonRelay1::new(event.pubkey, url.clone());
(pr, true)
}
};
if update {
pr.last_fetched = Some(time);
self.write_person_relay1(&pr, Some(txn))?;
}
}
count += 1;
// Progress logging at every 10% checkpoint.
for checkpoint in &[10, 20, 30, 40, 50, 60, 70, 80, 90] {
if count == checkpoint * total / 100 {
tracing::info!("{}% done", checkpoint);
}
}
}
Ok(())
}
}

View File

@ -1,92 +0,0 @@
use crate::error::{Error, ErrorKind};
use crate::globals::GLOBALS;
use crate::storage::Storage;
use heed::RwTxn;
use nostr_types::{EventKind, EventV1};
use speedy::Readable;
impl Storage {
/// Ensure the databases used by migration 9 exist.
pub(super) fn m9_trigger(&self) -> Result<(), Error> {
let _ = self.db_events1()?;
let _ = self.db_event_ek_pk_index1()?;
let _ = self.db_event_ek_c_index1()?;
let _ = self.db_hashtags1()?;
Ok(())
}
/// Migration 9: wipe and rebuild the ek-pk, ek-c and hashtag indices
/// from every stored EventV1.
pub(super) fn m9_migrate<'a>(&'a self, prefix: &str, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Info message
tracing::info!("{prefix}: rebuilding event indices...");
// Migrate
self.m9_rebuild_event_indices(txn)?;
Ok(())
}
/// Rebuild all the event indices. This is generally internal, but might be used
/// to fix a broken database.
pub fn m9_rebuild_event_indices<'a>(&'a self, txn: &mut RwTxn<'a>) -> Result<(), Error> {
// Erase all indices first
self.db_event_ek_pk_index1()?.clear(txn)?;
self.db_event_ek_c_index1()?.clear(txn)?;
self.db_hashtags1()?.clear(txn)?;
let loop_txn = self.env.read_txn()?;
for result in self.db_events1()?.iter(&loop_txn)? {
let (_key, val) = result?;
let event = EventV1::read_from_buffer(val)?;
self.m9_write_event_indices(&event, txn)?;
for hashtag in event.hashtags() {
if hashtag.is_empty() {
continue;
} // upstream bug
self.add_hashtag1(&hashtag, event.id, Some(txn))?;
}
}
Ok(())
}
/// Write the ek-pk and ek-c index entries for one event. Giftwraps are
/// indexed by their inner rumor (under the giftwrap's id).
fn m9_write_event_indices<'a>(
&'a self,
event: &EventV1,
txn: &mut RwTxn<'a>,
) -> Result<(), Error> {
let mut event = event;
// If giftwrap, index the inner rumor instead
let mut rumor_event: EventV1;
if event.kind == EventKind::GiftWrap {
match GLOBALS.identity.unwrap_giftwrap1(event) {
Ok(rumor) => {
rumor_event = rumor.into_event_with_bad_signature();
rumor_event.id = event.id; // lie, so it indexes it under the giftwrap
event = &rumor_event;
}
Err(e) => {
if matches!(e.kind, ErrorKind::NoPrivateKey) {
// Store as unindexed for later indexing
let bytes = vec![];
self.db_unindexed_giftwraps1()?
.put(txn, event.id.as_slice(), &bytes)?;
}
// NOTE(review): on failure we fall through and index the
// outer giftwrap itself below — confirm intended.
}
}
}
let ek: u32 = event.kind.into();
// ek-pk index: key = event kind (BE u32) + pubkey, value = event id.
let mut key: Vec<u8> = ek.to_be_bytes().as_slice().to_owned(); // event kind
key.extend(event.pubkey.as_bytes()); // pubkey
let bytes = event.id.as_slice();
self.db_event_ek_pk_index1()?.put(txn, &key, bytes)?;
// ek-c index: key = event kind (BE u32) + reversed created_at so
// iteration yields newest first.
let mut key: Vec<u8> = ek.to_be_bytes().as_slice().to_owned(); // event kind
key.extend((i64::MAX - event.created_at.0).to_be_bytes().as_slice()); // reverse created_at
let bytes = event.id.as_slice();
self.db_event_ek_c_index1()?.put(txn, &key, bytes)?;
Ok(())
}
}

View File

@ -1,20 +1,12 @@
mod deprecated;
mod m1;
mod m10;
mod m11;
mod m12;
mod m13;
mod m14;
mod m15;
mod m16;
mod m17;
mod m18;
mod m19;
mod m2;
mod m20;
mod m21;
mod m22;
// Migrations before m23 (except critical ones) are dropped from gossip-0.11
// so you must run gossip-0.9 or gossip-0.10 at least once to come up to
// m23 (or m28) first.
mod m19; // Creates person list metadata
mod m20; // Initializes person list metadata
mod m21; // Migrates person list metadata
mod m22; // Migrates person list metadata again
mod m23;
mod m24;
mod m25;
@ -22,7 +14,6 @@ mod m26;
mod m27;
mod m28;
mod m29;
mod m3;
mod m30;
mod m31;
mod m32;
@ -33,18 +24,13 @@ mod m36;
mod m37;
mod m38;
mod m39;
mod m4;
mod m5;
mod m6;
mod m7;
mod m8;
mod m9;
use super::Storage;
use crate::error::{Error, ErrorKind};
use heed::RwTxn;
impl Storage {
const MIN_MIGRATION_LEVEL: u32 = 23;
const MAX_MIGRATION_LEVEL: u32 = 39;
/// Initialize the database from empty
@ -54,8 +40,6 @@ impl Storage {
// modify that created data
#[rustfmt::skip]
let necessary: Vec<u32> = vec![
6, // Creates Followed and Muted default person lists
13, // Migrates Person Lists
19, // Creates person list metadata
20, // Initializes person list metadata
21, // Migrates person list metadata
@ -78,6 +62,23 @@ impl Storage {
}
pub(super) fn migrate(&self, mut level: u32) -> Result<(), Error> {
if level < Self::MIN_MIGRATION_LEVEL {
let lmdb_dir = crate::profile::Profile::current().map_or(
"<notfound>".to_owned(),
|p| format!("{}/", p.lmdb_dir.display()),
);
eprintln!("DATABASE IS TOO OLD");
eprintln!("-------------------");
eprintln!("This version of gossip cannot handle your old database. You have two options:");
eprintln!("Option 1: Run gossip 0.9 or 0.10 at least once to upgrade, or");
eprintln!("Option 2: Delete your database directory {} and restart to start fresh", lmdb_dir);
return Err(ErrorKind::General(format!(
"Migration level {} is too old.",
level
))
.into());
}
if level > Self::MAX_MIGRATION_LEVEL {
return Err(ErrorKind::General(format!(
"Migration level {} unknown: This client is older than your data.",
@ -100,24 +101,6 @@ impl Storage {
fn trigger(&self, level: u32) -> Result<(), Error> {
match level {
1 => self.m1_trigger()?,
2 => self.m2_trigger()?,
3 => self.m3_trigger()?,
4 => self.m4_trigger()?,
5 => self.m5_trigger()?,
6 => self.m6_trigger()?,
7 => self.m7_trigger()?,
8 => self.m8_trigger()?,
9 => self.m9_trigger()?,
10 => self.m10_trigger()?,
11 => self.m11_trigger()?,
12 => self.m12_trigger()?,
13 => self.m13_trigger()?,
14 => self.m14_trigger()?,
15 => self.m15_trigger()?,
16 => self.m16_trigger()?,
17 => self.m17_trigger()?,
18 => self.m18_trigger()?,
19 => self.m19_trigger()?,
20 => self.m20_trigger()?,
21 => self.m21_trigger()?,
@ -148,24 +131,6 @@ impl Storage {
fn migrate_inner<'a>(&'a self, level: u32, txn: &mut RwTxn<'a>) -> Result<(), Error> {
let prefix = format!("LMDB Migration {}", level);
match level {
1 => self.m1_migrate(&prefix, txn)?,
2 => self.m2_migrate(&prefix, txn)?,
3 => self.m3_migrate(&prefix, txn)?,
4 => self.m4_migrate(&prefix, txn)?,
5 => self.m5_migrate(&prefix, txn)?,
6 => self.m6_migrate(&prefix, txn)?,
7 => self.m7_migrate(&prefix, txn)?,
8 => self.m8_migrate(&prefix, txn)?,
9 => self.m9_migrate(&prefix, txn)?,
10 => self.m10_migrate(&prefix, txn)?,
11 => self.m11_migrate(&prefix, txn)?,
12 => self.m12_migrate(&prefix, txn)?,
13 => self.m13_migrate(&prefix, txn)?,
14 => self.m14_migrate(&prefix, txn)?,
15 => self.m15_migrate(&prefix, txn)?,
16 => self.m16_migrate(&prefix, txn)?,
17 => self.m17_migrate(&prefix, txn)?,
18 => self.m18_migrate(&prefix, txn)?,
19 => self.m19_migrate(&prefix, txn)?,
20 => self.m20_migrate(&prefix, txn)?,
21 => self.m21_migrate(&prefix, txn)?,

View File

@ -27,22 +27,18 @@ mod event_ek_pk_index1;
mod event_seen_on_relay1;
mod event_tag_index1;
mod event_viewed1;
mod events1;
mod events2;
mod events3;
mod hashtags1;
mod nip46servers1;
mod nip46servers2;
mod people1;
mod people2;
mod person_lists1;
mod person_lists2;
mod person_lists_metadata1;
mod person_lists_metadata2;
mod person_lists_metadata3;
mod person_relays1;
mod person_relays2;
mod relationships1;
mod relationships_by_addr1;
mod relationships_by_addr2;
mod relationships_by_id1;
@ -50,7 +46,6 @@ mod relationships_by_id2;
mod relays1;
mod relays2;
mod relays3;
mod reprel1;
mod unindexed_giftwraps1;
mod versioned;

View File

@ -1,104 +0,0 @@
use crate::error::Error;
use crate::storage::types::Person1;
use crate::storage::{RawDatabase, Storage};
use heed::types::Bytes;
use heed::RwTxn;
use nostr_types::PublicKey;
use std::sync::Mutex;
// PublicKey -> Person
// key: pubkey.as_bytes()
// val: serde_json::to_vec(person) | serde_json::from_slice(bytes)
static PEOPLE1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut PEOPLE1_DB: Option<RawDatabase> = None;
impl Storage {
    /// Open (or lazily create) the "people" LMDB database.
    ///
    /// Uses a double-checked lock around an `unsafe` static so the database
    /// handle is created exactly once per process, even under races.
    #[allow(dead_code)]
    pub(super) fn db_people1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = PEOPLE1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = PEOPLE1_DB_CREATE_LOCK.lock();

                // In case of a race, check again
                if let Some(db) = PEOPLE1_DB {
                    return Ok(db);
                }

                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<Bytes, Bytes>()
                    // no .flags needed
                    .name("people")
                    .create(&mut txn)?;
                txn.commit()?;
                PEOPLE1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Number of Person1 records stored.
    #[allow(dead_code)]
    pub(crate) fn get_people1_len(&self) -> Result<u64, Error> {
        let txn = self.env.read_txn()?;
        Ok(self.db_people1()?.len(&txn)?)
    }

    /// Store a Person1 record keyed by its pubkey (JSON-serialized value).
    ///
    /// Writes through `rw_txn` if given, otherwise opens and commits its own
    /// write transaction (via the `write_transact!` macro).
    #[allow(dead_code)]
    pub(crate) fn write_person1<'a>(
        &'a self,
        person: &Person1,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        // Note that we use serde instead of speedy because the complexity of the
        // serde_json::Value type makes it difficult. Any other serde serialization
        // should work though: Consider bincode.
        let key: Vec<u8> = person.pubkey.to_bytes();
        let bytes = serde_json::to_vec(person)?;

        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_people1()?.put(txn, &key, &bytes)?;
            Ok(())
        };

        write_transact!(self, rw_txn, f)
    }

    /// Fetch a Person1 record by pubkey; Ok(None) if absent.
    #[allow(dead_code)]
    pub(crate) fn read_person1(&self, pubkey: &PublicKey) -> Result<Option<Person1>, Error> {
        // Note that we use serde instead of speedy because the complexity of the
        // serde_json::Value type makes it difficult. Any other serde serialization
        // should work though: Consider bincode.
        let key: Vec<u8> = pubkey.to_bytes();
        let txn = self.env.read_txn()?;
        Ok(match self.db_people1()?.get(&txn, &key)? {
            Some(bytes) => Some(serde_json::from_slice(bytes)?),
            None => None,
        })
    }

    /// Full-table scan returning every Person1 matching predicate `f`.
    #[allow(dead_code)]
    pub(crate) fn filter_people1<F>(&self, f: F) -> Result<Vec<Person1>, Error>
    where
        F: Fn(&Person1) -> bool,
    {
        let txn = self.env.read_txn()?;
        let iter = self.db_people1()?.iter(&txn)?;
        let mut output: Vec<Person1> = Vec::new();
        for result in iter {
            let (_key, val) = result?;
            let person: Person1 = serde_json::from_slice(val)?;
            if f(&person) {
                output.push(person);
            }
        }
        Ok(output)
    }
}

View File

@ -1,62 +0,0 @@
use crate::error::Error;
use crate::people::PersonList;
use crate::storage::{RawDatabase, Storage};
use heed::types::Bytes;
use heed::RwTxn;
use nostr_types::PublicKey;
use std::sync::Mutex;
// Pubkey -> Vec<u8>
// key: pubkey.as_bytes()
static PERSON_LISTS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut PERSON_LISTS1_DB: Option<RawDatabase> = None;
impl Storage {
    /// Open (or lazily create) the "person_lists" LMDB database.
    ///
    /// Double-checked lock around an `unsafe` static so the handle is
    /// created exactly once per process.
    pub(super) fn db_person_lists1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = PERSON_LISTS1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = PERSON_LISTS1_DB_CREATE_LOCK.lock();

                // In case of a race, check again
                if let Some(db) = PERSON_LISTS1_DB {
                    return Ok(db);
                }

                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<Bytes, Bytes>()
                    // no .flags needed
                    .name("person_lists")
                    .create(&mut txn)?;
                txn.commit()?;
                PERSON_LISTS1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Store the lists a person belongs to, keyed by pubkey.
    ///
    /// The value is one byte per list (each `PersonList` converted to u8).
    /// Writes through `rw_txn` if given, otherwise opens and commits its
    /// own write transaction (via the `write_transact!` macro).
    pub(crate) fn write_person_lists1<'a>(
        &'a self,
        pubkey: &PublicKey,
        lists: Vec<PersonList>,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let key: Vec<u8> = pubkey.to_bytes();
        let bytes = lists.iter().map(|l| (*l).into()).collect::<Vec<u8>>();

        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_person_lists1()?.put(txn, &key, &bytes)?;
            Ok(())
        };

        write_transact!(self, rw_txn, f)
    }
}

View File

@ -3,8 +3,7 @@ use crate::storage::types::PersonRelay1;
use crate::storage::{RawDatabase, Storage, MAX_LMDB_KEY};
use heed::types::Bytes;
use heed::RwTxn;
use nostr_types::{PublicKey, RelayUrl};
use speedy::{Readable, Writable};
use speedy::Writable;
use std::sync::Mutex;
// PublicKey:Url -> PersonRelay
@ -63,19 +62,4 @@ impl Storage {
write_transact!(self, rw_txn, f)
}
/// Read a PersonRelay1 record keyed by (pubkey, relay url).
///
/// The key is the pubkey bytes followed by the URL bytes, truncated to
/// MAX_LMDB_KEY — this must match how writes build the key so that
/// truncation is consistent on both sides.
pub(crate) fn read_person_relay1(
    &self,
    pubkey: PublicKey,
    url: &RelayUrl,
) -> Result<Option<PersonRelay1>, Error> {
    let mut key = pubkey.to_bytes();
    key.extend(url.as_str().as_bytes());
    key.truncate(MAX_LMDB_KEY);
    let txn = self.env.read_txn()?;
    Ok(match self.db_person_relays1()?.get(&txn, &key)? {
        Some(bytes) => Some(PersonRelay1::read_from_buffer(bytes)?),
        None => None,
    })
}
}

View File

@ -1,72 +0,0 @@
use crate::error::Error;
use crate::storage::types::Relationship1;
use crate::storage::{RawDatabase, Storage};
use heed::types::Bytes;
use heed::RwTxn;
use nostr_types::Id;
use speedy::Writable;
use std::sync::Mutex;
// Id:Id -> Relationship1
// key: id.as_slice(), id.as_slice() | Id(val[32..64].try_into()?)
// val: relationship.write_to_vec() | Relationship1::read_from_buffer(val)
// NOTE: this means the SECOND Id relates to the FIRST Id, e.g.
// id2 replies to id1
// id2 reacts to id1
// id2 deletes id1
// id2 is a zap receipt on id1
static RELATIONSHIPS1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut RELATIONSHIPS1_DB: Option<RawDatabase> = None;
impl Storage {
    /// Open (or lazily create) the "relationships" LMDB database.
    ///
    /// Double-checked lock around an `unsafe` static so the handle is
    /// created exactly once per process.
    pub(super) fn db_relationships1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = RELATIONSHIPS1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = RELATIONSHIPS1_DB_CREATE_LOCK.lock();

                // In case of a race, check again
                if let Some(db) = RELATIONSHIPS1_DB {
                    return Ok(db);
                }

                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<Bytes, Bytes>()
                    // no .flags needed?
                    .name("relationships")
                    .create(&mut txn)?;
                txn.commit()?;
                RELATIONSHIPS1_DB = Some(db);
                Ok(db)
            }
        }
    }

    /// Record that `related` relates to `id` (e.g. replies to, reacts to).
    ///
    /// Key is the two 32-byte ids concatenated (id | related); value is the
    /// speedy-serialized Relationship1. Writes through `rw_txn` if given,
    /// otherwise opens and commits its own write transaction.
    pub(crate) fn write_relationship1<'a>(
        &'a self,
        id: Id,
        related: Id,
        relationship: Relationship1,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let mut key = id.as_ref().as_slice().to_owned();
        key.extend(related.as_ref());
        let value = relationship.write_to_vec()?;

        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.db_relationships1()?.put(txn, &key, &value)?;
            Ok(())
        };

        write_transact!(self, rw_txn, f)
    }
}

View File

@ -1,42 +0,0 @@
use crate::error::Error;
use crate::storage::{RawDatabase, Storage};
use heed::{types::Bytes, DatabaseFlags};
use std::sync::Mutex;
// Kind:Pubkey:d-tag -> Relationship1:Id
// (has dups)
static REPREL1_DB_CREATE_LOCK: Mutex<()> = Mutex::new(());
static mut REPREL1_DB: Option<RawDatabase> = None;
impl Storage {
    /// Open (or lazily create) the "reprel1" LMDB database.
    ///
    /// Keys are (kind, pubkey, d-tag); values may have duplicates, so the
    /// database is opened with DUP_SORT. Double-checked lock around an
    /// `unsafe` static so the handle is created exactly once per process.
    pub(super) fn db_reprel1(&self) -> Result<RawDatabase, Error> {
        unsafe {
            if let Some(db) = REPREL1_DB {
                Ok(db)
            } else {
                // Lock. This drops when anything returns.
                let _lock = REPREL1_DB_CREATE_LOCK.lock();

                // In case of a race, check again
                if let Some(db) = REPREL1_DB {
                    return Ok(db);
                }

                // Create it. We know that nobody else is doing this and that
                // it cannot happen twice.
                let mut txn = self.env.write_txn()?;
                let db = self
                    .env
                    .database_options()
                    .types::<Bytes, Bytes>()
                    // DUP_SORT without DUP_FIXED: serialized Relationship1
                    // values are variable-length, so fixed-size dups can't
                    // be used here.
                    .flags(DatabaseFlags::DUP_SORT) // NOT FIXED, Relationship1 serialized isn't.
                    .name("reprel1")
                    .create(&mut txn)?;
                txn.commit()?;
                REPREL1_DB = Some(db);
                Ok(db)
            }
        }
    }
}

View File

@ -1,6 +1,3 @@
mod person1;
pub(crate) use person1::Person1;
mod person2;
pub use person2::Person2;
@ -49,15 +46,6 @@ pub use relay2::Relay2;
mod relay3;
pub use relay3::Relay3;
mod settings1;
pub(crate) use settings1::Settings1;
mod settings2;
pub(crate) use settings2::Settings2;
mod theme1;
pub(crate) use theme1::{Theme1, ThemeVariant1};
use crate::error::Error;
use nostr_types::{Id, PublicKey};

View File

@ -1,109 +0,0 @@
use nostr_types::{Metadata, PublicKey};
use serde::{Deserialize, Serialize};
// THIS IS HISTORICAL FOR MIGRATIONS AND THE STRUCTURES SHOULD NOT BE EDITED
/// Historical person record (schema version 1), kept only so old databases
/// can be migrated. Serialized via serde_json; do not alter the field set
/// or order, or old data will fail to deserialize.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Person1 {
    /// The person's public key — the identity and the storage key.
    pub pubkey: PublicKey,
    /// Local nickname chosen by this user; overrides metadata names.
    pub petname: Option<String>,
    pub followed: bool,
    pub followed_last_updated: i64,
    pub muted: bool,
    /// Their NIP-01 metadata event content, if we have received one.
    pub metadata: Option<Metadata>,
    pub metadata_created_at: Option<i64>,
    pub metadata_last_received: i64,
    pub nip05_valid: bool,
    pub nip05_last_checked: Option<u64>,
    pub relay_list_created_at: Option<i64>,
    pub relay_list_last_received: i64,
}
#[allow(dead_code)]
impl Person1 {
    /// Construct a fresh record for `pubkey` with everything unset/zeroed.
    pub fn new(pubkey: PublicKey) -> Person1 {
        Person1 {
            pubkey,
            petname: None,
            followed: false,
            followed_last_updated: 0,
            muted: false,
            metadata: None,
            metadata_created_at: None,
            metadata_last_received: 0,
            nip05_valid: false,
            nip05_last_checked: None,
            relay_list_created_at: None,
            relay_list_last_received: 0,
        }
    }

    /// Best name to show: the local petname wins; otherwise a non-empty
    /// "display_name" from their metadata; otherwise their metadata name.
    pub fn display_name(&self) -> Option<&str> {
        if let Some(pn) = &self.petname {
            return Some(pn);
        }
        let md = self.metadata.as_ref()?;
        if let Some(serde_json::Value::String(s)) = md.other.get("display_name") {
            if !s.is_empty() {
                return Some(s);
            }
        }
        md.name.as_deref()
    }

    /// The "name" field from their metadata, if any.
    pub fn name(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.name.as_deref())
    }

    /// The "about" field from their metadata, if any.
    pub fn about(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.about.as_deref())
    }

    /// The "picture" URL from their metadata, if any.
    pub fn picture(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.picture.as_deref())
    }

    /// The NIP-05 identifier from their metadata, if any.
    pub fn nip05(&self) -> Option<&str> {
        self.metadata.as_ref().and_then(|md| md.nip05.as_deref())
    }
}
// Two Person1 records are equal iff they share the same public key;
// other fields are ignored for identity.
impl PartialEq for Person1 {
    fn eq(&self, other: &Self) -> bool {
        self.pubkey.eq(&other.pubkey)
    }
}

impl Eq for Person1 {}

// Total order is delegated to `Ord::cmp` below.
impl PartialOrd for Person1 {
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Person1 {
    /// Sort case-insensitively by display name when both people have one;
    /// otherwise fall back to comparing public keys.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        if let (Some(a), Some(b)) = (self.display_name(), other.display_name()) {
            a.to_lowercase().cmp(&b.to_lowercase())
        } else {
            self.pubkey.cmp(&other.pubkey)
        }
    }
}

View File

@ -1,121 +0,0 @@
use super::super::Storage;
use super::theme1::{Theme1, ThemeVariant1};
use crate::error::Error;
use heed::RwTxn;
use nostr_types::PublicKey;
use serde::{Deserialize, Serialize};
use speedy::{Readable, Writable};
// THIS IS HISTORICAL FOR MIGRATIONS AND THE STRUCTURES SHOULD NOT BE EDITED
/// Historical settings record (schema version 1), kept only so old
/// databases can be migrated. Serialized with speedy, so the field set
/// and order must not change or old data will fail to deserialize.
#[derive(Clone, Debug, Serialize, Deserialize, Readable, Writable)]
pub struct Settings1 {
    // Feed timing values are in seconds.
    pub feed_chunk: u64,
    pub replies_chunk: u64,
    pub overlap: u64,
    pub num_relays_per_person: u8,
    pub max_relays: u8,
    pub public_key: Option<PublicKey>,
    pub max_fps: u32,
    pub recompute_feed_periodically: bool,
    pub feed_recompute_interval_ms: u32,
    pub pow: u8,
    pub offline: bool,
    pub theme: Theme1,
    pub set_client_tag: bool,
    pub set_user_agent: bool,
    pub override_dpi: Option<u32>,
    pub reactions: bool,
    pub reposts: bool,
    pub show_long_form: bool,
    pub show_mentions: bool,
    pub show_media: bool,
    pub approve_content_warning: bool,
    pub load_avatars: bool,
    pub load_media: bool,
    pub check_nip05: bool,
    pub direct_messages: bool,
    pub automatically_fetch_metadata: bool,
    pub delegatee_tag: String,
    pub highlight_unread_events: bool,
    pub posting_area_at_top: bool,
    pub enable_zap_receipts: bool,
}
// Historical defaults for the v1 settings; used by migrations when no
// stored settings are found. Do not change these values.
impl Default for Settings1 {
    fn default() -> Settings1 {
        Settings1 {
            feed_chunk: 60 * 60 * 12, // 12 hours
            replies_chunk: 60 * 60 * 24 * 7, // 1 week
            overlap: 300, // 5 minutes
            num_relays_per_person: 2,
            max_relays: 50,
            public_key: None,
            max_fps: 12,
            recompute_feed_periodically: true,
            feed_recompute_interval_ms: 8000,
            pow: 0,
            offline: false,
            theme: Theme1 {
                variant: ThemeVariant1::Default,
                dark_mode: false,
                follow_os_dark_mode: false,
            },
            set_client_tag: false,
            set_user_agent: false,
            override_dpi: None,
            reactions: true,
            reposts: true,
            show_long_form: false,
            show_mentions: true,
            show_media: true,
            approve_content_warning: false,
            load_avatars: true,
            load_media: true,
            check_nip05: true,
            direct_messages: true,
            automatically_fetch_metadata: true,
            delegatee_tag: String::new(),
            highlight_unread_events: true,
            posting_area_at_top: true,
            enable_zap_receipts: true,
        }
    }
}
impl Storage {
    /// Store v1 settings speedy-serialized under the b"settings" key of the
    /// general table. Writes through `rw_txn` if given, otherwise opens and
    /// commits its own write transaction.
    #[allow(dead_code)]
    pub(crate) fn write_settings1<'a>(
        &'a self,
        settings: &Settings1,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let bytes = settings.write_to_vec()?;

        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.general.put(txn, b"settings", &bytes)?;
            Ok(())
        };

        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };

        Ok(())
    }

    /// Load v1 settings from the b"settings" key; Ok(None) if absent.
    #[allow(dead_code)]
    pub(crate) fn read_settings1(&self) -> Result<Option<Settings1>, Error> {
        let txn = self.env.read_txn()?;

        match self.general.get(&txn, b"settings")? {
            None => Ok(None),
            Some(bytes) => Ok(Some(Settings1::read_from_buffer(bytes)?)),
        }
    }
}

View File

@ -1,281 +0,0 @@
use super::super::Storage;
use super::settings1::Settings1;
use super::theme1::{Theme1, ThemeVariant1};
use crate::error::Error;
use heed::RwTxn;
use nostr_types::PublicKey;
use serde::{Deserialize, Serialize};
use speedy::{Readable, Writable};
// THIS IS HISTORICAL FOR MIGRATIONS AND THE STRUCTURES SHOULD NOT BE EDITED
/// Historical settings record (schema version 2), kept only so old
/// databases can be migrated. Serialized with speedy, so the field set
/// and order must not change or old data will fail to deserialize.
#[derive(Clone, Debug, Serialize, Deserialize, Readable, Writable)]
pub struct Settings2 {
    // ID settings
    pub public_key: Option<PublicKey>,
    // scrypt work factor for encrypting the private key
    pub log_n: u8,

    // Network settings
    pub offline: bool,
    pub load_avatars: bool,
    pub load_media: bool,
    pub check_nip05: bool,
    pub automatically_fetch_metadata: bool,

    // Relay settings
    pub num_relays_per_person: u8,
    pub max_relays: u8,

    // Feed Settings (chunk values are in seconds)
    pub feed_chunk: u64,
    pub replies_chunk: u64,
    pub person_feed_chunk: u64,
    pub overlap: u64,

    // Event Selection
    pub reposts: bool,
    pub show_long_form: bool,
    pub show_mentions: bool,
    pub direct_messages: bool,
    pub future_allowance_secs: u64,

    // Event Content Settings
    pub reactions: bool,
    pub enable_zap_receipts: bool,
    pub show_media: bool,

    // Posting Settings
    pub pow: u8,
    pub set_client_tag: bool,
    pub set_user_agent: bool,
    pub delegatee_tag: String,

    // UI settings
    pub max_fps: u32,
    pub recompute_feed_periodically: bool,
    pub feed_recompute_interval_ms: u32,
    pub theme: Theme1,
    pub override_dpi: Option<u32>,
    pub highlight_unread_events: bool,
    pub posting_area_at_top: bool,
    pub status_bar: bool,
    pub image_resize_algorithm: String,

    // Staletime settings
    pub relay_list_becomes_stale_hours: u64,
    pub metadata_becomes_stale_hours: u64,
    pub nip05_becomes_stale_if_valid_hours: u64,
    pub nip05_becomes_stale_if_invalid_minutes: u64,
    pub avatar_becomes_stale_hours: u64,
    pub media_becomes_stale_hours: u64,

    // Websocket settings
    pub max_websocket_message_size_kb: usize,
    pub max_websocket_frame_size_kb: usize,
    pub websocket_accept_unmasked_frames: bool,
    pub websocket_connect_timeout_sec: u64,
    pub websocket_ping_frequency_sec: u64,

    // HTTP settings
    pub fetcher_metadata_looptime_ms: u64,
    pub fetcher_looptime_ms: u64,
    pub fetcher_connect_timeout_sec: u64,
    pub fetcher_timeout_sec: u64,
    pub fetcher_max_requests_per_host: usize,
    pub fetcher_host_exclusion_on_low_error_secs: u64,
    pub fetcher_host_exclusion_on_med_error_secs: u64,
    pub fetcher_host_exclusion_on_high_error_secs: u64,
    pub nip11_lines_to_output_on_error: usize,

    // Database settings
    pub prune_period_days: u64,
}
// Historical defaults for the v2 settings; used by migrations when no
// stored settings are found, and by `From<Settings1>` for fields that v1
// did not have. Do not change these values.
impl Default for Settings2 {
    fn default() -> Settings2 {
        Settings2 {
            // ID settings
            public_key: None,
            log_n: 18,

            // Network settings
            offline: false,
            load_avatars: true,
            load_media: true,
            check_nip05: true,
            automatically_fetch_metadata: true,

            // Relay settings
            num_relays_per_person: 2,
            max_relays: 50,

            // Feed settings
            feed_chunk: 60 * 60 * 12, // 12 hours
            replies_chunk: 60 * 60 * 24 * 7, // 1 week
            person_feed_chunk: 60 * 60 * 24 * 30, // 1 month
            overlap: 300, // 5 minutes

            // Event Selection
            reposts: true,
            show_long_form: false,
            show_mentions: true,
            direct_messages: true,
            future_allowance_secs: 60 * 15, // 15 minutes

            // Event Content Settings
            reactions: true,
            enable_zap_receipts: true,
            show_media: true,

            // Posting settings
            pow: 0,
            set_client_tag: false,
            set_user_agent: false,
            delegatee_tag: String::new(),

            // UI settings
            max_fps: 12,
            recompute_feed_periodically: true,
            feed_recompute_interval_ms: 8000,
            theme: Theme1 {
                variant: ThemeVariant1::Default,
                dark_mode: false,
                follow_os_dark_mode: false,
            },
            override_dpi: None,
            highlight_unread_events: true,
            posting_area_at_top: true,
            status_bar: false,
            image_resize_algorithm: "CatmullRom".to_owned(),

            // Staletime settings
            relay_list_becomes_stale_hours: 8,
            metadata_becomes_stale_hours: 8,
            nip05_becomes_stale_if_valid_hours: 8,
            nip05_becomes_stale_if_invalid_minutes: 30, // 30 minutes
            avatar_becomes_stale_hours: 8,
            media_becomes_stale_hours: 8,

            // Websocket settings
            max_websocket_message_size_kb: 1024, // 1 MB
            max_websocket_frame_size_kb: 1024, // 1 MB
            websocket_accept_unmasked_frames: false,
            websocket_connect_timeout_sec: 15,
            websocket_ping_frequency_sec: 55,

            // HTTP settings
            fetcher_metadata_looptime_ms: 3000,
            fetcher_looptime_ms: 1800,
            fetcher_connect_timeout_sec: 15,
            fetcher_timeout_sec: 30,
            fetcher_max_requests_per_host: 3,
            fetcher_host_exclusion_on_low_error_secs: 30,
            fetcher_host_exclusion_on_med_error_secs: 60,
            fetcher_host_exclusion_on_high_error_secs: 600,
            nip11_lines_to_output_on_error: 10,

            // Database settings
            prune_period_days: 30,
        }
    }
}
// Migration conversion: carry forward every field Settings1 had; every
// field new in Settings2 (log_n, person_feed_chunk, staletimes, websocket,
// HTTP, database settings, etc.) comes from `Settings2::default()` via the
// struct-update syntax at the end. Note: Settings1's `approve_content_warning`
// is intentionally dropped.
impl From<Settings1> for Settings2 {
    fn from(old: Settings1) -> Settings2 {
        Settings2 {
            // ID settings
            public_key: old.public_key,

            // Network settings
            offline: old.offline,
            load_avatars: old.load_avatars,
            load_media: old.load_media,
            check_nip05: old.check_nip05,
            automatically_fetch_metadata: old.automatically_fetch_metadata,

            // Relay settings
            num_relays_per_person: old.num_relays_per_person,
            max_relays: old.max_relays,

            // Feed settings
            feed_chunk: old.feed_chunk,
            replies_chunk: old.replies_chunk,
            overlap: old.overlap,

            // Event Selection
            reposts: old.reposts,
            show_long_form: old.show_long_form,
            show_mentions: old.show_mentions,
            direct_messages: old.direct_messages,

            // Event Content Settings
            reactions: old.reactions,
            enable_zap_receipts: old.enable_zap_receipts,
            show_media: old.show_media,

            // Posting settings
            pow: old.pow,
            set_client_tag: old.set_client_tag,
            set_user_agent: old.set_user_agent,
            delegatee_tag: old.delegatee_tag,

            // UI settings
            max_fps: old.max_fps,
            recompute_feed_periodically: old.recompute_feed_periodically,
            feed_recompute_interval_ms: old.feed_recompute_interval_ms,
            theme: old.theme,
            override_dpi: old.override_dpi,
            highlight_unread_events: old.highlight_unread_events,
            posting_area_at_top: old.posting_area_at_top,
            ..Default::default()
        }
    }
}
impl Storage {
    /// Store v2 settings speedy-serialized under the b"settings2" key of
    /// the general table. Writes through `rw_txn` if given, otherwise
    /// opens and commits its own write transaction.
    #[allow(dead_code)]
    pub(crate) fn write_settings2<'a>(
        &'a self,
        settings: &Settings2,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let bytes = settings.write_to_vec()?;

        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            self.general.put(txn, b"settings2", &bytes)?;
            Ok(())
        };

        match rw_txn {
            Some(txn) => f(txn)?,
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };

        Ok(())
    }

    /// Load v2 settings from the b"settings2" key; Ok(None) if absent.
    #[allow(dead_code)]
    pub(crate) fn read_settings2(&self) -> Result<Option<Settings2>, Error> {
        let txn = self.env.read_txn()?;

        match self.general.get(&txn, b"settings2")? {
            None => Ok(None),
            Some(bytes) => Ok(Some(Settings2::read_from_buffer(bytes)?)),
        }
    }

    /// Deserialize Settings2 from the old b"settings" key (where v1 lived).
    /// NOTE(review): presumably a migration recovery path for data that was
    /// written under the wrong key — confirm against the migration that
    /// calls this.
    #[allow(dead_code)]
    pub(crate) fn read_settings2_from_wrong_key(&self) -> Result<Option<Settings2>, Error> {
        let txn = self.env.read_txn()?;

        match self.general.get(&txn, b"settings")? {
            None => Ok(None),
            Some(bytes) => Ok(Some(Settings2::read_from_buffer(bytes)?)),
        }
    }
}

View File

@ -1,60 +0,0 @@
use crate::error::Error;
use crate::storage::Storage;
use heed::RwTxn;
use serde::{Deserialize, Serialize};
use speedy::{Readable, Writable};
// THIS IS HISTORICAL FOR MIGRATIONS AND THE STRUCTURES SHOULD NOT BE EDITED
// note: if we store anything inside the variants, we can't use macro_rules.
/// Historical theme variant (schema version 1), kept only for migrations.
/// Serialized with speedy; do not add, remove or reorder variants.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, Readable, Writable)]
pub enum ThemeVariant1 {
    Classic,
    Default,
    Roundy,
}
/// Historical theme selection (schema version 1), kept only for migrations.
/// Serialized with speedy; do not alter the field set or order.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Serialize, Deserialize, Readable, Writable)]
pub struct Theme1 {
    pub variant: ThemeVariant1,
    pub dark_mode: bool,
    // Whether the OS dark-mode preference overrides `dark_mode`.
    pub follow_os_dark_mode: bool,
}
impl ThemeVariant1 {
    /// Human-readable name of this theme variant.
    pub fn name(&self) -> &'static str {
        match self {
            ThemeVariant1::Classic => "Classic",
            ThemeVariant1::Default => "Default",
            ThemeVariant1::Roundy => "Roundy",
        }
    }
}
impl Storage {
    /// Store the v1 theme speedy-serialized under the b"theme" key of the
    /// general table. Writes through `rw_txn` if given, otherwise opens
    /// and commits its own write transaction.
    #[allow(dead_code)]
    pub(crate) fn write_setting_theme1<'a>(
        &'a self,
        theme: &Theme1,
        rw_txn: Option<&mut RwTxn<'a>>,
    ) -> Result<(), Error> {
        let bytes = theme.write_to_vec()?;

        let f = |txn: &mut RwTxn<'a>| -> Result<(), Error> {
            Ok(self.general.put(txn, b"theme", &bytes)?)
        };

        match rw_txn {
            Some(txn) => {
                f(txn)?;
            }
            None => {
                let mut txn = self.env.write_txn()?;
                f(&mut txn)?;
                txn.commit()?;
            }
        };

        Ok(())
    }
}