Use Bytes32 instead of std::string where possible, to reduce memory usage

Author: Doug Hoyte
Date: 2024-09-05 15:12:40 -04:00
parent 55fa4dc032
commit 169e633a08
7 changed files with 82 additions and 32 deletions
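Why this helps: a Nostr event id or pubkey is always exactly 32 bytes, but keeping it in a std::string map key typically costs the string object itself plus a separate heap allocation (32 bytes exceeds the small-string buffer of common standard-library implementations), along with a pointer chase on every comparison. A fixed-size 32-byte value type keeps the bytes inline in the btree node. The sketch below is a minimal illustration, under assumptions, of what such a type needs in order to act as a btree_map key and to be built from the string-view style values seen in the diff; the repository's actual Bytes32.h may differ.

#include <cstdint>
#include <cstring>
#include <stdexcept>
#include <string_view>

// Illustrative sketch only: the real Bytes32.h in this repo may be implemented differently.
struct Bytes32 {
    uint8_t buf[32];

    explicit Bytes32(std::string_view sv) {
        // Event ids and pubkeys are raw 32-byte values; reject anything else.
        if (sv.size() != sizeof(buf)) throw std::runtime_error("Bytes32: expected exactly 32 bytes");
        std::memcpy(buf, sv.data(), sizeof(buf));
    }

    // Byte-wise equality and ordering, so the type can serve as a btree_map key
    // and be compared inside the monitor callbacks.
    bool operator==(const Bytes32 &o) const { return std::memcmp(buf, o.buf, sizeof(buf)) == 0; }
    bool operator<(const Bytes32 &o) const { return std::memcmp(buf, o.buf, sizeof(buf)) < 0; }
};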


@@ -4,6 +4,7 @@
 #include "golpe.h"
 
+#include "Bytes32.h"
 #include "Subscription.h"
 #include "filters.h"
@@ -27,8 +28,8 @@ struct ActiveMonitors : NonCopyable {
     };
     using MonitorSet = flat_hash_map<NostrFilter*, MonitorItem>;
-    btree_map<std::string, MonitorSet> allIds;
-    btree_map<std::string, MonitorSet> allAuthors;
+    btree_map<Bytes32, MonitorSet> allIds;
+    btree_map<Bytes32, MonitorSet> allAuthors;
     btree_map<std::string, MonitorSet> allTags;
     btree_map<uint64_t, MonitorSet> allKinds;
     MonitorSet allOthers;
@@ -116,15 +117,15 @@ struct ActiveMonitors : NonCopyable {
         auto packed = PackedEventView(ev.buf);
 
         {
-            auto id = std::string(packed.id());
-            processMonitorsExact(allIds, id, static_cast<std::function<bool(const std::string&)>>([&](const std::string &val){
+            Bytes32 id(packed.id());
+            processMonitorsExact(allIds, id, static_cast<std::function<bool(const Bytes32&)>>([&](const Bytes32 &val){
                 return id == val;
             }));
         }
 
         {
-            auto pubkey = std::string(packed.pubkey());
-            processMonitorsExact(allAuthors, pubkey, static_cast<std::function<bool(const std::string&)>>([&](const std::string &val){
+            Bytes32 pubkey(packed.pubkey());
+            processMonitorsExact(allAuthors, pubkey, static_cast<std::function<bool(const Bytes32&)>>([&](const Bytes32 &val){
                 return pubkey == val;
             }));
         }
@@ -167,12 +168,12 @@ struct ActiveMonitors : NonCopyable {
         for (auto &f : m->sub.filterGroup.filters) {
             if (f.ids) {
                 for (size_t i = 0; i < f.ids->size(); i++) {
-                    auto res = allIds.try_emplace(f.ids->at(i));
+                    auto res = allIds.try_emplace(Bytes32(f.ids->at(i)));
                     res.first->second.try_emplace(&f, MonitorItem{m, currEventId});
                 }
             } else if (f.authors) {
                 for (size_t i = 0; i < f.authors->size(); i++) {
-                    auto res = allAuthors.try_emplace(f.authors->at(i));
+                    auto res = allAuthors.try_emplace(Bytes32(f.authors->at(i)));
                     res.first->second.try_emplace(&f, MonitorItem{m, currEventId});
                 }
             } else if (f.tags.size()) {
@@ -198,15 +199,17 @@ struct ActiveMonitors : NonCopyable {
         for (auto &f : m->sub.filterGroup.filters) {
             if (f.ids) {
                 for (size_t i = 0; i < f.ids->size(); i++) {
-                    auto &monSet = allIds.at(f.ids->at(i));
+                    Bytes32 id(f.ids->at(i));
+                    auto &monSet = allIds.at(id);
                     monSet.erase(&f);
-                    if (monSet.empty()) allIds.erase(f.ids->at(i));
+                    if (monSet.empty()) allIds.erase(id);
                 }
             } else if (f.authors) {
                 for (size_t i = 0; i < f.authors->size(); i++) {
-                    auto &monSet = allAuthors.at(f.authors->at(i));
+                    Bytes32 author(f.authors->at(i));
+                    auto &monSet = allAuthors.at(author);
                     monSet.erase(&f);
-                    if (monSet.empty()) allAuthors.erase(f.authors->at(i));
+                    if (monSet.empty()) allAuthors.erase(author);
                 }
             } else if (f.tags.size()) {
                 for (const auto &[tagName, filterSet] : f.tags) {
@@ -219,9 +222,10 @@ struct ActiveMonitors : NonCopyable {
                 }
             } else if (f.kinds) {
                 for (size_t i = 0; i < f.kinds->size(); i++) {
-                    auto &monSet = allKinds.at(f.kinds->at(i));
+                    uint64_t kind = f.kinds->at(i);
+                    auto &monSet = allKinds.at(kind);
                     monSet.erase(&f);
-                    if (monSet.empty()) allKinds.erase(f.kinds->at(i));
+                    if (monSet.empty()) allKinds.erase(kind);
                 }
             } else {
                 allOthers.erase(&f);