// SPDX-FileCopyrightText: 2017 Konstantinos Sideris <siderisk@auth.gr>
// SPDX-FileCopyrightText: 2021 Nheko Contributors
//
// SPDX-License-Identifier: GPL-3.0-or-later
#include <limits>
#include <QCryptographicHash>
#if __has_include(<keychain.h>)
#include <keychain.h>
#else
#include <qt5keychain/keychain.h>
#endif
#include <mtx/responses/common.hpp>
#include "UserSettingsPage.h"
//! Should be changed when a breaking change occurs in the cache format.
//! This will reset the client's data.
static const std::string CURRENT_CACHE_FORMAT_VERSION("2020.10.20");
static const std::string SECRET("secret");
static const std::string_view NEXT_BATCH_KEY("next_batch");
static const std::string_view OLM_ACCOUNT_KEY("olm_account");
static const std::string_view CACHE_FORMAT_VERSION_KEY("cache_format_version");
constexpr size_t MAX_RESTORED_MESSAGES = 30'000;
constexpr auto DB_SIZE = 32ULL * 1024ULL * 1024ULL * 1024ULL; // 32 GB
//! Maximum number of named LMDB databases (needed by env_.set_max_dbs() in setup(); the exact limit is assumed here).
constexpr auto MAX_DBS = 32384UL;
//! Cache databases and their format.
//!
//! Contains UI information for the joined rooms (i.e. name, topic, avatar URL, etc.).
//! Format: room_id -> RoomInfo
constexpr auto ROOMS_DB("rooms");
constexpr auto INVITES_DB("invites");
//! Information that must be kept between sync requests.
constexpr auto SYNC_STATE_DB("sync_state");
constexpr auto READ_RECEIPTS_DB("read_receipts");
constexpr auto NOTIFICATIONS_DB("sent_notifications");
//! Encryption related databases.
//! user_id -> list of devices
constexpr auto DEVICES_DB("devices");
//! device_id -> device keys
constexpr auto DEVICE_KEYS_DB("device_keys");
//! room_ids that have encryption enabled.
constexpr auto ENCRYPTED_ROOMS_DB("encrypted_rooms");
//! MegolmSessionIndex -> pickled OlmInboundGroupSession
constexpr auto INBOUND_MEGOLM_SESSIONS_DB("inbound_megolm_sessions");
//! room_id -> pickled OlmOutboundGroupSession
constexpr auto OUTBOUND_MEGOLM_SESSIONS_DB("outbound_megolm_sessions");
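//! CachedReceipts keeps read receipts ordered by timestamp, newest first (std::greater<uint64_t>).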
using CachedReceipts = std::multimap<uint64_t, std::string, std::greater<uint64_t>>;
using Receipts = std::map<std::string, std::map<std::string, uint64_t>>;
Q_DECLARE_METATYPE(RoomMember)
Q_DECLARE_METATYPE(mtx::responses::Timeline)
Q_DECLARE_METATYPE(RoomSearchResult)
Q_DECLARE_METATYPE(RoomInfo)
Q_DECLARE_METATYPE(mtx::responses::QueryKeys)
std::unique_ptr<Cache> instance_ = nullptr;
template<class T>
bool
containsStateUpdates(const T &e)
{
return std::visit([](const auto &ev) { return Cache::isStateEvent(ev); }, e);
}
bool
containsStateUpdates(const mtx::events::collections::StrippedEvents &e)
{
using namespace mtx::events;
using namespace mtx::events::state;
return std::holds_alternative<StrippedEvent<state::Avatar>>(e) ||
std::holds_alternative<StrippedEvent<CanonicalAlias>>(e) ||
std::holds_alternative<StrippedEvent<Name>>(e) ||
std::holds_alternative<StrippedEvent<Member>>(e) ||
std::holds_alternative<StrippedEvent<Topic>>(e);
}
bool
Cache::isHiddenEvent(lmdb::txn &txn,
mtx::events::collections::TimelineEvents e,
const std::string &room_id)
{
using namespace mtx::events;
// Always hide edits
if (mtx::accessors::relations(e).replaces())
return true;
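// For encrypted events, attempt decryption first so the hidden-event check below
// runs against the actual event type instead of m.room.encrypted.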
if (auto encryptedEvent = std::get_if<EncryptedEvent<msg::Encrypted>>(&e)) {
MegolmSessionIndex index;
index.room_id = room_id;
index.session_id = encryptedEvent->content.session_id;
index.sender_key = encryptedEvent->content.sender_key;
auto result = olm::decryptEvent(index, *encryptedEvent);
if (!result.error)
e = result.event.value();
}
mtx::events::account_data::nheko_extensions::HiddenEvents hiddenEvents;
hiddenEvents.hidden_event_types = {
EventType::Reaction, EventType::CallCandidates, EventType::Unsupported};
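// Account-level account data overrides these defaults, and room-level account data
// overrides both, since each later assignment replaces the whole list.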
if (auto temp = getAccountData(txn, mtx::events::EventType::NhekoHiddenEvents, ""))
hiddenEvents =
std::move(std::get<mtx::events::AccountDataEvent<
mtx::events::account_data::nheko_extensions::HiddenEvents>>(*temp)
.content);
if (auto temp = getAccountData(txn, mtx::events::EventType::NhekoHiddenEvents, room_id))
hiddenEvents =
std::move(std::get<mtx::events::AccountDataEvent<
mtx::events::account_data::nheko_extensions::HiddenEvents>>(*temp)
.content);
return std::visit(
[hiddenEvents](const auto &ev) {
return std::any_of(hiddenEvents.hidden_event_types.begin(),
hiddenEvents.hidden_event_types.end(),
[ev](EventType type) { return type == ev.type; });
},
e);
}
Cache::Cache(const QString &userId, QObject *parent)
: QObject{parent}
, env_{nullptr}
, localUserId_{userId}
{
setup();
connect(this, &Cache::userKeysUpdate, this, &Cache::updateUserKeys, Qt::QueuedConnection);
}
void
Cache::setup()
{
nhlog::db()->debug("setting up cache");
// Previous location of the cache directory
auto oldCache = QString("%1/%2%3")
.arg(QStandardPaths::writableLocation(QStandardPaths::CacheLocation))
.arg(QString::fromUtf8(localUserId_.toUtf8().toHex()))
.arg(QString::fromUtf8(settings->profile().toUtf8().toHex()));
cacheDirectory_ = QString("%1/%2%3")
.arg(QStandardPaths::writableLocation(QStandardPaths::AppDataLocation))
.arg(QString::fromUtf8(settings->profile().toUtf8().toHex()));
bool isInitial = !QFile::exists(cacheDirectory_);
// NOTE: If both cache directories exist it's better to do nothing: it
// could mean a previous migration failed or was interrupted.
bool needsMigration = isInitial && QFile::exists(oldCache);
if (needsMigration) {
nhlog::db()->info("found old state directory, migrating");
if (!QDir().rename(oldCache, cacheDirectory_)) {
throw std::runtime_error(("Unable to migrate the old state directory (" +
oldCache + ") to the new location (" +
cacheDirectory_ + ")")
.toStdString()
.c_str());
}
nhlog::db()->info("completed state migration");
}
env_ = lmdb::env::create();
env_.set_mapsize(DB_SIZE);
env_.set_max_dbs(MAX_DBS);
if (isInitial) {
nhlog::db()->info("initializing LMDB");
if (!QDir().mkpath(cacheDirectory_)) {
throw std::runtime_error(
("Unable to create state directory:" + cacheDirectory_)
.toStdString()
.c_str());
}
}
try {
// NOTE(Nico): We may want to use (MDB_MAPASYNC | MDB_WRITEMAP) in the future, but
// it can really mess up our database, so we shouldn't. For now, hopefully
// NOMETASYNC is fast enough.
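// Dropping sync guarantees trades crash durability for write throughput; the cache
// can always be rebuilt from the homeserver with a fresh initial sync.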
env_.open(cacheDirectory_.toStdString().c_str(), MDB_NOMETASYNC | MDB_NOSYNC);
} catch (const lmdb::error &e) {
if (e.code() != MDB_VERSION_MISMATCH && e.code() != MDB_INVALID) {
throw std::runtime_error("LMDB initialization failed" +
std::string(e.what()));
}
nhlog::db()->warn("resetting cache due to LMDB version mismatch: {}", e.what());
QDir stateDir(cacheDirectory_);
for (const auto &file : stateDir.entryList(QDir::NoDotAndDotDot)) {
if (!stateDir.remove(file))
throw std::runtime_error(
("Unable to delete file " + file).toStdString().c_str());
}
env_.open(cacheDirectory_.toStdString().c_str());
}
auto txn = lmdb::txn::begin(env_);
syncStateDb_ = lmdb::dbi::open(txn, SYNC_STATE_DB, MDB_CREATE);
roomsDb_ = lmdb::dbi::open(txn, ROOMS_DB, MDB_CREATE);
invitesDb_ = lmdb::dbi::open(txn, INVITES_DB, MDB_CREATE);
readReceiptsDb_ = lmdb::dbi::open(txn, READ_RECEIPTS_DB, MDB_CREATE);
notificationsDb_ = lmdb::dbi::open(txn, NOTIFICATIONS_DB, MDB_CREATE);
// Device management
devicesDb_ = lmdb::dbi::open(txn, DEVICES_DB, MDB_CREATE);
deviceKeysDb_ = lmdb::dbi::open(txn, DEVICE_KEYS_DB, MDB_CREATE);
// Session management
inboundMegolmSessionDb_ = lmdb::dbi::open(txn, INBOUND_MEGOLM_SESSIONS_DB, MDB_CREATE);
outboundMegolmSessionDb_ = lmdb::dbi::open(txn, OUTBOUND_MEGOLM_SESSIONS_DB, MDB_CREATE);
txn.commit();
}
void
Cache::setEncryptedRoom(lmdb::txn &txn, const std::string &room_id)
{
nhlog::db()->info("mark room {} as encrypted", room_id);
auto db = lmdb::dbi::open(txn, ENCRYPTED_ROOMS_DB, MDB_CREATE);
// The stored value is only a marker; isRoomEncrypted() just checks for the key.
db.put(txn, room_id, "0");
}
bool
Cache::isRoomEncrypted(const std::string &room_id)
{
std::string_view unused;
auto txn = lmdb::txn::begin(env_);
auto db = lmdb::dbi::open(txn, ENCRYPTED_ROOMS_DB, MDB_CREATE);
auto res = db.get(txn, room_id, unused);
txn.commit();
return res;
}
std::optional<mtx::events::state::Encryption>
Cache::roomEncryptionSettings(const std::string &room_id)
{
using namespace mtx::events;
using namespace mtx::events::state;
try {
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
auto statesdb = getStatesDb(txn, room_id);
std::string_view event;
bool res =
statesdb.get(txn, to_string(mtx::events::EventType::RoomEncryption), event);
if (res) {
try {
StateEvent<Encryption> msg = json::parse(event);
return msg.content;
} catch (const json::exception &e) {
nhlog::db()->warn("failed to parse m.room.encryption event: {}",
e.what());
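// Parsing failed, but the state event exists, so still treat the room as
// encrypted by returning a default-constructed Encryption{}.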
return Encryption{};
}
}
} catch (lmdb::error &) {
}
return std::nullopt;
}
mtx::crypto::ExportedSessionKeys
Cache::exportSessionKeys()
{
using namespace mtx::crypto;
ExportedSessionKeys keys;
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
auto cursor = lmdb::cursor::open(txn, inboundMegolmSessionDb_);
std::string_view key, value;
while (cursor.get(key, value, MDB_NEXT)) {
ExportedSession exported;
MegolmSessionIndex index;
auto saved_session = unpickle<InboundSessionObject>(std::string(value), SECRET);
try {
index = nlohmann::json::parse(key).get<MegolmSessionIndex>();
} catch (const nlohmann::json::exception &e) {
nhlog::db()->critical("failed to export megolm session: {}", e.what());
continue;
}
exported.room_id = index.room_id;
exported.sender_key = index.sender_key;
exported.session_id = index.session_id;
exported.session_key = export_session(saved_session.get());
keys.sessions.push_back(exported);
}
cursor.close();
txn.commit();
return keys;
}
void
Cache::importSessionKeys(const mtx::crypto::ExportedSessionKeys &keys)
{
for (const auto &s : keys.sessions) {
MegolmSessionIndex index;
index.room_id = s.room_id;
index.session_id = s.session_id;
index.sender_key = s.sender_key;
auto exported_session = mtx::crypto::import_session(s.session_key);
saveInboundMegolmSession(index, std::move(exported_session));
ChatPage::instance()->receivedSessionKey(index.room_id, index.session_id);
}
}
//
// Session Management
//
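// Inbound group sessions are keyed by the serialized MegolmSessionIndex (room id,
// session id, sender key); outbound group sessions are keyed by the room id alone.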
void
Cache::saveInboundMegolmSession(const MegolmSessionIndex &index,
mtx::crypto::InboundGroupSessionPtr session)
{
using namespace mtx::crypto;
const auto key = json(index).dump();
const auto pickled = pickle<InboundSessionObject>(session.get(), SECRET);
auto txn = lmdb::txn::begin(env_);
inboundMegolmSessionDb_.put(txn, key, pickled);
txn.commit();
}
mtx::crypto::InboundGroupSessionPtr
Cache::getInboundMegolmSession(const MegolmSessionIndex &index)
{
using namespace mtx::crypto;
try {
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string key = json(index).dump();
std::string_view value;
if (inboundMegolmSessionDb_.get(txn, key, value)) {
auto session = unpickle<InboundSessionObject>(std::string(value), SECRET);
return session;
}
} catch (std::exception &e) {
nhlog::db()->error("Failed to get inbound megolm session {}", e.what());
}
return nullptr;
}
bool
Cache::inboundMegolmSessionExists(const MegolmSessionIndex &index)
{
using namespace mtx::crypto;
try {
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string key = json(index).dump();
std::string_view value;
return inboundMegolmSessionDb_.get(txn, key, value);
} catch (std::exception &e) {
nhlog::db()->error("Failed to get inbound megolm session {}", e.what());
}
return false;
}
void
Cache::updateOutboundMegolmSession(const std::string &room_id,
const OutboundGroupSessionData &data_,
mtx::crypto::OutboundGroupSessionPtr &ptr)
{
using namespace mtx::crypto;
if (!outboundMegolmSessionExists(room_id))
return;
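// Refresh the stored metadata (message index, session id and key) from the live
// session before re-pickling it below.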
OutboundGroupSessionData data = data_;
data.message_index = olm_outbound_group_session_message_index(ptr.get());
data.session_id = mtx::crypto::session_id(ptr.get());
data.session_key = mtx::crypto::session_key(ptr.get());
// Save the updated pickled data for the session.
json j;
j["data"] = data;
j["session"] = pickle<OutboundSessionObject>(ptr.get(), SECRET);
auto txn = lmdb::txn::begin(env_);
outboundMegolmSessionDb_.put(txn, room_id, j.dump());
txn.commit();
}
void
Cache::dropOutboundMegolmSession(const std::string &room_id)
{
using namespace mtx::crypto;
if (!outboundMegolmSessionExists(room_id))
return;
{
auto txn = lmdb::txn::begin(env_);
outboundMegolmSessionDb_.del(txn, room_id);
txn.commit();
}
}
void
Cache::saveOutboundMegolmSession(const std::string &room_id,
const OutboundGroupSessionData &data,
mtx::crypto::OutboundGroupSessionPtr &session)
{
using namespace mtx::crypto;
const auto pickled = pickle<OutboundSessionObject>(session.get(), SECRET);
json j;
j["data"] = data;
j["session"] = pickled;
auto txn = lmdb::txn::begin(env_);
outboundMegolmSessionDb_.put(txn, room_id, j.dump());
txn.commit();
}
bool
Cache::outboundMegolmSessionExists(const std::string &room_id) noexcept
{
try {
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string_view value;
return outboundMegolmSessionDb_.get(txn, room_id, value);
} catch (std::exception &e) {
nhlog::db()->error("Failed to retrieve outbound Megolm Session: {}", e.what());
return false;
}
}
OutboundGroupSessionDataRef
Cache::getOutboundMegolmSession(const std::string &room_id)
{
try {
using namespace mtx::crypto;
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string_view value;
outboundMegolmSessionDb_.get(txn, room_id, value);
auto obj = json::parse(value);
OutboundGroupSessionDataRef ref{};
ref.data = obj.at("data").get<OutboundGroupSessionData>();
ref.session = unpickle<OutboundSessionObject>(obj.at("session"), SECRET);
return ref;
} catch (std::exception &e) {
nhlog::db()->error("Failed to retrieve outbound Megolm Session: {}", e.what());
return {};
}
}
//
// OLM sessions.
//
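// Olm sessions are grouped per curve25519 identity key (getOlmSessionsDb) and stored
// under their session id, together with the timestamp of the last message.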
void
Cache::saveOlmSession(const std::string &curve25519,
mtx::crypto::OlmSessionPtr session,
uint64_t timestamp)
{
using namespace mtx::crypto;
auto txn = lmdb::txn::begin(env_);
auto db = getOlmSessionsDb(txn, curve25519);
const auto pickled = pickle<SessionObject>(session.get(), SECRET);
const auto session_id = mtx::crypto::session_id(session.get());
StoredOlmSession stored_session;
stored_session.pickled_session = pickled;
stored_session.last_message_ts = timestamp;
db.put(txn, session_id, json(stored_session).dump());
txn.commit();
}
std::optional<mtx::crypto::OlmSessionPtr>
Cache::getOlmSession(const std::string &curve25519, const std::string &session_id)
{
using namespace mtx::crypto;
auto txn = lmdb::txn::begin(env_);
auto db = getOlmSessionsDb(txn, curve25519);
std::string_view pickled;
bool found = db.get(txn, session_id, pickled);
txn.commit();
if (found) {
auto data = json::parse(pickled).get<StoredOlmSession>();
return unpickle<SessionObject>(data.pickled_session, SECRET);
}
return std::nullopt;
}
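// The "latest" session below is simply the stored session with the highest last_message_ts.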
std::optional<mtx::crypto::OlmSessionPtr>
Cache::getLatestOlmSession(const std::string &curve25519)
{
using namespace mtx::crypto;
auto txn = lmdb::txn::begin(env_);
auto db = getOlmSessionsDb(txn, curve25519);
std::string_view session_id, pickled_session;
std::optional<StoredOlmSession> currentNewest;
auto cursor = lmdb::cursor::open(txn, db);
while (cursor.get(session_id, pickled_session, MDB_NEXT)) {
auto data = json::parse(pickled_session).get<StoredOlmSession>();
if (!currentNewest || currentNewest->last_message_ts < data.last_message_ts)
currentNewest = data;
}
cursor.close();
txn.commit();
return currentNewest
? std::optional(unpickle<SessionObject>(currentNewest->pickled_session, SECRET))
: std::nullopt;
}
std::vector<std::string>
Cache::getOlmSessions(const std::string &curve25519)
{
using namespace mtx::crypto;
auto txn = lmdb::txn::begin(env_);
auto db = getOlmSessionsDb(txn, curve25519);
std::string_view session_id, unused;
std::vector<std::string> res;
auto cursor = lmdb::cursor::open(txn, db);
while (cursor.get(session_id, unused, MDB_NEXT))
res.emplace_back(session_id);
cursor.close();
txn.commit();
return res;
}
void
Cache::saveOlmAccount(const std::string &data)
{
auto txn = lmdb::txn::begin(env_);
syncStateDb_.put(txn, OLM_ACCOUNT_KEY, data);
txn.commit();
}
std::string
Cache::restoreOlmAccount()
{
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string_view pickled;
syncStateDb_.get(txn, OLM_ACCOUNT_KEY, pickled);
txn.commit();
return std::string(pickled.data(), pickled.size());
}
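//
// Secret storage.
//
// Secrets go through QtKeychain under a key derived from the hashed profile and the
// secret name; setInsecureFallback(true) permits storage when no system keyring exists.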
void
Cache::storeSecret(const std::string &name, const std::string &secret)
{
auto settings = UserSettings::instance();
QKeychain::WritePasswordJob job(QCoreApplication::applicationName());
job.setAutoDelete(false);
job.setInsecureFallback(true);
job.setKey("matrix." +
QString(QCryptographicHash::hash(settings->profile().toUtf8(),
QCryptographicHash::Sha256)) +
"." + name.c_str());
job.setTextData(QString::fromStdString(secret));
QEventLoop loop;
job.connect(&job, &QKeychain::Job::finished, &loop, &QEventLoop::quit);
job.start();
loop.exec();
if (job.error()) {
nhlog::db()->warn(
"Storing secret '{}' failed: {}", name, job.errorString().toStdString());
} else {
emit secretChanged(name);
}
}
void
Cache::deleteSecret(const std::string &name)
{
auto settings = UserSettings::instance();
QKeychain::DeletePasswordJob job(QCoreApplication::applicationName());
job.setAutoDelete(false);
job.setInsecureFallback(true);
job.setKey("matrix." +
QString(QCryptographicHash::hash(settings->profile().toUtf8(),
QCryptographicHash::Sha256)) +
"." + name.c_str());
QEventLoop loop;
job.connect(&job, &QKeychain::Job::finished, &loop, &QEventLoop::quit);
job.start();
loop.exec();
emit secretChanged(name);
}
std::optional<std::string>
Cache::secret(const std::string &name)
{
auto settings = UserSettings::instance();
QKeychain::ReadPasswordJob job(QCoreApplication::applicationName());
job.setAutoDelete(false);
job.setInsecureFallback(true);
job.setKey("matrix." +
QString(QCryptographicHash::hash(settings->profile().toUtf8(),
QCryptographicHash::Sha256)) +
"." + name.c_str());
QEventLoop loop;
job.connect(&job, &QKeychain::Job::finished, &loop, &QEventLoop::quit);
job.start();
loop.exec();
const QString secret = job.textData();
if (job.error()) {
nhlog::db()->debug(
"Restoring secret '{}' failed: {}", name, job.errorString().toStdString());
return std::nullopt;
}
if (secret.isEmpty()) {
nhlog::db()->debug("Restored empty secret '{}'.", name);
return std::nullopt;
}
return secret.toStdString();
}
void
Cache::removeInvite(lmdb::txn &txn, const std::string &room_id)
{
invitesDb_.del(txn, room_id);
getInviteStatesDb(txn, room_id).drop(txn, true);
getInviteMembersDb(txn, room_id).drop(txn, true);
}
void
Cache::removeInvite(const std::string &room_id)
{
auto txn = lmdb::txn::begin(env_);
removeInvite(txn, room_id);
txn.commit();
}
void
Cache::removeRoom(lmdb::txn &txn, const std::string &roomid)
{
roomsDb_.del(txn, roomid);
getStatesDb(txn, roomid).drop(txn, true);
getAccountDataDb(txn, roomid).drop(txn, true);
getMembersDb(txn, roomid).drop(txn, true);
}
void
Cache::removeRoom(const std::string &roomid)
{
auto txn = lmdb::txn::begin(env_, nullptr, 0);
roomsDb_.del(txn, roomid);
txn.commit();
}
void
Cache::setNextBatchToken(lmdb::txn &txn, const std::string &token)
{
syncStateDb_.put(txn, NEXT_BATCH_KEY, token);
}
void
Cache::setNextBatchToken(lmdb::txn &txn, const QString &token)
{
setNextBatchToken(txn, token.toStdString());
}
bool
Cache::isInitialized()
{
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string_view token;
bool res = syncStateDb_.get(txn, NEXT_BATCH_KEY, token);
txn.commit();
return res;
}
std::string
Cache::nextBatchToken()
{
if (!env_.handle())
throw lmdb::error("Env already closed", MDB_INVALID);
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string_view token;
bool result = syncStateDb_.get(txn, NEXT_BATCH_KEY, token);
txn.commit();
if (result)
return std::string(token.data(), token.size());
else
return "";
}
void
Cache::deleteData()
{
// TODO: We need to remove the env_ while not accepting new requests.
lmdb::dbi_close(env_, syncStateDb_);
lmdb::dbi_close(env_, roomsDb_);
lmdb::dbi_close(env_, invitesDb_);
lmdb::dbi_close(env_, readReceiptsDb_);
lmdb::dbi_close(env_, notificationsDb_);
lmdb::dbi_close(env_, devicesDb_);
lmdb::dbi_close(env_, deviceKeysDb_);
lmdb::dbi_close(env_, inboundMegolmSessionDb_);
lmdb::dbi_close(env_, outboundMegolmSessionDb_);
env_.close();
verification_storage.status.clear();
if (!cacheDirectory_.isEmpty()) {
QDir(cacheDirectory_).removeRecursively();
nhlog::db()->info("deleted cache files from disk");
deleteSecret(mtx::secret_storage::secrets::megolm_backup_v1);
deleteSecret(mtx::secret_storage::secrets::cross_signing_master);
deleteSecret(mtx::secret_storage::secrets::cross_signing_user_signing);
deleteSecret(mtx::secret_storage::secrets::cross_signing_self_signing);
}
bool
Cache::runMigrations()
{
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);
std::string_view current_version;
bool res = syncStateDb_.get(txn, CACHE_FORMAT_VERSION_KEY, current_version);
txn.commit();
if (!res)
return false;
std::string stored_version(current_version.data(), current_version.size());
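// Migrations are (target version, callback) pairs; every callback whose target version
// is newer than the stored version runs, in order (see the loop after this list).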
std::vector<std::pair<std::string, std::function<bool()>>> migrations{
{"2020.05.01",
[this]() {
try {
auto txn = lmdb::txn::begin(env_, nullptr);
auto pending_receipts =
lmdb::dbi::open(txn, "pending_receipts", MDB_CREATE);
lmdb::dbi_drop(txn, pending_receipts, true);
txn.commit();
} catch (const lmdb::error &) {
nhlog::db()->critical(
"Failed to delete pending_receipts database in migration!");
return false;
}
nhlog::db()->info("Successfully deleted pending receipts database.");
return true;
}},
{"2020.07.05",
[this]() {
try {
auto txn = lmdb::txn::begin(env_, nullptr);
auto room_ids = getRoomIds(txn);
for (const auto &room_id : room_ids) {
try {
auto messagesDb = lmdb::dbi::open(
txn, std::string(room_id + "/messages").c_str());
// keep some old messages and batch token
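// Only the first run of events sharing the same pagination token (the most recently
// stored batch) is migrated; everything older is dropped with the old database.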
{
auto roomsCursor =
lmdb::cursor::open(txn, messagesDb);
std::string_view ts, stored_message;
bool start = true;
mtx::responses::Timeline oldMessages;
while (roomsCursor.get(ts,
stored_message,
start ? MDB_FIRST
: MDB_NEXT)) {
start = false;
auto j = json::parse(std::string_view(
stored_message.data(),
stored_message.size()));
if (oldMessages.prev_batch.empty())
oldMessages.prev_batch =
j["token"].get<std::string>();
else if (j["token"] !=
oldMessages.prev_batch)
break;
mtx::events::collections::TimelineEvent
te;
mtx::events::collections::from_json(
j["event"], te);
oldMessages.events.push_back(te.data);
}
// messages were stored in reverse order, so we
// need to reverse them
std::reverse(oldMessages.events.begin(),
oldMessages.events.end());
// save messages using the new method
saveTimelineMessages(txn, room_id, oldMessages);
}
// delete old messages db
lmdb::dbi_drop(txn, messagesDb, true);
} catch (std::exception &e) {
nhlog::db()->error(
"While migrating messages from {}, ignoring error {}",
room_id,
e.what());
}
}
txn.commit();
} catch (const lmdb::error &) {
nhlog::db()->critical(
"Failed to delete messages database in migration!");
return false;
}
nhlog::db()->info("Successfully deleted pending receipts database.");
return true;
}},
{"2020.10.20",
[this]() {
try {
using namespace mtx::crypto;
auto txn = lmdb::txn::begin(env_);
auto mainDb = lmdb::dbi::open(txn, nullptr);
std::string_view dbName, ignored;
auto olmDbCursor = lmdb::cursor::open(txn, mainDb);
while (olmDbCursor.get(dbName, ignored, MDB_NEXT)) {
// skip every db but olm session dbs
nhlog::db()->debug("Db {}", dbName);
if (dbName.find("olm_sessions/") != 0)
continue;
nhlog::db()->debug("Migrating {}", dbName);
auto olmDb = lmdb::dbi::open(txn, std::string(dbName).c_str());
std::string_view session_id, session_value;
std::vector<std::pair<std::string, StoredOlmSession>> sessions;
auto cursor = lmdb::cursor::open(txn, olmDb);
while (cursor.get(session_id, session_value, MDB_NEXT)) {
nhlog::db()->debug("session_id {}, session_value {}",
session_id,
session_value);
StoredOlmSession session;
bool invalid = false;
for (auto c : session_value)
if (!isprint(c)) {
invalid = true;
break;
}
if (invalid)
continue;
nhlog::db()->debug("Not skipped");
session.pickled_session = session_value;
sessions.emplace_back(session_id, session);
}
cursor.close();
olmDb.drop(txn, true);
auto newDbName = std::string(dbName);
newDbName.erase(0, sizeof("olm_sessions") - 1);
newDbName = "olm_sessions.v2" + newDbName;
auto newDb = lmdb::dbi::open(txn, newDbName.c_str(), MDB_CREATE);
for (const auto &[key, value] : sessions) {
// nhlog::db()->debug("{}\n{}", key, json(value).dump());
newDb.put(txn, key, json(value).dump());
}
}
olmDbCursor.close();
txn.commit();
} catch (const lmdb::error &) {
nhlog::db()->critical("Failed to migrate olm sessions,");
return false;
}
nhlog::db()->info("Successfully migrated olm sessions.");
return true;
}},
};
nhlog::db()->info("Running migrations, this may take a while!");
for (const auto &[target_version, migration] : migrations) {
if (target_version > stored_version)
if (!migration()) {
nhlog::db()->critical("migration failure!");
return false;
}
}
nhlog::db()->info("Migrations finished.");
return true;
}
cache::CacheVersion
Cache::formatVersion()
{
auto txn = lmdb::txn::begin(env_, nullptr, MDB_RDONLY);