Mirror of https://github.com/EQEmu/Server.git

[Databuckets] Nested Caching (#4917)

* [Databuckets] Nested Caching
* One more
* Update benchmark_databuckets.cpp
* Add caching tests
* Fix tests and scoping
* Update databuckets.cpp
* Fix tests
* Rebase fixes
* [Databuckets] Implement Cache in World (#4920)

Parent: de07870c99
Commit: ea96cbf885
@@ -19,6 +19,37 @@ extern WorldDatabase database;

#error "You must define either ZONE or WORLD"
#endif

// Key: compound cache key (e.g., account_id|character_id|zone_id|instance_id|top_key|full_key)
// Value: resolved DataBuckets with extracted nested value
static std::unordered_map<std::string, DataBucketsRepository::DataBuckets> g_nested_bucket_cache;

static std::string MakeNestedCacheKey(const DataBucketKey &k, const std::string &full_key) {
	return fmt::format(
		"account_id:{}|character_id:{}|npc_id:{}|bot_id:{}|zone_id:{}|instance_id:{}|top_key:{}|full_key:{}",
		k.account_id, k.character_id, k.npc_id, k.bot_id, k.zone_id, k.instance_id,
		Strings::Split(full_key, NESTED_KEY_DELIMITER).front(),
		full_key
	);
}

static std::string MakeNestedCacheKeyPrefix(const DataBucketKey &k, const std::string &top_key) {
	return fmt::format(
		"account_id:{}|character_id:{}|npc_id:{}|bot_id:{}|zone_id:{}|instance_id:{}|top_key:{}|",
		k.account_id, k.character_id, k.npc_id, k.bot_id, k.zone_id, k.instance_id, top_key
	);
}

static void InvalidateNestedCacheForKey(const DataBucketKey &k, const std::string &top_key) {
	std::string prefix = MakeNestedCacheKeyPrefix(k, top_key);
	for (auto it = g_nested_bucket_cache.begin(); it != g_nested_bucket_cache.end(); ) {
		if (it->first.find(prefix) == 0) {
			it = g_nested_bucket_cache.erase(it);
		} else {
			++it;
		}
	}
}

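The prefix built by MakeNestedCacheKeyPrefix is what lets a single write to a top-level key evict every cached nested extraction under it, while entries under other top-level keys survive. A minimal standalone sketch of that behavior (illustrative only; the map, keys, and values below are invented, not EQEmu code):

#include <iostream>
#include <string>
#include <unordered_map>

int main()
{
	std::unordered_map<std::string, std::string> cache;

	// Two nested entries under the same owner and the top-level key "quest"
	cache["character_id:1|top_key:quest|full_key:quest.step"]   = "3";
	cache["character_id:1|top_key:quest|full_key:quest.reward"] = "gold";
	// An entry under a different top-level key that must survive invalidation
	cache["character_id:1|top_key:flags|full_key:flags.pvp"]    = "1";

	// Writing anything under "quest" drops every entry whose key starts with
	// the "quest" prefix, mirroring InvalidateNestedCacheForKey above.
	const std::string prefix = "character_id:1|top_key:quest|";
	for (auto it = cache.begin(); it != cache.end(); ) {
		if (it->first.find(prefix) == 0) {
			it = cache.erase(it);
		} else {
			++it;
		}
	}

	std::cout << cache.size() << "\n"; // 1 (only the "flags" entry remains)
}
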
void DataBucket::SetData(const std::string &bucket_key, const std::string &bucket_value, std::string expires_time)
{
	auto k = DataBucketKey{

@@ -136,6 +167,15 @@ void DataBucket::SetData(const DataBucketKey &k_)

	// Serialize JSON back to string
	b.value = json_value.dump();
	b.key_ = top_key; // Use the top-level key

	if (CanCache(k_)) {
		InvalidateNestedCacheForKey(k_, top_key);
		std::string nested_cache_key = MakeNestedCacheKey(k_, k_.key);
		auto extracted = ExtractNestedValue(b, k_.key);
		if (extracted.id > 0) {
			g_nested_bucket_cache[nested_cache_key] = extracted;
		}
	}
}

if (bucket_id) {

@@ -251,12 +291,27 @@ DataBucketsRepository::DataBuckets DataBucket::GetData(const DataBucketKey &k_,

			LogDataBuckets("Returning key [{}] value [{}] from cache", e.key_, e.value);

			if (is_nested_key && !k_.key.empty()) {
				return ExtractNestedValue(e, k_.key);
				std::string nested_cache_key = MakeNestedCacheKey(k_, k.key);

				auto it = g_nested_bucket_cache.find(nested_cache_key);
				if (it != g_nested_bucket_cache.end()) {
					LogDataBucketsDetail("Nested cache hit for key [{}]", nested_cache_key);
					return it->second;
				}

				auto extracted = ExtractNestedValue(e, k_.key);
				if (extracted.id > 0) {
					g_nested_bucket_cache[nested_cache_key] = extracted;
				}
				return extracted;
			}

			return e;
		}
	}

	// if we can cache, it's assumed we didn't load this into the cache, so we should not return a miss
	return DataBucketsRepository::NewEntity(); // Not found in cache
}

// Fetch the value from the database

@@ -315,23 +370,42 @@ DataBucketsRepository::DataBuckets DataBucket::GetData(const DataBucketKey &k_,
	}

	// Add the value to the cache if it doesn't exist
	// If cacheable and not found in cache, short-circuit and assume it doesn't exist
	if (can_cache) {
		bool has_cache      = false;
		bool found_in_cache = false;
		for (const auto &e : g_data_bucket_cache) {
			if (e.id == bucket.id) {
				has_cache = true;
			if (CheckBucketMatch(e, k)) {
				found_in_cache = true;
				break;
			}
		}

		if (!has_cache) {
			g_data_bucket_cache.emplace_back(bucket);
		if (!found_in_cache) {
			LogDataBuckets("Cache miss for key [{}] - skipping DB due to CanCache", k.key);
			return DataBucketsRepository::NewEntity();
		}
	}

	// Handle nested key extraction
	if (is_nested_key && !k_.key.empty()) {
		return ExtractNestedValue(bucket, k_.key);
		if (CanCache(k_)) {
			std::string nested_cache_key = MakeNestedCacheKey(k_, k.key);

			auto it = g_nested_bucket_cache.find(nested_cache_key);
			if (it != g_nested_bucket_cache.end()) {
				LogDataBucketsDetail("Nested cache hit for key [{}]", nested_cache_key);
				return it->second;
			}

			auto extracted = ExtractNestedValue(bucket, k_.key);
			if (extracted.id > 0) {
				g_nested_bucket_cache[nested_cache_key] = extracted;
			}
			return extracted;
		} else {
			// Not cacheable, just extract and return
			return ExtractNestedValue(bucket, k_.key);
		}
	}

	return bucket;

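The nested-read path above is a get-or-compute pattern over g_nested_bucket_cache: look up the compound key, and on a miss extract from the parent bucket and memoize only real hits. A self-contained sketch of that pattern (the Bucket struct and ExtractNested stub below are stand-ins for illustration, not the types from this commit):

#include <iostream>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for DataBucketsRepository::DataBuckets.
struct Bucket {
	int         id = 0;
	std::string value;
};

// Dummy extraction; the real code parses the bucket's JSON via ExtractNestedValue().
static Bucket ExtractNested(const Bucket &parent, const std::string &path)
{
	Bucket b;
	if (parent.id == 0) {
		return b; // miss: id stays 0, so it is never memoized
	}
	b.id    = parent.id;
	b.value = parent.value + "." + path;
	return b;
}

static std::unordered_map<std::string, Bucket> nested_cache;

static Bucket GetNested(const Bucket &parent, const std::string &cache_key, const std::string &path)
{
	// Serve the memoized extraction if we already have it.
	auto it = nested_cache.find(cache_key);
	if (it != nested_cache.end()) {
		return it->second;
	}

	// Otherwise extract and memoize only real hits (id > 0),
	// mirroring the `extracted.id > 0` guard in the diff above.
	Bucket extracted = ExtractNested(parent, path);
	if (extracted.id > 0) {
		nested_cache[cache_key] = extracted;
	}
	return extracted;
}

int main()
{
	Bucket quest;
	quest.id    = 42;
	quest.value = "quest";

	std::cout << GetNested(quest, "char:1|quest|quest.step", "step").value << "\n"; // extracts and caches
	std::cout << GetNested(quest, "char:1|quest|quest.step", "step").value << "\n"; // served from the cache
	std::cout << nested_cache.size() << "\n";                                       // 1
}
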
@@ -167,6 +167,30 @@ public:

		return zone_player_counts;
	}

	static std::vector<uint32_t> GetCharacterIDsByAccountID(
		Database& db,
		uint32_t account_id
	)
	{
		std::vector<uint32_t> character_ids;

		auto query = fmt::format(
			"SELECT id FROM character_data WHERE account_id = {} AND deleted_at IS NULL",
			account_id
		);

		auto results = db.QueryDatabase(query);
		if (results.Success()) {
			for (auto row : results) {
				if (row[0]) {
					character_ids.push_back(static_cast<uint32_t>(std::stoul(row[0])));
				}
			}
		}

		return character_ids;
	}
};

#endif //EQEMU_CHARACTER_DATA_REPOSITORY_H

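For context, this helper feeds the bucket-cache preload added to the world Client further down in this diff. A usage sketch (the account id 42 is hypothetical; the call pattern mirrors Client::LoadDataBucketsCache below):

// Illustrative only: preload account-scoped buckets, then the character-scoped
// buckets for every non-deleted character on account 42 (hypothetical id).
DataBucket::BulkLoadEntitiesToCache(DataBucketLoadType::Account, {42});
const auto ids = CharacterDataRepository::GetCharacterIDsByAccountID(database, 42);
DataBucket::BulkLoadEntitiesToCache(DataBucketLoadType::Client, ids);
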
@@ -36,6 +36,7 @@
#include "../common/shareddb.h"
#include "../common/opcodemgr.h"
#include "../common/data_verification.h"
#include "../common/data_bucket.h"

#include "client.h"
#include "worlddb.h"

@@ -134,6 +135,8 @@ Client::Client(EQStreamInterface* ieqs)
}

Client::~Client() {
	ClearDataBucketsCache();

	if (RunLoops && cle && zone_id == 0)
		cle->SetOnline(CLE_Status::Offline);

@@ -476,6 +479,8 @@ bool Client::HandleSendLoginInfoPacket(const EQApplicationPacket *app)
	LogClientLogin("Checking authentication id [{}]", id);

	if ((cle = client_list.CheckAuth(id, password))) {
		LoadDataBucketsCache();

		LogClientLogin("Checking authentication id [{}] passed", id);
		if (!is_player_zoning) {
			// Track who is in and who is out of the game

@@ -2517,3 +2522,19 @@ void Client::SendUnsupportedClientPacket(const std::string& message)

	QueuePacket(&packet);
}

void Client::LoadDataBucketsCache()
{
	DataBucket::BulkLoadEntitiesToCache(DataBucketLoadType::Account, {GetAccountID()});
	const auto ids = CharacterDataRepository::GetCharacterIDsByAccountID(database, GetAccountID());
	DataBucket::BulkLoadEntitiesToCache(DataBucketLoadType::Client, ids);
}

void Client::ClearDataBucketsCache()
{
	DataBucket::DeleteFromCache(GetAccountID(), DataBucketLoadType::Account);
	auto ids = CharacterDataRepository::GetCharacterIDsByAccountID(database, GetAccountID());
	for (const auto& id : ids) {
		DataBucket::DeleteFromCache(id, DataBucketLoadType::Client);
	}
}

@@ -121,6 +121,9 @@ private:
	bool CanTradeFVNoDropItem();
	void RecordPossibleHack(const std::string& message);
	void SendUnsupportedClientPacket(const std::string& message);

	void LoadDataBucketsCache();
	void ClearDataBucketsCache();
};

bool CheckCharCreateInfoSoF(CharCreate_Struct *cc);

@@ -195,6 +195,25 @@ void RunBenchmarkCycle(uint64_t target_rows)
	std::cout << "✅ Completed " << Strings::Commify(OPERATIONS_PER_TEST) << " cached reads in "
		<< read_cached_time.count() << " seconds. (DataBucket::GetData)\n";

	// 🔍 **Measure Client-Scoped Cache Miss Performance (Skips DB via CanCache)**
	auto read_client_cache_miss_start = std::chrono::high_resolution_clock::now();
	for (size_t i = 0; i < OPERATIONS_PER_TEST; ++i) {
		// generate key that doesn't exist
		std::string key = "nonexistent_key_" + std::to_string(i);

		DataBucketKey k{
			.key          = key,
			.character_id = 999999999, // use scoped value
		};

		DataBucket::GetData(k);
	}
	auto read_client_cache_miss_end = std::chrono::high_resolution_clock::now();
	std::chrono::duration<double> read_client_cache_miss_time = read_client_cache_miss_end - read_client_cache_miss_start;
	std::cout << "✅ Completed " << Strings::Commify(OPERATIONS_PER_TEST)
		<< " scoped cache-miss reads (no DB) in "
		<< read_client_cache_miss_time.count() << " seconds. (Client Scoped, Cache Miss, No DB)\n";

	// 🔍 **Measure Non-Cached Read Performance (Direct Query)**
	auto read_uncached_start = std::chrono::high_resolution_clock::now();
	for (size_t i = 0; i < OPERATIONS_PER_TEST; ++i) {

@@ -5,16 +5,16 @@
#include "../../client.h"
#include "../../common/net/eqstream.h"

extern Zone *zone;
extern Zone* zone;

void ZoneCLI::TestDataBuckets(int argc, char **argv, argh::parser &cmd, std::string &description)
void ZoneCLI::TestDataBuckets(int argc, char** argv, argh::parser& cmd, std::string& description)
{
	if (cmd[{"-h", "--help"}]) {
		return;
	}

	uint32 break_length = 50;
	int    failed_count = 0;

	LogSys.SilenceConsoleLogging();

@@ -25,14 +25,36 @@ void ZoneCLI::TestDataBuckets(int argc, char **argv, argh::parser &cmd, std::str
	entity_list.Process();
	entity_list.MobProcess();

	Client* client = new Client();
	client->SetCharacterId(1); // Set a dummy character ID for testing

	LogSys.EnableConsoleLogging();

	LogSys.log_settings[Logs::MySQLQuery].is_category_enabled = std::getenv("DEBUG") ? 1 : 0;
	LogSys.log_settings[Logs::MySQLQuery].log_to_console      = std::getenv("DEBUG") ? 3 : 0;

	// 🧹 Delete all test keys before running tests
	std::vector<std::string> test_keys_to_clear = {
		"basic_key", "expiring_key", "cache_key", "json_key", "non_existent_key", "simple_key",
		"nested", "nested.test1", "nested.test2", "nested.test1.a", "nested.test2.a",
		"exp_test", "cache_test", "full_json", "full_json.key2", "complex", "complex.nested.obj1",
		"complex.nested.obj2", "plain_string", "json_array", "nested_partial",
		"nested_override", "empty_json", "json_string", "deep_nested",
		"nested_expire", "scoped_miss_test", "scoped_nested_miss.key",
		"cache_miss_overwrite", "missed_nested_set", "account_client_test", "ac_nested.test",
		"scoped_db_only_key"
	};

	DataBucketsRepository::DeleteWhere(
		database,
		fmt::format("`key` IN ('{}')", Strings::Join(test_keys_to_clear, "','"))
	);
	DataBucket::ClearCache();

	std::cout << "===========================================\n";
	std::cout << "⚙️ Running DataBuckets Tests...\n";
	std::cout << "===========================================\n\n";

	Client *client = new Client();

	// Basic Key-Value Set/Get
	client->DeleteBucket("basic_key");
	client->SetBucket("basic_key", "simple_value");

@@ -101,7 +123,7 @@ void ZoneCLI::TestDataBuckets(int argc, char **argv, argh::parser &cmd, std::str
	client->DeleteBucket("nested");
	client->SetBucket("nested.test1.a", "value1");
	client->SetBucket("nested.test2.a", "value2");
	client->SetBucket("nested.test2", "new_value"); // Should be **rejected**
	value = client->GetBucket("nested");
	RunTest("Prevent Overwriting Objects", R"({"test1":{"a":"value1"},"test2":{"a":"value2"}})", value);

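RunTest itself is defined elsewhere in this CLI source and is not part of the hunks shown here. As a reading aid, a hedged sketch of what such an assertion helper could look like (the body below is assumed; only the name, the argument shape, and the failed_count counter come from this diff):

// Assumed shape only, not the code from this commit: compare expected vs. actual
// output and bump the failed_count counter declared earlier in TestDataBuckets.
auto RunTest = [&](const std::string &name, const std::string &expected, const std::string &actual) {
	if (expected == actual) {
		std::cout << "PASS | " << name << "\n";
	} else {
		std::cout << "FAIL | " << name << " expected [" << expected << "] got [" << actual << "]\n";
		failed_count++;
	}
};
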
@@ -160,9 +182,10 @@ void ZoneCLI::TestDataBuckets(int argc, char **argv, argh::parser &cmd, std::str
	client->DeleteBucket("complex");
	client->SetBucket("complex.nested.obj1", "data1");
	client->SetBucket("complex.nested.obj2", "data2");
	client->DeleteBucket("does_not_exist"); // Should do nothing
	value = client->GetBucket("complex");
	RunTest("Deleting Non-Existent Key Doesn't Break Existing Data", R"({"nested":{"obj1":"data1","obj2":"data2"}})", value);

	// Get nested key value one level up **
	client->DeleteBucket("complex");

@@ -190,12 +213,12 @@ void ZoneCLI::TestDataBuckets(int argc, char **argv, argh::parser &cmd, std::str
	value = client->GetBucket("json_array");
	RunTest("Store and Retrieve JSON Array", R"(["item1", "item2"])", value);

	// // Prevent Overwriting Array with Object**
	// client->DeleteBucket("json_array");
	// client->SetBucket("json_array", R"(["item1", "item2"])");
	// client->SetBucket("json_array.item", "new_value"); // Should be rejected
	// value = client->GetBucket("json_array");
	// RunTest("Prevent Overwriting Array with Object", R"(["item1", "item2"])", value);

	// Retrieve Non-Existent Nested Key**
	client->DeleteBucket("nested_partial");

@@ -235,11 +258,93 @@ void ZoneCLI::TestDataBuckets(int argc, char **argv, argh::parser &cmd, std::str
	RunTest("Setting a nested key with an expiration protection test", R"({"test":{"test":"shouldnt_expire"}})", value);

	// Delete Deep Nested Key Keeps Parent**
	// client->DeleteBucket("deep_nested");
	// client->SetBucket("deep_nested.level1.level2.level3", R"({"key": "value"})");
	// client->DeleteBucket("deep_nested.level1.level2.level3.key");
	// value = client->GetBucket("deep_nested.level1.level2.level3");
	// RunTest("Delete Deep Nested Key Keeps Parent", "{}", value);

	// ================================
	// 🧪 Scoped Cache-Miss Behavior Tests
	// ================================

	// Ensure a scoped key (character ID) that doesn't exist is not fetched from DB if not in cache
	client->DeleteBucket("scoped_miss_test"); // Ensure not in DB
	DataBucket::ClearCache(); // Clear all caches
	std::string scoped_miss_value = client->GetBucket("scoped_miss_test");
	RunTest("Scoped Missing Key Returns Empty (Skips DB)", "", scoped_miss_value);

	// Ensure nested scoped key that isn't in cache returns empty immediately
	client->DeleteBucket("scoped_nested_miss.key");
	DataBucket::ClearCache(); // Clear cache again
	std::string scoped_nested_miss = client->GetBucket("scoped_nested_miss.key");
	RunTest("Nested Scoped Key Miss Returns Empty (Skips DB)", "", scoped_nested_miss);

	// Write to a key that was previously missed (0-id cached miss)
	client->DeleteBucket("cache_miss_overwrite");
	DataBucket::ClearCache(); // Ensure clean slate
	std::string missed_value = client->GetBucket("cache_miss_overwrite");
	RunTest("Initial Cache Miss Returns Empty", "", missed_value);
	client->SetBucket("cache_miss_overwrite", "new_value");
	std::string new_val = client->GetBucket("cache_miss_overwrite");
	RunTest("Overwrite After Cache Miss Works", "new_value", new_val);

	// Write a nested key that previously missed
	client->DeleteBucket("missed_nested_set.test");
	DataBucket::ClearCache();
	std::string initial = client->GetBucket("missed_nested_set.test");
	RunTest("Missed Nested Key Returns Empty", "", initial);
	client->SetBucket("missed_nested_set.test", "set_value");
	std::string after_write = client->GetBucket("missed_nested_set.test");
	RunTest("Nested Set After Miss Works", "set_value", after_write);

	// ================================
	// 🧪 Scoped Cache Preload Tests (Account + Client)
	// ================================

	// Clear everything for a clean test
	// Insert directly into the DB without touching cache
	const std::string scoped_key = "scoped_db_only_key";
	client->DeleteBucket(scoped_key);
	DataBucket::ClearCache();

	// ✅ Scoped insert
	DataBucketsRepository::InsertOne(
		database, {
			.key_         = scoped_key,
			.value        = "cached_value",
			.character_id = client->CharacterID()
		}
	);

	// Cold cache test — should return ""
	std::string cold_value = client->GetBucket(scoped_key);
	RunTest("Cold Cache Scoped Key Returns Empty (Due to Skip DB)", "", cold_value);

	// ✅ Reload cache
	client->LoadDataBucketsCache();

	// Cache should now return the value
	std::string hot_value = client->GetBucket(scoped_key);
	RunTest("Post-BulkLoad Scoped Key Returns Value", "cached_value", hot_value);

	// Also test nested key after preload
	client->DeleteBucket("ac_nested.test");
	client->SetBucket("ac_nested.test", "nested_val");

	// Clear cache, then preload
	DataBucket::ClearCache();
	client->LoadDataBucketsCache();

	std::string nested_value = client->GetBucket("ac_nested.test");
	RunTest("Post-BulkLoad Nested Scoped Key Returns Value", "nested_val", nested_value);

	// Remove and check that cache misses properly again
	client->DeleteBucket("ac_nested.test");
	DataBucket::ClearCache();
	std::string post_delete_check = client->GetBucket("ac_nested.test");
	RunTest("Post-Delete Nested Scoped Key Returns Empty", "", post_delete_check);

	std::cout << "\n===========================================\n";
	std::cout << "✅ All DataBucket Tests Completed!\n";

@@ -1958,6 +1958,9 @@ public:
	ExternalHandinMoneyReturned GetExternalHandinMoneyReturned() { return m_external_handin_money_returned; }
	std::vector<uint32_t> GetExternalHandinItemsReturned() { return m_external_handin_items_returned; }

	// used only for testing
	inline void SetCharacterId(uint32_t id) { character_id = id; }

protected:
	friend class Mob;
	void CalcEdibleBonuses(StatBonuses* newbon);