mirror of
https://github.com/EQEmu/Server.git
synced 2025-12-11 16:51:29 +00:00
[Databuckets] Nested Databuckets Protections and Improvements (#4748)
* Check for valid JSON before using it
* Do not allow nested keys to be set with an expiration
* Prevent overwriting of an existing object or array
* Nested deletion support
* Update data_bucket.cpp
* Test cases
* More test cases, fix
* Update databuckets.cpp
* Update databuckets.cpp
* Basic databucket tests
* Update databuckets.cpp
* Update databuckets.cpp
This commit is contained in:
parent
3638d157b2
commit
0615864d51
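Taken together, the change makes a bucket key behave like a dotted path into a single JSON document stored under the first key segment. A condensed view of the semantics the new test file asserts, using the same Client API it exercises (illustrative sketch only; `client` is a zone Client as in zone/cli/databuckets.cpp below):

// --- illustrative sketch of the new semantics, not part of the commit ---
void NestedBucketSemantics(Client *client)
{
	client->SetBucket("nested.test1", "value1");   // row "nested" -> {"test1":"value1"}
	client->SetBucket("nested.test2", "value2");   // merged       -> {"test1":"value1","test2":"value2"}
	client->GetBucket("nested.test2");             // "value2": reads walk the JSON
	client->SetBucket("nested.test1", "x", "S30"); // expiration is ignored on nested keys
	client->DeleteBucket("nested.test2");          // deletes one leaf, keeps its siblings
	client->SetBucket("other.a.b", "v");
	client->SetBucket("other.a", "flat");          // rejected: "a" already holds an object
}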
@@ -936,3 +936,11 @@ std::string Strings::Slugify(const std::string& input, const std::string& separator)
	return slug;
}
+
+bool Strings::IsValidJson(const std::string &json)
+{
+	rapidjson::Document doc;
+	rapidjson::ParseResult result = doc.Parse(json.c_str());
+
+	return result;
+}
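A note on the helper: rapidjson's ParseResult carries an error code and offset but also converts to bool (true when parsing succeeded), which is why `return result;` yields the intended success flag. A minimal caller-side sketch, assuming the include path and linkage of the common library:

// --- illustrative sketch, not part of the commit ---
#include <cassert>
#include "common/strings.h" // adjust include path to your build

int main()
{
	assert(Strings::IsValidJson(R"({"key":"value"})"));  // object roots parse
	assert(Strings::IsValidJson(R"(["item1","item2"])")); // arrays too
	assert(!Strings::IsValidJson(""));                    // empty input is a parse error
	assert(!Strings::IsValidJson("{not json"));           // malformed input fails
	return 0;
}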
@@ -45,6 +45,7 @@
#include <type_traits>

#include <fmt/format.h>
+#include <cereal/external/rapidjson/document.h>

#ifndef _WIN32
// this doesn't appear to affect linux-based systems..need feedback for _WIN64
@@ -188,6 +189,7 @@ public:
	}

	static std::string Slugify(const std::string &input, const std::string &separator = "-");
+	static bool IsValidJson(const std::string& json);
};

const std::string StringFormat(const char *format, ...);
@@ -56,8 +56,9 @@ echo "# Running shared_memory"
echo "# Running NPC hand-in tests"
./bin/zone tests:npc-handins 2>&1 | tee test_output.log
./bin/zone tests:npc-handins-multiquest 2>&1 | tee -a test_output.log
+./bin/zone tests:databuckets 2>&1 | tee -a test_output.log

-if grep -E -q "QueryErr|Error" test_output.log; then
+if grep -E -q "QueryErr|Error|FAILED" test_output.log; then
    echo "Error found in test output! Failing build."
    exit 1
fi
259
zone/cli/databuckets.cpp
Normal file
@@ -0,0 +1,259 @@
#include "../../common/http/httplib.h"
#include "../../common/eqemu_logsys.h"
#include "../../common/platform.h"
#include "../zone.h"
#include "../client.h"
#include "../../common/net/eqstream.h"

extern Zone *zone;

void RunTest(const std::string &test_name, const std::string &expected, const std::string &actual)
{
	if (expected == actual) {
		std::cout << "[✅] " << test_name << " PASSED\n";
	} else {
		std::cerr << "[❌] " << test_name << " FAILED\n";
		std::cerr << "   📌 Expected: " << expected << "\n";
		std::cerr << "   ❌ Got: " << actual << "\n";
		std::exit(1);
	}
}

void ZoneCLI::DataBuckets(int argc, char **argv, argh::parser &cmd, std::string &description)
{
	if (cmd[{"-h", "--help"}]) {
		return;
	}

	uint32 break_length = 50;
	int    failed_count = 0;

	LogSys.SilenceConsoleLogging();

	// boot shell zone for testing
	Zone::Bootup(ZoneID("qrg"), 0, false);
	zone->StopShutdownTimer();

	entity_list.Process();
	entity_list.MobProcess();

	LogSys.EnableConsoleLogging();

	std::cout << "===========================================\n";
	std::cout << "Running DataBuckets Tests...\n";
	std::cout << "===========================================\n\n";

	Client *client = new Client();

	// Basic Key-Value Set/Get
	client->DeleteBucket("basic_key");
	client->SetBucket("basic_key", "simple_value");
	std::string value = client->GetBucket("basic_key");
	RunTest("Basic Key-Value Set/Get", "simple_value", value);

	// Overwriting a Key
	client->SetBucket("basic_key", "new_value");
	value = client->GetBucket("basic_key");
	RunTest("Overwriting a Key", "new_value", value);

	// Deleting a Key
	client->DeleteBucket("basic_key");
	value = client->GetBucket("basic_key");
	RunTest("Deleting a Key", "", value);

	// Setting a Key with an Expiration
	client->SetBucket("expiring_key", "expires_soon", "S1");
	value = client->GetBucket("expiring_key");
	RunTest("Setting a Key with an Expiration", "expires_soon", value);

	// Ensure Expired Key is Deleted
	std::this_thread::sleep_for(std::chrono::seconds(2));
	value = client->GetBucket("expiring_key");
	RunTest("Ensure Expired Key is Deleted", "", value);

	// Cache Read/Write Consistency
	client->SetBucket("cache_key", "cached_value");
	value = client->GetBucket("cache_key");
	RunTest("Cache Read/Write Consistency", "cached_value", value);

	// Cache Clears on Key Deletion
	client->DeleteBucket("cache_key");
	value = client->GetBucket("cache_key");
	RunTest("Cache Clears on Key Deletion", "", value);

	// Setting a Full JSON String
	client->SetBucket("json_key", R"({"key1":"value1","key2":"value2"})");
	value = client->GetBucket("json_key");
	RunTest("Setting a Full JSON String", R"({"key1":"value1","key2":"value2"})", value);

	// Overwriting JSON with a Simple String
	client->SetBucket("json_key", "string_value");
	value = client->GetBucket("json_key");
	RunTest("Overwriting JSON with a Simple String", "string_value", value);

	// Deleting Non-Existent Key
	client->DeleteBucket("non_existent_key");
	value = client->GetBucket("non_existent_key");
	RunTest("Deleting Non-Existent Key", "", value);

	// Basic Key-Value Storage
	client->DeleteBucket("simple_key"); // Reset
	client->SetBucket("simple_key", "simple_value");
	value = client->GetBucket("simple_key");
	RunTest("Basic Key-Value Set/Get", "simple_value", value);

	// Nested Key Storage
	client->DeleteBucket("nested");
	client->SetBucket("nested.test1", "value1");
	client->SetBucket("nested.test2", "value2");
	value = client->GetBucket("nested");
	RunTest("Nested Key Set/Get", R"({"test1":"value1","test2":"value2"})", value);

	// Prevent Overwriting Objects
	client->DeleteBucket("nested");
	client->SetBucket("nested.test1.a", "value1");
	client->SetBucket("nested.test2.a", "value2");
	client->SetBucket("nested.test2", "new_value"); // Should be rejected
	value = client->GetBucket("nested");
	RunTest("Prevent Overwriting Objects", R"({"test1":{"a":"value1"},"test2":{"a":"value2"}})", value);

	// Deleting a Specific Nested Key
	client->DeleteBucket("nested");
	client->SetBucket("nested.test1", "value1");
	client->SetBucket("nested.test2", "value2");
	client->DeleteBucket("nested.test1");
	value = client->GetBucket("nested");
	RunTest("Delete Nested Key", R"({"test2":"value2"})", value);

	// Deleting the Entire Parent Key
	client->DeleteBucket("nested");
	value = client->GetBucket("nested");
	RunTest("Delete Parent Key", "", value);

	// Expiration is Ignored for Nested Keys
	client->DeleteBucket("exp_test");
	client->SetBucket("exp_test.nested", "data", "S20"); // Expiration ignored
	value = client->GetBucket("exp_test");
	RunTest("Expiration Ignored for Nested Keys", R"({"nested":"data"})", value);

	// Cache Behavior
	client->DeleteBucket("cache_test");
	client->SetBucket("cache_test", "cache_value");
	value = client->GetBucket("cache_test");
	RunTest("Cache Read/Write Consistency", "cache_value", value);

	// Ensure Deleting Parent Key Clears Cache
	client->DeleteBucket("cache_test");
	value = client->GetBucket("cache_test");
	RunTest("Cache Clears on Parent Delete", "", value);

	// Setting an Entire JSON Object
	client->DeleteBucket("full_json");
	client->SetBucket("full_json", R"({"key1":"value1","key2":{"subkey":"subvalue"}})");
	value = client->GetBucket("full_json");
	RunTest("Set and Retrieve Full JSON Structure", R"({"key1":"value1","key2":{"subkey":"subvalue"}})", value);

	// Partial Nested Key Deletion within JSON
	client->DeleteBucket("full_json");
	client->SetBucket("full_json", R"({"key1":"value1","key2":{"subkey":"subvalue"}})");
	client->DeleteBucket("full_json.key2");
	value = client->GetBucket("full_json");
	RunTest("Delete Nested Key within JSON", R"({"key1":"value1"})", value);

	// Ensure Object Protection on Overwrite Attempt
	client->DeleteBucket("complex");
	client->SetBucket("complex.nested.obj1", "data1");
	client->SetBucket("complex.nested.obj2", "data2");
	client->SetBucket("complex.nested", "overwrite_attempt"); // Should be rejected
	value = client->GetBucket("complex");
	RunTest("Ensure Object Protection on Overwrite Attempt", R"({"nested":{"obj1":"data1","obj2":"data2"}})", value);

	// Deleting Non-Existent Key Doesn't Break Existing Data
	client->DeleteBucket("complex");
	client->SetBucket("complex.nested.obj1", "data1");
	client->SetBucket("complex.nested.obj2", "data2");
	client->DeleteBucket("does_not_exist"); // Should do nothing
	value = client->GetBucket("complex");
	RunTest("Deleting Non-Existent Key Doesn't Break Existing Data", R"({"nested":{"obj1":"data1","obj2":"data2"}})", value);

	// Get nested key value one level up
	client->DeleteBucket("complex");
	client->SetBucket("complex.nested.obj1", "data1");
	client->SetBucket("complex.nested.obj2", "data2");
	value = client->GetBucket("complex.nested");
	RunTest("Get nested key value", R"({"obj1":"data1","obj2":"data2"})", value);

	// Get nested key value deep
	client->DeleteBucket("complex");
	client->SetBucket("complex.nested.obj1", "data1");
	client->SetBucket("complex.nested.obj2", "data2");
	value = client->GetBucket("complex.nested.obj2");
	RunTest("Get nested key value deep", R"(data2)", value);

	// Retrieve Nested Key from Plain String
	client->DeleteBucket("plain_string");
	client->SetBucket("plain_string", "some_value");
	value = client->GetBucket("plain_string.nested");
	RunTest("Retrieve Nested Key from Plain String", "", value);

	// Store and Retrieve JSON Array
	client->DeleteBucket("json_array");
	client->SetBucket("json_array", R"(["item1", "item2"])");
	value = client->GetBucket("json_array");
	RunTest("Store and Retrieve JSON Array", R"(["item1", "item2"])", value);

	// Prevent Overwriting Array with Object (test disabled)
	// client->DeleteBucket("json_array");
	// client->SetBucket("json_array", R"(["item1", "item2"])");
	// client->SetBucket("json_array.item", "new_value"); // Should be rejected
	// value = client->GetBucket("json_array");
	// RunTest("Prevent Overwriting Array with Object", R"(["item1", "item2"])", value);

	// Retrieve Non-Existent Nested Key
	client->DeleteBucket("nested_partial");
	client->SetBucket("nested_partial.level1", R"({"exists": "yes"})");
	value = client->GetBucket("nested_partial.level1.non_existent");
	RunTest("Retrieve Non-Existent Nested Key", "", value);

	// Overwriting Parent Key Deletes Children
	client->DeleteBucket("nested_override");
	client->SetBucket("nested_override.child", "data");
	client->SetBucket("nested_override", "new_parent_value"); // Should remove `child`
	value = client->GetBucket("nested_override");
	RunTest("Overwriting Parent Key Deletes Children", "new_parent_value", value);

	// Store and Retrieve Empty JSON Object
	client->DeleteBucket("empty_json");
	client->SetBucket("empty_json", R"({})");
	value = client->GetBucket("empty_json");
	RunTest("Store and Retrieve Empty JSON Object", R"({})", value);

	// Store and Retrieve JSON String
	client->DeleteBucket("json_string");
	client->SetBucket("json_string", R"("this is a string")");
	value = client->GetBucket("json_string");
	RunTest("Store and Retrieve JSON String", R"("this is a string")", value);

	// Deeply Nested Key Retrieval
	client->DeleteBucket("deep_nested");
	client->SetBucket("deep_nested.level1.level2.level3.level4.level5", "final_value");
	value = client->GetBucket("deep_nested.level1.level2.level3.level4.level5");
	RunTest("Deeply Nested Key Retrieval", "final_value", value);

	// Setting a Nested Key with an Expiration (expiration should be ignored)
	client->SetBucket("nested_expire.test.test", "shouldnt_expire", "S1");
	std::this_thread::sleep_for(std::chrono::seconds(2)); // read after the expiry window to prove it did not expire
	value = client->GetBucket("nested_expire");
	RunTest("Setting a nested key with an expiration protection test", R"({"test":{"test":"shouldnt_expire"}})", value);

	// Delete Deep Nested Key Keeps Parent (test disabled)
	// client->DeleteBucket("deep_nested");
	// client->SetBucket("deep_nested.level1.level2.level3", R"({"key": "value"})");
	// client->DeleteBucket("deep_nested.level1.level2.level3.key");
	// value = client->GetBucket("deep_nested.level1.level2.level3");
	// RunTest("Delete Deep Nested Key Keeps Parent", "{}", value);

	std::cout << "\n===========================================\n";
	std::cout << "✅ All DataBucket Tests Completed!\n";
	std::cout << "===========================================\n";
}
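Note on CI wiring: RunTest both prints FAILED and exits non-zero on the first mismatch, but the test script pipes the zone binary through tee, which masks the binary's exit status; that is presumably why the grep gate in the script above now also matches FAILED.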
@@ -27,7 +27,8 @@ void DataBucket::SetData(const std::string &bucket_key, const std::string &bucke
void DataBucket::SetData(const DataBucketKey &k_)
{
	DataBucketKey k = k_; // copy the key so we can modify it
-	if (k.key.find(NESTED_KEY_DELIMITER) != std::string::npos) {
+	bool is_nested = k.key.find(NESTED_KEY_DELIMITER) != std::string::npos;
+	if (is_nested) {
		k.key = Strings::Split(k.key, NESTED_KEY_DELIMITER).front();
	}

@@ -63,6 +64,10 @@ void DataBucket::SetData(const DataBucketKey &k_)
		if (isalpha(k.expires[0]) || isalpha(k.expires[k.expires.length() - 1])) {
			expires_time_unix = static_cast<int64>(std::time(nullptr)) + Strings::TimeToSeconds(k.expires);
		}
+		if (is_nested) {
+			LogDataBuckets("Nested keys can't expire; set expiration on the parent key");
+			expires_time_unix = 0;
+		}
	}

	b.expires = expires_time_unix;

@@ -75,26 +80,45 @@ void DataBucket::SetData(const DataBucketKey &k_)
	std::string existing_value = r.id > 0 ? r.value : "{}";
	json json_value = json::object();

-	try {
-		json_value = json::parse(existing_value);
-	} catch (json::parse_error &e) {
-		LogDataBucketsDetail("Failed to parse JSON for key [{}]: {}", k_.key, e.what());
-		json_value = json::object(); // Reset to an empty object on error
+	// Check if the JSON is valid
+	if (Strings::IsValidJson(existing_value)) {
+		try {
+			json_value = json::parse(existing_value);
+		} catch (json::parse_error &e) {
+			LogDataBuckets("Failed to parse JSON for key [{}] [{}]", k_.key, e.what());
+			json_value = json::object(); // Reset to an empty object on error
+		}
	}

	// Recursively merge new key-value pair into the JSON object
	auto nested_keys = Strings::Split(k_.key, NESTED_KEY_DELIMITER);
+	auto top_key = nested_keys.front();
+	// remove the top-level key
	nested_keys.erase(nested_keys.begin());

	json *current = &json_value;

	for (size_t i = 0; i < nested_keys.size(); ++i) {
		const std::string &key_part = nested_keys[i];

		if (i == nested_keys.size() - 1) {
+			LogDataBucketsDetail("Setting key [{}] key_part [{}]", k.key, key_part);
+
+			// If the key already exists and is an object or array, prevent overwriting to avoid data loss
+			if (current->contains(key_part) &&
+			    ((*current)[key_part].is_object() || (*current)[key_part].is_array())) {
+				LogDataBuckets("Attempted to overwrite an existing object or array at key [{}] - skipping", k_.key);
+				return;
+			}
+
			// Set the value at the final key
			(*current)[key_part] = k_.value;
		} else {
			// Traverse or create nested objects
			if (!current->contains(key_part)) {
				(*current)[key_part] = json::object();
+				LogDataBucketsDetail("Creating nested root key [{}] key_part [{}]", k.key, key_part);
			} else if (!(*current)[key_part].is_object()) {
				// If key exists but is not an object, reset to object to avoid conflicts
				(*current)[key_part] = json::object();

@@ -105,7 +129,7 @@ void DataBucket::SetData(const DataBucketKey &k_)

		// Serialize JSON back to string
		b.value = json_value.dump();
-		b.key_ = nested_keys.front(); // Use the top-level key
+		b.key_ = top_key; // Use the top-level key
	}

	if (bucket_id) {
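For readers outside the codebase, here is a minimal standalone sketch of the merge rule this hunk implements: split the dotted key, walk or create intermediate objects, and refuse to clobber an existing object or array at the leaf. It assumes the `json` alias above refers to nlohmann::json; SplitKey is a hypothetical stand-in for Strings::Split with NESTED_KEY_DELIMITER.

// --- illustrative sketch, not part of the commit ---
#include <iostream>
#include <string>
#include <vector>
#include <nlohmann/json.hpp> // assumed available

using json = nlohmann::json;

// Split "a.b.c" on '.' (stand-in for Strings::Split + NESTED_KEY_DELIMITER).
static std::vector<std::string> SplitKey(const std::string &key)
{
	std::vector<std::string> parts;
	size_t start = 0, dot;
	while ((dot = key.find('.', start)) != std::string::npos) {
		parts.push_back(key.substr(start, dot - start));
		start = dot + 1;
	}
	parts.push_back(key.substr(start));
	return parts;
}

// Merge `value` into the row's JSON at the dotted path, mirroring the
// protection rule: an existing object/array at the final key is never overwritten.
static bool SetNested(json &root, const std::string &dotted, const std::string &value)
{
	auto parts = SplitKey(dotted);
	parts.erase(parts.begin()); // first segment names the DB row itself
	json *current = &root;
	for (size_t i = 0; i < parts.size(); ++i) {
		const std::string &part = parts[i];
		if (i == parts.size() - 1) {
			if (current->contains(part) &&
			    ((*current)[part].is_object() || (*current)[part].is_array())) {
				return false; // rejected: would clobber a subtree
			}
			(*current)[part] = value;
		} else {
			if (!current->contains(part) || !(*current)[part].is_object()) {
				(*current)[part] = json::object();
			}
			current = &(*current)[part];
		}
	}
	return true;
}

int main()
{
	json root = json::object();
	SetNested(root, "nested.test1.a", "value1");
	SetNested(root, "nested.test2.a", "value2");
	bool ok = SetNested(root, "nested.test2", "new_value"); // rejected
	std::cout << root.dump() << " rejected=" << !ok << "\n";
	// {"test1":{"a":"value1"},"test2":{"a":"value2"}} rejected=1
}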
@@ -142,12 +166,20 @@ DataBucketsRepository::DataBuckets DataBucket::ExtractNestedValue(
	const std::string &full_key)
{
	auto nested_keys = Strings::Split(full_key, NESTED_KEY_DELIMITER);
+	auto top_key = nested_keys.front();
	nested_keys.erase(nested_keys.begin());
	json json_value;

+	// Check if the JSON is valid
+	if (!Strings::IsValidJson(bucket.value)) {
+		LogDataBuckets("Invalid JSON for key [{}]", bucket.key_);
+		return DataBucketsRepository::NewEntity();
+	}
+
	try {
		json_value = json::parse(bucket.value); // Parse the JSON
	} catch (json::parse_error &ex) {
-		LogDataBucketsDetail("Failed to parse JSON for key [{}]: {}", bucket.key_, ex.what());
+		LogDataBuckets("Failed to parse JSON for key [{}] [{}]", bucket.key_, ex.what());
		return DataBucketsRepository::NewEntity(); // Return empty entity on parse error
	}
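The read counterpart, in the same standalone style (reusing the `json` alias and SplitKey helper from the SetNested sketch above): walk the row's parsed JSON along the remaining key parts, as ExtractNestedValue does, where a miss or a non-object midway means "not found" and GetBucket returns the empty string.

// --- illustrative sketch, not part of the commit ---
static const json *GetNested(const json &root, const std::string &dotted)
{
	auto parts = SplitKey(dotted);
	parts.erase(parts.begin()); // first segment names the row itself
	const json *current = &root;
	for (const auto &part : parts) {
		if (!current->is_object() || !current->contains(part)) {
			return nullptr; // e.g. GetBucket("plain_string.nested") -> ""
		}
		current = &(*current)[part];
	}
	return current;
}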
@@ -336,44 +368,116 @@ bool DataBucket::GetDataBuckets(Mob *mob)

bool DataBucket::DeleteData(const DataBucketKey &k)
{
-	if (CanCache(k)) {
-		size_t size_before = g_data_bucket_cache.size();
+	bool is_nested_key = k.key.find(NESTED_KEY_DELIMITER) != std::string::npos;

-		// delete from cache where contents match
-		g_data_bucket_cache.erase(
-			std::remove_if(
-				g_data_bucket_cache.begin(),
-				g_data_bucket_cache.end(),
-				[&](DataBucketsRepository::DataBuckets &e) {
-					return CheckBucketMatch(e, k);
-				}
-			),
-			g_data_bucket_cache.end()
-		);
+	if (!is_nested_key) {
+		// Update cache
+		if (CanCache(k)) {
+			// delete from cache where contents match
+			g_data_bucket_cache.erase(
+				std::remove_if(
+					g_data_bucket_cache.begin(),
+					g_data_bucket_cache.end(),
+					[&](DataBucketsRepository::DataBuckets &e) {
+						return CheckBucketMatch(e, k);
+					}
+				),
+				g_data_bucket_cache.end()
+			);
+		}

-		LogDataBuckets(
-			"Deleting bucket key [{}] bot_id [{}] account_id [{}] character_id [{}] npc_id [{}] bot_id [{}] zone_id [{}] instance_id [{}] cache size before [{}] after [{}]",
-			k.key,
-			k.bot_id,
-			k.account_id,
-			k.character_id,
-			k.npc_id,
-			k.bot_id,
-			k.zone_id,
-			k.instance_id,
-			size_before,
-			g_data_bucket_cache.size()
-		);
-	}
-
-	return DataBucketsRepository::DeleteWhere(
-		database,
-		fmt::format(
-			"{} `key` = '{}'",
-			DataBucket::GetScopedDbFilters(k),
-			k.key
-		)
-	);
+		// Regular key deletion, no nesting involved
+		return DataBucketsRepository::DeleteWhere(
+			database,
+			fmt::format("{} `key` = '{}'", DataBucket::GetScopedDbFilters(k), k.key)
+		);
+	}
+
+	// If it's a nested key, retrieve the top-level JSON object
+	auto top_level_key = Strings::Split(k.key, NESTED_KEY_DELIMITER).front();
+	DataBucketKey top_level_k = k;
+	top_level_k.key = top_level_key;
+
+	auto r = GetData(top_level_k);
+	if (r.id == 0 || r.value.empty() || !Strings::IsValidJson(r.value)) {
+		LogDataBuckets("Attempted to delete nested key [{}] but parent key [{}] does not exist or is invalid JSON", k.key, top_level_key);
+		return false;
+	}
+
+	json json_value;
+	try {
+		json_value = json::parse(r.value);
+	} catch (json::parse_error &ex) {
+		LogDataBuckets("Failed to parse JSON for key [{}] [{}]", top_level_key, ex.what());
+		return false;
+	}
+
+	// Recursively remove the nested key
+	auto nested_keys = Strings::Split(k.key, NESTED_KEY_DELIMITER);
+	auto top_key = nested_keys.front();
+	nested_keys.erase(nested_keys.begin());
+	json *current = &json_value;
+
+	for (size_t i = 0; i < nested_keys.size(); ++i) {
+		const std::string &key_part = nested_keys[i];
+
+		if (i == nested_keys.size() - 1) {
+			// Last key in the hierarchy - delete it
+			if (current->contains(key_part)) {
+				current->erase(key_part);
+				LogDataBuckets("Deleted nested key [{}] from [{}]", key_part, k.key);
+			} else {
+				LogDataBuckets("Key [{}] not found in JSON - nothing to delete", k.key);
+				return false;
+			}
+		} else {
+			if (!current->contains(key_part) || !(*current)[key_part].is_object()) {
+				LogDataBuckets("Parent key [{}] does not exist or is not an object", key_part);
+				return false;
+			}
+			current = &(*current)[key_part];
+		}
+	}
+
+	// If the JSON object is now empty, delete the top-level key
+	if (json_value.empty()) {
+		LogDataBuckets("Top-level key [{}] is now empty, deleting entire entry", top_level_key);
+
+		// delete cache
+		if (CanCache(k)) {
+			g_data_bucket_cache.erase(
+				std::remove_if(
+					g_data_bucket_cache.begin(),
+					g_data_bucket_cache.end(),
+					[&](DataBucketsRepository::DataBuckets &e) {
+						return CheckBucketMatch(e, top_level_k);
+					}
+				),
+				g_data_bucket_cache.end()
+			);
+		}
+
+		return DataBucketsRepository::DeleteWhere(
+			database,
+			fmt::format("{} `key` = '{}'", DataBucket::GetScopedDbFilters(k), top_level_key)
+		);
+	}
+
+	// Otherwise, update the existing JSON without the deleted key
+	r.value = json_value.dump();
+	DataBucketsRepository::UpdateOne(database, r);
+
+	// Update cache
+	if (CanCache(k)) {
+		for (auto &e : g_data_bucket_cache) {
+			if (CheckBucketMatch(e, top_level_k)) {
+				e.value = r.value;
+				break;
+			}
+		}
+	}
+
+	return true;
}

std::string DataBucket::GetDataExpires(const DataBucketKey &k)
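And the deletion rule from this hunk, in the same standalone style (again reusing SplitKey and the nlohmann::json alias from the SetData sketch): delete the leaf, fail on a missing path, and signal that the whole row should be dropped once its JSON object ends up empty.

// --- illustrative sketch, not part of the commit ---
static bool DeleteNested(json &root, const std::string &dotted, bool &drop_row)
{
	auto parts = SplitKey(dotted);
	parts.erase(parts.begin()); // drop the row's own key segment
	if (parts.empty()) {
		return false;           // not a nested key
	}
	json *current = &root;
	for (size_t i = 0; i + 1 < parts.size(); ++i) {
		if (!current->contains(parts[i]) || !(*current)[parts[i]].is_object()) {
			return false;       // parent missing or not an object
		}
		current = &(*current)[parts[i]];
	}
	if (!current->contains(parts.back())) {
		return false;           // nothing to delete
	}
	current->erase(parts.back());
	drop_row = root.empty();    // empty object => delete the whole DB row
	return true;
}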
@@ -31,12 +31,14 @@ void ZoneCLI::CommandHandler(int argc, char **argv)
	// Register commands
	function_map["benchmark:databuckets"] = &ZoneCLI::BenchmarkDatabuckets;
	function_map["sidecar:serve-http"] = &ZoneCLI::SidecarServeHttp;
+	function_map["tests:databuckets"] = &ZoneCLI::DataBuckets;
	function_map["tests:npc-handins"] = &ZoneCLI::NpcHandins;
	function_map["tests:npc-handins-multiquest"] = &ZoneCLI::NpcHandinsMultiQuest;

	EQEmuCommand::HandleMenu(function_map, cmd, argc, argv);
}

+#include "cli/databuckets.cpp"
#include "cli/benchmark_databuckets.cpp"
#include "cli/sidecar_serve_http.cpp"
#include "cli/npc_handins.cpp"
@@ -11,6 +11,7 @@ public:
	static bool RanConsoleCommand(int argc, char **argv);
	static bool RanSidecarCommand(int argc, char **argv);
	static bool RanTestCommand(int argc, char **argv);
+	static void DataBuckets(int argc, char **argv, argh::parser &cmd, std::string &description);
	static void NpcHandins(int argc, char **argv, argh::parser &cmd, std::string &description);
	static void NpcHandinsMultiQuest(int argc, char **argv, argh::parser &cmd, std::string &description);
};
@@ -45,18 +45,11 @@ struct LootStateData {
	}
};

-inline bool IsValidJson(const std::string& json) {
-	rapidjson::Document doc;
-	rapidjson::ParseResult result = doc.Parse(json.c_str());
-
-	return result;
-}
-
inline void LoadLootStateData(Zone *zone, NPC *npc, const std::string &loot_data)
{
	LootStateData l{};

-	if (!IsValidJson(loot_data)) {
+	if (!Strings::IsValidJson(loot_data)) {
		LogZoneState("Invalid JSON data for NPC [{}]", npc->GetNPCTypeID());
		return;
	}
@@ -178,7 +171,7 @@ inline std::string GetLootSerialized(Corpse *c)

inline void LoadNPCEntityVariables(NPC *n, const std::string &entity_variables)
{
-	if (!IsValidJson(entity_variables)) {
+	if (!Strings::IsValidJson(entity_variables)) {
		LogZoneState("Invalid JSON data for NPC [{}]", n->GetNPCTypeID());
		return;
	}
@@ -203,7 +196,7 @@ inline void LoadNPCEntityVariables(NPC *n, const std::string &entity_variables)

inline void LoadNPCBuffs(NPC *n, const std::string &buffs)
{
-	if (!IsValidJson(buffs)) {
+	if (!Strings::IsValidJson(buffs)) {
		LogZoneState("Invalid JSON data for NPC [{}]", n->GetNPCTypeID());
		return;
	}