mirror of
https://github.com/EQEmu/Server.git
synced 2026-04-05 15:22:37 +00:00
- License was intended to be GPLv3 per earlier commit of GPLv3 LICENSE FILE - This is confirmed by the inclusion of libraries that are incompatible with GPLv2 - This is also confirmed by KLS and the agreement of KLS's predecessors - Added GPLv3 license headers to the compilable source files - Removed Folly licensing in strings.h since the string functions do not match the Folly functions and are standard functions - this must have been left over from previous implementations - Removed individual contributor license headers since the project has been under the "developer" mantle for many years - Removed comments on files that were previously automatically generated since they've been manually modified multiple times and there are no automatic scripts referencing them (removed in 2023)
144 lines
4.1 KiB
C++
144 lines
4.1 KiB
C++
/* EQEmu: EQEmulator
|
|
|
|
Copyright (C) 2001-2026 EQEmu Development Team
|
|
|
|
This program is free software; you can redistribute it and/or modify
|
|
it under the terms of the GNU General Public License as published by
|
|
the Free Software Foundation; either version 3 of the License, or
|
|
(at your option) any later version.
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
GNU General Public License for more details.
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
*/
|
|
#pragma once

#include "common/eqemu_logsys.h"

#include "uv.h"

#include <array>
#include <atomic>
#include <iostream>
#include <memory>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <vector>
|
|
|
|
namespace EQ { namespace Net { class TCPConnection; } }
|
|
|
|
// Size in bytes of each pooled write buffer (TCPWriteReq::buffer).
// `inline` (C++17, already required by <optional> use below) gives the
// constant a single definition shared by every TU that includes this
// header, instead of one internal-linkage copy per TU.
inline constexpr size_t TCP_BUFFER_SIZE = 8192;
|
|
|
|
/**
 * One pooled libuv write request together with its payload staging buffer.
 *
 * Instances are owned by WriteReqPool and handed out via acquire()/release().
 * `req` is kept as the first member and `req.data` is pointed back at the
 * owning TCPWriteReq by the pool, so libuv write callbacks can recover the
 * slot from the uv_write_t* they receive — do not reorder members.
 */
struct TCPWriteReq {
	uv_write_t req{};                            // libuv write handle; req.data points back at this object
	std::array<char, TCP_BUFFER_SIZE> buffer{};  // fixed-size payload staging area
	size_t buffer_index{};                       // slot index inside the owning WriteReqPool
	EQ::Net::TCPConnection* connection{};        // non-owning; NOTE(review): assigned by pool users, not visible here — confirm lifetime
	uint32_t magic = 0xC0FFEE;                   // sanity marker; presumably validated elsewhere to catch stale/corrupt reqs — TODO confirm
};
|
|
|
|
class WriteReqPool {
|
|
public:
|
|
explicit WriteReqPool(size_t initial_capacity = 512)
|
|
: m_capacity(initial_capacity), m_head(0) {
|
|
initialize_pool(m_capacity);
|
|
}
|
|
|
|
std::optional<TCPWriteReq*> acquire() {
|
|
size_t cap = m_capacity.load(std::memory_order_acquire);
|
|
|
|
for (size_t i = 0; i < cap; ++i) {
|
|
size_t index = m_head.fetch_add(1, std::memory_order_relaxed) % cap;
|
|
|
|
bool expected = false;
|
|
if (m_locks[index].compare_exchange_strong(expected, true, std::memory_order_acquire)) {
|
|
LogNetTCPDetail("[WriteReqPool] Acquired buffer index [{}]", index);
|
|
return m_reqs[index].get();
|
|
}
|
|
}
|
|
|
|
LogNetTCP("[WriteReqPool] Growing from [{}] to [{}]", cap, cap * 2);
|
|
grow();
|
|
return acquireAfterGrow();
|
|
}
|
|
|
|
void release(TCPWriteReq* req) {
|
|
if (!req) return;
|
|
|
|
const size_t index = req->buffer_index;
|
|
const size_t cap = m_capacity.load(std::memory_order_acquire);
|
|
|
|
if (index >= cap || m_reqs[index].get() != req) {
|
|
std::cerr << "WriteReqPool::release - Invalid or stale pointer (index=" << index << ")\n";
|
|
return;
|
|
}
|
|
|
|
m_locks[index].store(false, std::memory_order_release);
|
|
LogNetTCPDetail("[WriteReqPool] Released buffer index [{}]", index);
|
|
}
|
|
|
|
private:
|
|
std::vector<std::unique_ptr<TCPWriteReq>> m_reqs;
|
|
std::unique_ptr<std::atomic_bool[]> m_locks;
|
|
std::atomic<size_t> m_capacity;
|
|
std::atomic<size_t> m_head;
|
|
std::mutex m_grow_mutex;
|
|
|
|
void initialize_pool(size_t count) {
|
|
m_reqs.reserve(count);
|
|
m_locks = std::make_unique<std::atomic_bool[]>(count);
|
|
|
|
for (size_t i = 0; i < count; ++i) {
|
|
auto req = std::make_unique<TCPWriteReq>();
|
|
req->buffer_index = i;
|
|
req->req.data = req.get(); // optional: for use in libuv callbacks
|
|
m_locks[i].store(false, std::memory_order_relaxed);
|
|
m_reqs.emplace_back(std::move(req));
|
|
}
|
|
|
|
m_capacity.store(count, std::memory_order_release);
|
|
}
|
|
|
|
void grow() {
|
|
std::lock_guard<std::mutex> lock(m_grow_mutex);
|
|
|
|
const size_t old_cap = m_capacity.load(std::memory_order_acquire);
|
|
const size_t new_cap = old_cap * 2;
|
|
|
|
m_reqs.reserve(new_cap);
|
|
for (size_t i = old_cap; i < new_cap; ++i) {
|
|
auto req = std::make_unique<TCPWriteReq>();
|
|
req->buffer_index = i;
|
|
req->req.data = req.get(); // optional
|
|
m_reqs.emplace_back(std::move(req));
|
|
}
|
|
|
|
auto new_locks = std::make_unique<std::atomic_bool[]>(new_cap);
|
|
for (size_t i = 0; i < old_cap; ++i) {
|
|
new_locks[i].store(m_locks[i].load(std::memory_order_acquire));
|
|
}
|
|
for (size_t i = old_cap; i < new_cap; ++i) {
|
|
new_locks[i].store(false, std::memory_order_relaxed);
|
|
}
|
|
|
|
m_locks = std::move(new_locks);
|
|
m_capacity.store(new_cap, std::memory_order_release);
|
|
}
|
|
|
|
std::optional<TCPWriteReq*> acquireAfterGrow() {
|
|
const size_t cap = m_capacity.load(std::memory_order_acquire);
|
|
|
|
for (size_t i = 0; i < cap; ++i) {
|
|
bool expected = false;
|
|
if (m_locks[i].compare_exchange_strong(expected, true, std::memory_order_acquire)) {
|
|
LogNetTCP("[WriteReqPool] Acquired buffer index [{}] after grow", i);
|
|
return m_reqs[i].get();
|
|
}
|
|
}
|
|
return std::nullopt;
|
|
}
|
|
};
|