// allocPool.hpp — pre-allocating object pool with multithreaded initialization.
#pragma once
#pragma once
#include <algorithm>
#include <cassert>
#include <concepts>
#include <cstddef>
#include <cstring>
#include <mutex>
#include <thread>
#include <unordered_map>
#include <vector>
template<class T>
concept resetable = requires(T val) {
val.reset();
};
template<class T>
requires std::default_initializable<T> && resetable<T>
class allocPool {
public:
explicit allocPool(size_t defaultAllocNumbers = 1000)
: vec(defaultAllocNumbers), pivot{defaultAllocNumbers} {
memset(&(vec[0]), 0, sizeof(vec[0]) * vec.size());
initArray(defaultAllocNumbers);
}
2024-03-01 14:21:52 -05:00
~allocPool() {
for (auto i: vec)
delete i;
}
T *getPtr() {
if (pivot == 0)
resizeVec();
auto *ptrToReturn{vec[0]};
std::swap(vec[0], vec[pivot - 1]);
positionMap[vec[0]] = 0;
positionMap[vec[pivot - 1]] = pivot - 1;
pivot--;
return ptrToReturn;
}
void returnPtr(T *ptr) {
size_t pos = positionMap[ptr];
2024-03-03 00:11:31 -05:00
ptr->reset();
std::swap(vec[pos], vec[pivot]);
positionMap[vec[pos]] = pos;
positionMap[vec[pivot]] = pivot;
pivot++;
}
2024-03-01 14:21:52 -05:00
private:
std::vector<T *> vec;
std::mutex positionMapMutex;
2024-03-01 14:21:52 -05:00
std::unordered_map<T *, size_t> positionMap;
size_t pivot;
void initArray(size_t amount) {
const auto amountOfThreads{std::thread::hardware_concurrency()};
assert(amountOfThreads);
const auto amountPerThread{amount / amountOfThreads};
std::vector<std::thread> threads;
threads.reserve(amountOfThreads);
// Using an allocPool, we estimate that we want to allocate a lot of objects, therefore
// the amount per thread *should* be higher than a cache line. This means we should, for
// the most part, avoid false sharing. In the case that it isn't, then the total amount
// should be pretty low, therefore false sharing shouldn't matter.
for (size_t i{}; i < amountOfThreads; i++)
threads.emplace_back(&allocPool::initObjects, this, i * amountPerThread, amountPerThread);
for (auto &t: threads)
t.join();
// Remainder
initObjects(amount - (amount % amountOfThreads), amount % amountOfThreads);
}
void initObjects(size_t startIdx, size_t amount) {
for (size_t i{}; i < amount; i++) {
vec[startIdx + i] = new T;
}
// In the future, it should be possible to write a custom hashmap with sections
// with independent locks, or use a data structure which would be contiguous.
std::lock_guard<std::mutex> guard(positionMapMutex);
for (size_t i{}; i < amount; i++) {
positionMap[vec[startIdx + i]] = i;
}
}
void resizeVec() {
size_t size{vec.size()};
vec.resize(2 * size);
pivot = size;
memcpy(&(vec[size]), &(vec[0]), sizeof(vec[0]) * size);
for (size_t i{}; i < size; i++)
positionMap[vec[size + i]] = size + i;
initArray(size);
}
2024-03-01 14:21:52 -05:00
};