#if defined(HAVE_CONFIG_H)
#include <config/bitcoin-config.h>
#endif

#include <sys/resource.h> // for getrlimit

/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align) {
    return (x + align - 1) & ~(align - 1);
}
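// Worked example (editor's illustration, not in the original source): with a
// power-of-two alignment the mask arithmetic rounds up as expected, e.g.
// align_up(13, 16) == 16, align_up(16, 16) == 16, align_up(17, 16) == 32.
// The trick only holds when align is a power of two.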
Arena::Arena(void *base_in, size_t size_in, size_t alignment_in)
    : base(static_cast<char *>(base_in)),
      end(static_cast<char *>(base_in) + size_in), alignment(alignment_in) {
    // Start with one free chunk that covers the entire arena.
    auto it = size_to_free_chunk.emplace(size_in, base);
    chunks_free.emplace(base, it);
    chunks_free_end.emplace(base + size_in, it);
}

void *Arena::alloc(size_t size) {
    // Round to the next multiple of the arena alignment; zero-sized requests
    // are not handled.
    size = align_up(size, alignment);
    auto size_ptr_it = size_to_free_chunk.lower_bound(size);
    if (size == 0 || size_ptr_it == size_to_free_chunk.end()) {
        return nullptr;
    }

    // Create the used chunk, taking its space from the end of the best-fit
    // free chunk.
    const size_t size_remaining = size_ptr_it->first - size;
    auto alloced =
        chunks_used.emplace(size_ptr_it->second + size_remaining, size).first;
    if (size_ptr_it->first == size) {
        // The whole free chunk is used up.
        chunks_free.erase(size_ptr_it->second);
    }
    // ... (otherwise the remainder stays registered in the free-chunk maps)
    size_to_free_chunk.erase(size_ptr_it);
    return reinterpret_cast<void *>(alloced->first);
}
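// Editor's usage sketch of the arena interface (illustrative only; buf and
// the sizes are assumptions, not taken from the original file):
//
//   char buf[4096];
//   Arena a(buf, sizeof(buf), 16); // starts as one 4096-byte free chunk
//   void *p = a.alloc(100);        // request is rounded up to 112 bytes
//   void *q = a.alloc(0);          // zero-sized requests yield nullptr
//   a.free(p);                     // returns the chunk; adjacent free space
//                                  // is coalesced again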
void Arena::free(void *ptr) {
    // Remove chunk from the used map.
    auto i = chunks_used.find(static_cast<char *>(ptr));
    if (i == chunks_used.end()) {
        throw std::runtime_error("Arena: invalid or double free");
    }
    std::pair<char *, size_t> freed = *i;
    chunks_used.erase(i);

    // Coalesce with a free chunk that ends where the freed chunk begins.
    auto prev = chunks_free_end.find(freed.first);
    if (prev != chunks_free_end.end()) {
        freed.first -= prev->second->first;
        freed.second += prev->second->first;
        size_to_free_chunk.erase(prev->second);
        chunks_free_end.erase(prev);
    }

    // Coalesce with a free chunk that begins where the freed chunk ends.
    auto next = chunks_free.find(freed.first + freed.second);
    if (next != chunks_free.end()) {
        freed.second += next->second->first;
        size_to_free_chunk.erase(next->second);
        chunks_free.erase(next);
    }

    // Re-register the coalesced chunk in the free maps.
    auto it = size_to_free_chunk.emplace(freed.second, freed.first);
    chunks_free[freed.first] = it;
    chunks_free_end[freed.first + freed.second] = it;
}
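// Editor's worked example of the coalescing above (values are assumptions):
// freeing a 64-byte chunk at offset 0x100 while a 32-byte free chunk ends at
// 0x100 extends the freed range to start at 0x0e0 with size 96; if another
// free chunk starts at 0x140 its size is added as well, so neighbouring free
// space always merges back into a single chunk.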
Arena::Stats Arena::stats() const {
    Arena::Stats r{0, 0, 0, chunks_used.size(), chunks_free.size()};
    for (const auto &chunk : chunks_used) {
        r.used += chunk.second;
    }
    for (const auto &chunk : chunks_free) {
        r.free += chunk.second->first;
    }
    r.total = r.used + r.free;
    return r;
}
static void printchunk(void *base, size_t sz, bool used) {
    std::cout << "0x" << std::hex << std::setw(16) << std::setfill('0') << base
              << " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz
              << " 0x" << used << std::endl;
}
void Arena::walk() const {
    for (const auto &chunk : chunks_used) {
        printchunk(chunk.first, chunk.second, true);
    }
    std::cout << std::endl;
    for (const auto &chunk : chunks_free) {
        printchunk(chunk.first, chunk.second->first, false);
    }
    std::cout << std::endl;
}
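// Editor's note (not from the source): walk() is a debug helper; each line
// printed by printchunk() lists a chunk's base address, its size and a 0/1
// used flag, first for all used chunks and then for all free chunks.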
#ifdef WIN32
/** LockedPageAllocator specialized for Windows. */
class Win32LockedPageAllocator : public LockedPageAllocator {
public:
    Win32LockedPageAllocator();
    void *AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void *addr, size_t len) override;
    size_t GetLimit() override;

private:
    size_t page_size;
};
Win32LockedPageAllocator::Win32LockedPageAllocator() {
    // Determine the system page size in bytes.
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}

void *Win32LockedPageAllocator::AllocateLocked(size_t len,
                                               bool *lockingSuccess) {
    len = align_up(len, page_size);
    void *addr =
        VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is best-effort: in practice it keeps the pages out of
        // the pagefile, but it is not a hard guarantee.
        *lockingSuccess = VirtualLock(const_cast<void *>(addr), len) != 0;
    }
    return addr;
}

void Win32LockedPageAllocator::FreeLocked(void *addr, size_t len) {
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void *>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit() {
    // No sensible way to query a lock limit on Windows; report "unlimited".
    return std::numeric_limits<size_t>::max();
}
#endif // defined(WIN32)
#ifndef WIN32
/**
 * LockedPageAllocator specialized for OSes that don't try to be special
 * snowflakes.
 */
class PosixLockedPageAllocator : public LockedPageAllocator {
public:
    PosixLockedPageAllocator();
    void *AllocateLocked(size_t len, bool *lockingSuccess) override;
    void FreeLocked(void *addr, size_t len) override;
    size_t GetLimit() override;

private:
    size_t page_size;
};
PosixLockedPageAllocator::PosixLockedPageAllocator() {
    // Determine the page size.
#if defined(PAGESIZE) // defined in climits
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}
// Some systems (e.g. older OS X) do not define MAP_ANONYMOUS and instead
// provide the deprecated MAP_ANON.
#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

void *PosixLockedPageAllocator::AllocateLocked(size_t len,
                                               bool *lockingSuccess) {
    len = align_up(len, page_size);
    void *addr = mmap(nullptr, len, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
        return nullptr;
    }
    *lockingSuccess = mlock(addr, len) == 0;
    // Keep the locked pages out of core dumps where the platform supports it.
#if defined(MADV_DONTDUMP) // Linux
    madvise(addr, len, MADV_DONTDUMP);
#elif defined(MADV_NOCORE) // FreeBSD
    madvise(addr, len, MADV_NOCORE);
#endif
    return addr;
}
size_t PosixLockedPageAllocator::GetLimit() {
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif // !defined(WIN32)
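// Editor's note (not from the source): on Linux the value returned above is
// the soft RLIMIT_MEMLOCK limit, often only 64 KiB for unprivileged
// processes. It can be inspected with `ulimit -l` and raised via
// /etc/security/limits.conf or setrlimit() before the first arena is created.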
LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in,
                       LockingFailed_Callback lf_cb_in)
    : allocator(std::move(allocator_in)), lf_cb(lf_cb_in),
      cumulative_bytes_locked(0) {}

void *LockedPool::alloc(size_t size) {
    std::lock_guard<std::mutex> lock(mutex);
    // Try allocating from each existing arena first.
    for (auto &arena : arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new arena and allocate from it.
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}
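// Editor's usage sketch (assumes LockedPoolManager::Instance() from the
// companion header returns the process-wide pool; not part of this file):
//
//   LockedPool &pool = LockedPoolManager::Instance();
//   void *key = pool.alloc(32);   // 32 bytes of (ideally) locked memory
//   /* ... use the buffer for secret material ... */
//   memory_cleanse(key, 32);      // wipe before returning it to the pool
//   pool.free(key);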
void LockedPool::free(void *ptr) {
    std::lock_guard<std::mutex> lock(mutex);
    // Find the arena that owns this pointer and hand the chunk back to it.
    for (auto &arena : arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error(
        "LockedPool: invalid address not pointing to any arena");
}
LockedPool::Stats LockedPool::stats() const {
    std::lock_guard<std::mutex> lock(mutex);
    // Sum the per-arena statistics.
    for (const auto &arena : arenas) {
        // ...
    }
    // ...
}

bool LockedPool::new_arena(size_t size, size_t align) {
    bool locked;
    // For the first arena, cap the size by the process locking limit so that
    // at least this arena can be locked in full.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    // ...
}
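// Editor's worked example (the ARENA_SIZE value is an assumption, not shown
// in this excerpt): with a 256 KiB ARENA_SIZE and a 64 KiB RLIMIT_MEMLOCK,
// the first arena is capped to 64 KiB so it can be locked in full; later
// arenas request the full 256 KiB and may fail to lock, triggering lf_cb.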
LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in,
                                             void *base_in, size_t size_in,
                                             size_t align_in)
    : Arena(base_in, size_in, align_in), base(base_in), size(size_in),
      allocator(allocator_in) {}
LockedPoolManager::LockedPoolManager(
    std::unique_ptr<LockedPageAllocator> allocator_in)
    : LockedPool(std::move(allocator_in), &LockedPoolManager::LockingFailed) {}

void LockedPoolManager::CreateInstance() {
    // A function-local static guarantees the pool is initialized on first use
    // and deinitialized after everything that uses it.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(
        new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(
        new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
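// Editor's sketch of how CreateInstance() is typically driven (an assumption
// about the companion header, not code from this file):
//
//   LockedPoolManager &LockedPoolManager::Instance() {
//       std::call_once(LockedPoolManager::init_flag,
//                      LockedPoolManager::CreateInstance);
//       return *LockedPoolManager::_instance;
//   }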