From 6ca64703dcdb5c08f49ffbb04a01775495576528 Mon Sep 17 00:00:00 2001 From: random-zebra Date: Mon, 29 Mar 2021 12:31:30 +0200 Subject: [PATCH 1/5] allocators: split allocators and pagelocker >>> backports bitcoin@d7d187e8a451ae946fa14cead7962edbe0046f12 Pagelocker is only needed for secure (usually wallet) operations, so don't make the zero-after-free allocator depend on it. --- CMakeLists.txt | 3 +- src/Makefile.am | 8 +- src/crypter.h | 2 +- src/key.h | 3 +- src/qt/askpassphrasedialog.h | 2 +- src/qt/walletmodel.h | 2 +- src/sapling/zip32.h | 6 +- src/streams.h | 2 +- src/support/allocators/secure.h | 63 ++++++++++++ src/support/allocators/zeroafterfree.h | 48 ++++++++++ .../pagelocker.cpp} | 6 +- src/{allocators.h => support/pagelocker.h} | 96 +------------------ src/test/allocator_tests.cpp | 2 +- src/util.cpp | 2 +- src/utilstrencodings.h | 2 +- 15 files changed, 135 insertions(+), 112 deletions(-) create mode 100644 src/support/allocators/secure.h create mode 100644 src/support/allocators/zeroafterfree.h rename src/{allocators.cpp => support/pagelocker.cpp} (94%) rename src/{allocators.h => support/pagelocker.h} (64%) diff --git a/CMakeLists.txt b/CMakeLists.txt index 196d2bae8ac98..d8dc7151f8c99 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -374,7 +374,6 @@ target_include_directories(ZEROCOIN_A PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/src ) set(COMMON_SOURCES - ./src/allocators.cpp ./src/base58.cpp ./src/bip38.cpp ./src/consensus/params.cpp @@ -421,7 +420,6 @@ target_include_directories(COMMON_A PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/src ) set(UTIL_SOURCES - ./src/allocators.cpp ./src/compat/strnlen.cpp ./src/compat/glibc_sanity.cpp ./src/compat/glibcxx_sanity.cpp @@ -441,6 +439,7 @@ set(UTIL_SOURCES ./src/utilstrencodings.cpp ./src/utilmoneystr.cpp ./src/utiltime.cpp + ./src/support/pagelocker.cpp ./src/support/cleanse.cpp ) add_library(UTIL_A STATIC ${BitcoinHeaders} ${UTIL_SOURCES}) diff --git a/src/Makefile.am b/src/Makefile.am index 30aeb7aba805a..af4ce136a218e 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -148,7 +148,6 @@ BITCOIN_CORE_H = \ activemasternode.h \ addrdb.h \ addrman.h \ - allocators.h \ arith_uint256.h \ amount.h \ base58.h \ @@ -260,7 +259,10 @@ BITCOIN_CORE_H = \ stakeinput.h \ script/ismine.h \ streams.h \ + support/allocators/secure.h \ + support/allocators/zeroafterfree.h \ support/cleanse.h \ + support/pagelocker.h \ sync.h \ threadsafety.h \ threadinterrupt.h \ @@ -485,7 +487,6 @@ endif libbitcoin_common_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) libbitcoin_common_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) libbitcoin_common_a_SOURCES = \ - allocators.cpp \ base58.cpp \ bip38.cpp \ chainparams.cpp \ @@ -526,7 +527,6 @@ libbitcoin_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) libbitcoin_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) libbitcoin_util_a_SOURCES = \ arith_uint256.cpp \ - allocators.cpp \ chainparamsbase.cpp \ clientversion.cpp \ compat/glibc_sanity.cpp \ @@ -538,6 +538,7 @@ libbitcoin_util_a_SOURCES = \ random.cpp \ rpc/protocol.cpp \ support/cleanse.cpp \ + support/pagelocker.cpp \ sync.cpp \ threadinterrupt.cpp \ uint256.cpp \ @@ -670,7 +671,6 @@ if BUILD_BITCOIN_LIBS include_HEADERS = script/bitcoinconsensus.h libbitcoinconsensus_la_SOURCES = \ arith_uint256.cpp \ - allocators.cpp \ primitives/transaction.cpp \ crypto/hmac_sha512.cpp \ crypto/scrypt.cpp \ diff --git a/src/crypter.h b/src/crypter.h index 546f9d31c4094..c5e4008b64816 100644 --- a/src/crypter.h +++ b/src/crypter.h @@ -6,10 +6,10 @@ #ifndef BITCOIN_CRYPTER_H 
#define BITCOIN_CRYPTER_H -#include "allocators.h" #include "keystore.h" #include "serialize.h" #include "streams.h" +#include "support/allocators/zeroafterfree.h" class uint256; diff --git a/src/key.h b/src/key.h index a0e8a419783bf..d7e9dda3f1161 100644 --- a/src/key.h +++ b/src/key.h @@ -7,8 +7,8 @@ #ifndef PIVX_KEY_H #define PIVX_KEY_H -#include "allocators.h" #include "serialize.h" +#include "support/allocators/secure.h" #include "uint256.h" #include "pubkey.h" @@ -20,7 +20,6 @@ class CPubKey; struct CExtPubKey; /** - * secure_allocator is defined in allocators.h * CPrivKey is a serialized private key, with all parameters included * (PRIVATE_KEY_SIZE bytes) */ diff --git a/src/qt/askpassphrasedialog.h b/src/qt/askpassphrasedialog.h index 370a4a684e498..8e07d98779a63 100644 --- a/src/qt/askpassphrasedialog.h +++ b/src/qt/askpassphrasedialog.h @@ -8,7 +8,7 @@ #include #include "qt/pivx/prunnable.h" -#include "allocators.h" +#include "support/allocators/secure.h" #include class WalletModel; diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h index ce0ffb91c61a3..08f20d95fd726 100644 --- a/src/qt/walletmodel.h +++ b/src/qt/walletmodel.h @@ -13,8 +13,8 @@ #include "interfaces/wallet.h" -#include "allocators.h" /* for SecureString */ #include "operationresult.h" +#include "support/allocators/zeroafterfree.h" #include "wallet/wallet.h" #include "pairresult.h" diff --git a/src/sapling/zip32.h b/src/sapling/zip32.h index 03b0e96ca51ed..f0a9d46881b9a 100644 --- a/src/sapling/zip32.h +++ b/src/sapling/zip32.h @@ -5,12 +5,12 @@ #ifndef PIVX_ZIP32_H #define PIVX_ZIP32_H -#include "serialize.h" -#include "allocators.h" #include "blob_uint256.h" #include "key.h" -#include "uint256.h" #include "sapling/address.h" +#include "serialize.h" +#include "support/allocators/zeroafterfree.h" +#include "uint256.h" #include diff --git a/src/streams.h b/src/streams.h index 243d1108dcb52..a0533037a2f53 100644 --- a/src/streams.h +++ b/src/streams.h @@ -7,8 +7,8 @@ #ifndef BITCOIN_STREAMS_H #define BITCOIN_STREAMS_H -#include "allocators.h" #include "serialize.h" +#include "support/allocators/zeroafterfree.h" #include #include diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h new file mode 100644 index 0000000000000..00e9da4ea27e0 --- /dev/null +++ b/src/support/allocators/secure.h @@ -0,0 +1,63 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2013 The Bitcoin developers +// Distributed under the MIT/X11 software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + + +#ifndef BITCOIN_ALLOCATORS_SECURE_H +#define BITCOIN_ALLOCATORS_SECURE_H + +#include "support/pagelocker.h" + +#include + +// +// Allocator that locks its contents from being paged +// out of memory and clears its contents before deletion. 
+// +template +struct secure_allocator : public std::allocator { + // MSVC8 default copy constructor is broken + typedef std::allocator base; + typedef typename base::size_type size_type; + typedef typename base::difference_type difference_type; + typedef typename base::pointer pointer; + typedef typename base::const_pointer const_pointer; + typedef typename base::reference reference; + typedef typename base::const_reference const_reference; + typedef typename base::value_type value_type; + secure_allocator() throw() {} + secure_allocator(const secure_allocator& a) throw() : base(a) {} + template + secure_allocator(const secure_allocator& a) throw() : base(a) + { + } + ~secure_allocator() throw() {} + template + struct rebind { + typedef secure_allocator<_Other> other; + }; + + T* allocate(std::size_t n, const void* hint = 0) + { + T* p; + p = std::allocator::allocate(n, hint); + if (p != NULL) + LockedPageManager::Instance().LockRange(p, sizeof(T) * n); + return p; + } + + void deallocate(T* p, std::size_t n) + { + if (p != NULL) { + memory_cleanse(p, sizeof(T) * n); + LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n); + } + std::allocator::deallocate(p, n); + } +}; + +// This is exactly like std::string, but with a custom allocator. +typedef std::basic_string, secure_allocator > SecureString; + +#endif // BITCOIN_ALLOCATORS_SECURE_H diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h new file mode 100644 index 0000000000000..9a65517ededfe --- /dev/null +++ b/src/support/allocators/zeroafterfree.h @@ -0,0 +1,48 @@ +// Copyright (c) 2009-2010 Satoshi Nakamoto +// Copyright (c) 2009-2013 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_ALLOCATORS_ZEROAFTERFREE_H +#define BITCOIN_ALLOCATORS_ZEROAFTERFREE_H + +#include "support/cleanse.h" + +#include +#include + +template +struct zero_after_free_allocator : public std::allocator { + // MSVC8 default copy constructor is broken + typedef std::allocator base; + typedef typename base::size_type size_type; + typedef typename base::difference_type difference_type; + typedef typename base::pointer pointer; + typedef typename base::const_pointer const_pointer; + typedef typename base::reference reference; + typedef typename base::const_reference const_reference; + typedef typename base::value_type value_type; + zero_after_free_allocator() throw() {} + zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {} + template + zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) + { + } + ~zero_after_free_allocator() throw() {} + template + struct rebind { + typedef zero_after_free_allocator<_Other> other; + }; + + void deallocate(T* p, std::size_t n) + { + if (p != NULL) + memory_cleanse(p, sizeof(T) * n); + std::allocator::deallocate(p, n); + } +}; + +// Byte-vector that clears its contents before deletion. 
+typedef std::vector > CSerializeData; + +#endif // BITCOIN_ALLOCATORS_ZEROAFTERFREE_H \ No newline at end of file diff --git a/src/allocators.cpp b/src/support/pagelocker.cpp similarity index 94% rename from src/allocators.cpp rename to src/support/pagelocker.cpp index df498f7eae7ef..4514d08bb33fb 100644 --- a/src/allocators.cpp +++ b/src/support/pagelocker.cpp @@ -2,7 +2,11 @@ // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include "allocators.h" +#include "support/pagelocker.h" + +#if defined(HAVE_CONFIG_H) +#include "config/pivx-config.h" +#endif #ifdef WIN32 #ifdef _WIN32_WINNT diff --git a/src/allocators.h b/src/support/pagelocker.h similarity index 64% rename from src/allocators.h rename to src/support/pagelocker.h index d3b3d6ea2838f..17c75a77c7b4f 100644 --- a/src/allocators.h +++ b/src/support/pagelocker.h @@ -3,15 +3,12 @@ // Distributed under the MIT/X11 software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#ifndef BITCOIN_ALLOCATORS_H -#define BITCOIN_ALLOCATORS_H +#ifndef BITCOIN_ALLOCATORS_PAGELOCKER_H +#define BITCOIN_ALLOCATORS_PAGELOCKER_H #include "support/cleanse.h" #include -#include -#include -#include #include #include @@ -160,91 +157,4 @@ class LockedPageManager : public LockedPageManagerBase static boost::once_flag init_flag; }; -// -// Allocator that locks its contents from being paged -// out of memory and clears its contents before deletion. -// -template -struct secure_allocator : public std::allocator { - // MSVC8 default copy constructor is broken - typedef std::allocator base; - typedef typename base::size_type size_type; - typedef typename base::difference_type difference_type; - typedef typename base::pointer pointer; - typedef typename base::const_pointer const_pointer; - typedef typename base::reference reference; - typedef typename base::const_reference const_reference; - typedef typename base::value_type value_type; - secure_allocator() throw() {} - secure_allocator(const secure_allocator& a) throw() : base(a) {} - template - secure_allocator(const secure_allocator& a) throw() : base(a) - { - } - ~secure_allocator() throw() {} - template - struct rebind { - typedef secure_allocator<_Other> other; - }; - - T* allocate(std::size_t n, const void* hint = 0) - { - T* p; - p = std::allocator::allocate(n, hint); - if (p != NULL) - LockedPageManager::Instance().LockRange(p, sizeof(T) * n); - return p; - } - - void deallocate(T* p, std::size_t n) - { - if (p != NULL) { - memory_cleanse(p, sizeof(T) * n); - LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n); - } - std::allocator::deallocate(p, n); - } -}; - - -// -// Allocator that clears its contents before deletion. 
-// -template -struct zero_after_free_allocator : public std::allocator { - // MSVC8 default copy constructor is broken - typedef std::allocator base; - typedef typename base::size_type size_type; - typedef typename base::difference_type difference_type; - typedef typename base::pointer pointer; - typedef typename base::const_pointer const_pointer; - typedef typename base::reference reference; - typedef typename base::const_reference const_reference; - typedef typename base::value_type value_type; - zero_after_free_allocator() throw() {} - zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {} - template - zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) - { - } - ~zero_after_free_allocator() throw() {} - template - struct rebind { - typedef zero_after_free_allocator<_Other> other; - }; - - void deallocate(T* p, std::size_t n) - { - if (p != NULL) - memory_cleanse(p, sizeof(T) * n); - std::allocator::deallocate(p, n); - } -}; - -// This is exactly like std::string, but with a custom allocator. -typedef std::basic_string, secure_allocator > SecureString; - -// Byte-vector that clears its contents before deletion. -typedef std::vector > CSerializeData; - -#endif // BITCOIN_ALLOCATORS_H +#endif // BITCOIN_ALLOCATORS_PAGELOCKER_H diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp index 6a7f89871edda..922cd7602476e 100644 --- a/src/test/allocator_tests.cpp +++ b/src/test/allocator_tests.cpp @@ -4,7 +4,7 @@ #include "util.h" -#include "allocators.h" +#include "support/allocators/zeroafterfree.h" #include "test/test_pivx.h" #include diff --git a/src/util.cpp b/src/util.cpp index 25b02cd141d21..3ac197e0eeb24 100644 --- a/src/util.cpp +++ b/src/util.cpp @@ -11,9 +11,9 @@ #include "util.h" -#include "allocators.h" #include "chainparamsbase.h" #include "random.h" +#include "support/allocators/zeroafterfree.h" #include "utilstrencodings.h" #include "utiltime.h" diff --git a/src/utilstrencodings.h b/src/utilstrencodings.h index 5b1f0372b08ed..50ace81454251 100644 --- a/src/utilstrencodings.h +++ b/src/utilstrencodings.h @@ -10,7 +10,7 @@ #ifndef BITCOIN_UTILSTRENCODINGS_H #define BITCOIN_UTILSTRENCODINGS_H -#include "allocators.h" +#include "support/allocators/secure.h" #include #include #include From 2d2e33112191c03be9cbda87ee440fd2a323d892 Mon Sep 17 00:00:00 2001 From: "Wladimir J. van der Laan" Date: Sun, 18 Sep 2016 09:55:14 +0200 Subject: [PATCH 2/5] support: Add LockedPool >>> backports bitcoin/bitcoin@4536148b15595229d0563fb60913b23cb78788ed Add a pool for locked memory chunks, replacing LockedPageManager. This is something I've been wanting to do for a long time. The current approach of locking objects where they happen to be on the stack or heap in-place causes a lot of mlock/munlock system call overhead, slowing down any handling of keys. Also locked memory is a limited resource on many operating systems (and using a lot of it bogs down the system), so the previous approach of locking every page that may contain any key information (but also other information) is wasteful. 
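The interface this adds is deliberately small: a process-wide LockedPoolManager singleton hands out chunks from mlock'd (or VirtualLock'd) arenas via alloc(), takes them back via free(), and reports usage via stats(). A minimal, illustrative sketch of a caller, using only the API introduced in this patch (the real consumer is secure_allocator, updated below):

```cpp
#include "support/cleanse.h"
#include "support/lockedpool.h"

void locked_buffer_example()
{
    LockedPoolManager& pool = LockedPoolManager::Instance();

    // Request 32 bytes of locked memory for key material.
    void* p = pool.alloc(32);
    if (p == nullptr) {
        return; // pool exhausted (or a zero/oversized request)
    }

    // ... use the buffer for sensitive data ...

    // Wipe before returning the chunk, as secure_allocator::deallocate does.
    memory_cleanse(p, 32);
    pool.free(p);

    // Diagnostics: total managed bytes vs. bytes that were successfully locked.
    LockedPool::Stats st = pool.stats();
    (void)st.total;
    (void)st.locked;
}
```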
--- CMakeLists.txt | 2 +- src/Makefile.am | 4 +- src/support/allocators/secure.h | 12 +- src/support/lockedpool.cpp | 383 ++++++++++++++++++++++++++++++++ src/support/lockedpool.h | 251 +++++++++++++++++++++ src/support/pagelocker.cpp | 70 ------ src/support/pagelocker.h | 160 ------------- src/test/allocator_tests.cpp | 280 +++++++++++++++-------- src/utilstrencodings.h | 1 + 9 files changed, 834 insertions(+), 329 deletions(-) create mode 100644 src/support/lockedpool.cpp create mode 100644 src/support/lockedpool.h delete mode 100644 src/support/pagelocker.cpp delete mode 100644 src/support/pagelocker.h diff --git a/CMakeLists.txt b/CMakeLists.txt index d8dc7151f8c99..6b3a6953baad9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -439,7 +439,7 @@ set(UTIL_SOURCES ./src/utilstrencodings.cpp ./src/utilmoneystr.cpp ./src/utiltime.cpp - ./src/support/pagelocker.cpp + ./src/support/lockedpool.cpp ./src/support/cleanse.cpp ) add_library(UTIL_A STATIC ${BitcoinHeaders} ${UTIL_SOURCES}) diff --git a/src/Makefile.am b/src/Makefile.am index af4ce136a218e..7c5df62ac0e1c 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -262,7 +262,7 @@ BITCOIN_CORE_H = \ support/allocators/secure.h \ support/allocators/zeroafterfree.h \ support/cleanse.h \ - support/pagelocker.h \ + support/lockedpool.h \ sync.h \ threadsafety.h \ threadinterrupt.h \ @@ -538,7 +538,7 @@ libbitcoin_util_a_SOURCES = \ random.cpp \ rpc/protocol.cpp \ support/cleanse.cpp \ - support/pagelocker.cpp \ + support/lockedpool.cpp \ sync.cpp \ threadinterrupt.cpp \ uint256.cpp \ diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index 00e9da4ea27e0..f88bef9c681d5 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -7,7 +7,8 @@ #ifndef BITCOIN_ALLOCATORS_SECURE_H #define BITCOIN_ALLOCATORS_SECURE_H -#include "support/pagelocker.h" +#include "support/lockedpool.h" +#include "support/cleanse.h" #include @@ -40,20 +41,15 @@ struct secure_allocator : public std::allocator { T* allocate(std::size_t n, const void* hint = 0) { - T* p; - p = std::allocator::allocate(n, hint); - if (p != NULL) - LockedPageManager::Instance().LockRange(p, sizeof(T) * n); - return p; + return static_cast(LockedPoolManager::Instance().alloc(sizeof(T) * n)); } void deallocate(T* p, std::size_t n) { if (p != NULL) { memory_cleanse(p, sizeof(T) * n); - LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n); } - std::allocator::deallocate(p, n); + LockedPoolManager::Instance().free(p); } }; diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp new file mode 100644 index 0000000000000..583ab3f7369aa --- /dev/null +++ b/src/support/lockedpool.cpp @@ -0,0 +1,383 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "support/lockedpool.h" +#include "support/cleanse.h" + +#if defined(HAVE_CONFIG_H) +#include "config/pivx-config.h" +#endif + +#ifdef WIN32 +#ifdef _WIN32_WINNT +#undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0501 +#define WIN32_LEAN_AND_MEAN 1 +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include +#else +#include // for mmap +#include // for getrlimit +#include // for PAGESIZE +#include // for sysconf +#endif + +LockedPoolManager* LockedPoolManager::_instance = NULL; +std::once_flag LockedPoolManager::init_flag; + +/*******************************************************************************/ +// Utilities +// +/** Align up to power of 2 */ +static inline size_t align_up(size_t x, size_t align) +{ + return (x + align - 1) & ~(align - 1); +} + +/*******************************************************************************/ +// Implementation: Arena + +Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): + base(static_cast(base_in)), end(static_cast(base_in) + size_in), alignment(alignment_in) +{ + // Start with one free chunk that covers the entire arena + chunks.emplace(base, Chunk(size_in, false)); +} + +Arena::~Arena() +{ +} + +void* Arena::alloc(size_t size) +{ + // Round to next multiple of alignment + size = align_up(size, alignment); + + // Don't handle zero-sized chunks, or those bigger than MAX_SIZE + if (size == 0 || size >= Chunk::MAX_SIZE) { + return nullptr; + } + + for (auto& chunk: chunks) { + if (!chunk.second.isInUse() && size <= chunk.second.getSize()) { + char* base = chunk.first; + size_t leftover = chunk.second.getSize() - size; + if (leftover > 0) { // Split chunk + chunks.emplace(base + size, Chunk(leftover, false)); + chunk.second.setSize(size); + } + chunk.second.setInUse(true); + return reinterpret_cast(base); + } + } + return nullptr; +} + +void Arena::free(void *ptr) +{ + // Freeing the NULL pointer is OK. + if (ptr == nullptr) { + return; + } + auto i = chunks.find(static_cast(ptr)); + if (i == chunks.end() || !i->second.isInUse()) { + throw std::runtime_error("Arena: invalid or double free"); + } + + i->second.setInUse(false); + + if (i != chunks.begin()) { // Absorb into previous chunk if exists and free + auto prev = i; + --prev; + if (!prev->second.isInUse()) { + // Absorb current chunk size into previous chunk. + prev->second.setSize(prev->second.getSize() + i->second.getSize()); + // Erase current chunk. Erasing does not invalidate current + // iterators for a map, except for that pointing to the object + // itself, which will be overwritten in the next statement. + chunks.erase(i); + // From here on, the previous chunk is our current chunk. + i = prev; + } + } + auto next = i; + ++next; + if (next != chunks.end()) { // Absorb next chunk if exists and free + if (!next->second.isInUse()) { + // Absurb next chunk size into current chunk + i->second.setSize(i->second.getSize() + next->second.getSize()); + // Erase next chunk. 
+ chunks.erase(next); + } + } +} + +Arena::Stats Arena::stats() const +{ + Arena::Stats r; + r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0; + for (const auto& chunk: chunks) { + if (chunk.second.isInUse()) { + r.used += chunk.second.getSize(); + r.chunks_used += 1; + } else { + r.free += chunk.second.getSize(); + r.chunks_free += 1; + } + r.total += chunk.second.getSize(); + } + return r; +} + +#ifdef ARENA_DEBUG +void Arena::walk() const +{ + for (const auto& chunk: chunks) { + std::cout << + "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first << + " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() << + " 0x" << chunk.second.isInUse() << std::endl; + } + std::cout << std::endl; +} +#endif + +/*******************************************************************************/ +// Implementation: Win32LockedPageAllocator + +#ifdef WIN32 +/** LockedPageAllocator specialized for Windows. + */ +class Win32LockedPageAllocator: public LockedPageAllocator +{ +public: + Win32LockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +Win32LockedPageAllocator::Win32LockedPageAllocator() +{ + // Determine system page size in bytes + SYSTEM_INFO sSysInfo; + GetSystemInfo(&sSysInfo); + page_size = sSysInfo.dwPageSize; +} +void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + len = align_up(len, page_size); + void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (addr) { + // VirtualLock is used to attempt to keep keying material out of swap. Note + // that it does not provide this as a guarantee, but, in practice, memory + // that has been VirtualLock'd almost never gets written to the pagefile + // except in rare circumstances where memory is extremely low. + *lockingSuccess = VirtualLock(const_cast(addr), len) != 0; + } + return addr; +} +void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + VirtualUnlock(const_cast(addr), len); +} + +size_t Win32LockedPageAllocator::GetLimit() +{ + // TODO is there a limit on windows, how to get it? + return std::numeric_limits::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: PosixLockedPageAllocator + +#ifndef WIN32 +/** LockedPageAllocator specialized for OSes that don't try to be + * special snowflakes. 
+ */ +class PosixLockedPageAllocator: public LockedPageAllocator +{ +public: + PosixLockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +PosixLockedPageAllocator::PosixLockedPageAllocator() +{ + // Determine system page size in bytes +#if defined(PAGESIZE) // defined in limits.h + page_size = PAGESIZE; +#else // assume some POSIX OS + page_size = sysconf(_SC_PAGESIZE); +#endif +} +void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + void *addr; + len = align_up(len, page_size); + addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (addr) { + *lockingSuccess = mlock(addr, len) == 0; + } + return addr; +} +void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + munlock(addr, len); + munmap(addr, len); +} +size_t PosixLockedPageAllocator::GetLimit() +{ +#ifdef RLIMIT_MEMLOCK + struct rlimit rlim; + if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) { + if (rlim.rlim_cur != RLIM_INFINITY) { + return rlim.rlim_cur; + } + } +#endif + return std::numeric_limits::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: LockedPool + +LockedPool::LockedPool(std::unique_ptr allocator_in, LockingFailed_Callback lf_cb_in): + allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0) +{ +} + +LockedPool::~LockedPool() +{ +} +void* LockedPool::alloc(size_t size) +{ + std::lock_guard lock(mutex); + // Try allocating from each current arena + for (auto &arena: arenas) { + void *addr = arena.alloc(size); + if (addr) { + return addr; + } + } + // If that fails, create a new one + if (new_arena(ARENA_SIZE, ARENA_ALIGN)) { + return arenas.back().alloc(size); + } + return nullptr; +} + +void LockedPool::free(void *ptr) +{ + std::lock_guard lock(mutex); + // TODO we can do better than this linear search by keeping a map of arena + // extents to arena, and looking up the address. + for (auto &arena: arenas) { + if (arena.addressInArena(ptr)) { + arena.free(ptr); + return; + } + } + throw std::runtime_error("LockedPool: invalid address not pointing to any arena"); +} + +LockedPool::Stats LockedPool::stats() const +{ + std::lock_guard lock(mutex); + LockedPool::Stats r; + r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0; + r.locked = cumulative_bytes_locked; + for (const auto &arena: arenas) { + Arena::Stats i = arena.stats(); + r.used += i.used; + r.free += i.free; + r.total += i.total; + r.chunks_used += i.chunks_used; + r.chunks_free += i.chunks_free; + } + return r; +} + +bool LockedPool::new_arena(size_t size, size_t align) +{ + bool locked; + // If this is the first arena, handle this specially: Cap the upper size + // by the process limit. This makes sure that the first arena will at least + // be locked. An exception to this is if the process limit is 0: + // in this case no memory can be locked at all so we'll skip past this logic. 
+ if (arenas.empty()) { + size_t limit = allocator->GetLimit(); + if (limit > 0) { + size = std::min(size, limit); + } + } + void *addr = allocator->AllocateLocked(size, &locked); + if (!addr) { + return false; + } + if (locked) { + cumulative_bytes_locked += size; + } else if (lf_cb) { // Call the locking-failed callback if locking failed + if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed. + allocator->FreeLocked(addr, size); + return false; + } + } + arenas.emplace_back(allocator.get(), addr, size, align); + return true; +} + +LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in): + Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in) +{ +} +LockedPool::LockedPageArena::~LockedPageArena() +{ + allocator->FreeLocked(base, size); +} + +/*******************************************************************************/ +// Implementation: LockedPoolManager +// +LockedPoolManager::LockedPoolManager(std::unique_ptr allocator): + LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed) +{ +} + +bool LockedPoolManager::LockingFailed() +{ + // TODO: log something but how? without including util.h + return true; +} + +void LockedPoolManager::CreateInstance() +{ + // Using a local static instance guarantees that the object is initialized + // when it's first needed and also deinitialized after all objects that use + // it are done with it. I can think of one unlikely scenario where we may + // have a static deinitialization order/problem, but the check in + // LockedPoolManagerBase's destructor helps us detect if that ever happens. +#ifdef WIN32 + std::unique_ptr allocator(new Win32LockedPageAllocator()); +#else + std::unique_ptr allocator(new PosixLockedPageAllocator()); +#endif + static LockedPoolManager instance(std::move(allocator)); + LockedPoolManager::_instance = &instance; +} diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h new file mode 100644 index 0000000000000..526c17a73fe76 --- /dev/null +++ b/src/support/lockedpool.h @@ -0,0 +1,251 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H +#define BITCOIN_SUPPORT_LOCKEDPOOL_H + +#include +#include +#include +#include +#include + +/** + * OS-dependent allocation and deallocation of locked/pinned memory pages. + * Abstract base class. + */ +class LockedPageAllocator +{ +public: + virtual ~LockedPageAllocator() {} + /** Allocate and lock memory pages. + * If len is not a multiple of the system page size, it is rounded up. + * Returns 0 in case of allocation failure. + * + * If locking the memory pages could not be accomplished it will still + * return the memory, however the lockingSuccess flag will be false. + * lockingSuccess is undefined if the allocation fails. + */ + virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0; + + /** Unlock and free memory pages. + * Clear the memory before unlocking. + */ + virtual void FreeLocked(void* addr, size_t len) = 0; + + /** Get the total limit on the amount of memory that may be locked by this + * process, in bytes. Return size_t max if there is no limit or the limit + * is unknown. Return 0 if no memory can be locked at all. 
+ */ + virtual size_t GetLimit() = 0; +}; + +/* An arena manages a contiguous region of memory by dividing it into + * chunks. + */ +class Arena +{ +public: + Arena(void *base, size_t size, size_t alignment); + virtual ~Arena(); + + /** A chunk of memory. + */ + struct Chunk + { + /** Most significant bit of size_t. This is used to mark + * in-usedness of chunk. + */ + const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1); + /** Maximum size of a chunk */ + const static size_t MAX_SIZE = SIZE_MSB - 1; + + Chunk(size_t size_in, bool used_in): + size(size_in | (used_in ? SIZE_MSB : 0)) {} + + bool isInUse() const { return size & SIZE_MSB; } + void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); } + size_t getSize() const { return size & ~SIZE_MSB; } + void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; } + private: + size_t size; + }; + /** Memory statistics. */ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t chunks_used; + size_t chunks_free; + }; + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get arena usage statistics */ + Stats stats() const; + +#ifdef ARENA_DEBUG + void walk() const; +#endif + + /** Return whether a pointer points inside this arena. + * This returns base <= ptr < (base+size) so only use it for (inclusive) + * chunk starting addresses. + */ + bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; } +private: + Arena(const Arena& other) = delete; // non construction-copyable + Arena& operator=(const Arena&) = delete; // non copyable + + /** Map of chunk address to chunk information. This class makes use of the + * sorted order to merge previous and next chunks during deallocation. + */ + std::map chunks; + /** Base address of arena */ + char* base; + /** End address of arena */ + char* end; + /** Minimum chunk alignment */ + size_t alignment; +}; + +/** Pool for locked memory chunks. + * + * To avoid sensitive key data from being swapped to disk, the memory in this pool + * is locked/pinned. + * + * An arena manages a contiguous region of memory. The pool starts out with one arena + * but can grow to multiple arenas if the need arises. + * + * Unlike a normal C heap, the administrative structures are seperate from the managed + * memory. This has been done as the sizes and bases of objects are not in themselves sensitive + * information, as to conserve precious locked memory. In some operating systems + * the amount of memory that can be locked is small. + */ +class LockedPool +{ +public: + /** Size of one arena of locked memory. This is a compromise. + * Do not set this too low, as managing many arenas will increase + * allocation and deallocation overhead. Setting it too high allocates + * more locked memory from the OS than strictly necessary. + */ + static const size_t ARENA_SIZE = 256*1024; + /** Chunk alignment. Another compromise. Setting this too high will waste + * memory, setting it too low will facilitate fragmentation. + */ + static const size_t ARENA_ALIGN = 16; + + /** Callback when allocation succeeds but locking fails. + */ + typedef bool (*LockingFailed_Callback)(); + + /** Memory statistics. 
*/ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t locked; + size_t chunks_used; + size_t chunks_free; + }; + + /** Create a new LockedPool. This takes ownership of the MemoryPageLocker, + * you can only instantiate this with LockedPool(std::move(...)). + * + * The second argument is an optional callback when locking a newly allocated arena failed. + * If this callback is provided and returns false, the allocation fails (hard fail), if + * it returns true the allocation proceeds, but it could warn. + */ + LockedPool(std::unique_ptr allocator, LockingFailed_Callback lf_cb_in = 0); + ~LockedPool(); + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get pool usage statistics */ + Stats stats() const; +private: + LockedPool(const LockedPool& other) = delete; // non construction-copyable + LockedPool& operator=(const LockedPool&) = delete; // non copyable + + std::unique_ptr allocator; + + /** Create an arena from locked pages */ + class LockedPageArena: public Arena + { + public: + LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align); + ~LockedPageArena(); + private: + void *base; + size_t size; + LockedPageAllocator *allocator; + }; + + bool new_arena(size_t size, size_t align); + + std::list arenas; + LockingFailed_Callback lf_cb; + size_t cumulative_bytes_locked; + /** Mutex protects access to this pool's data structures, including arenas. + */ + mutable std::mutex mutex; +}; + +/** + * Singleton class to keep track of locked (ie, non-swappable) memory, for use in + * std::allocator templates. + * + * Some implementations of the STL allocate memory in some constructors (i.e., see + * MSVC's vector implementation where it allocates 1 byte of memory in the allocator.) + * Due to the unpredictable order of static initializers, we have to make sure the + * LockedPoolManager instance exists before any other STL-based objects that use + * secure_allocator are created. So instead of having LockedPoolManager also be + * static-initialized, it is created on demand. + */ +class LockedPoolManager : public LockedPool +{ +public: + /** Return the current instance, or create it once */ + static LockedPoolManager& Instance() + { + std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance); + return *LockedPoolManager::_instance; + } + +private: + LockedPoolManager(std::unique_ptr allocator); + + /** Create a new LockedPoolManager specialized to the OS */ + static void CreateInstance(); + /** Called when locking fails, warn the user here */ + static bool LockingFailed(); + + static LockedPoolManager* _instance; + static std::once_flag init_flag; +}; + +#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H diff --git a/src/support/pagelocker.cpp b/src/support/pagelocker.cpp deleted file mode 100644 index 4514d08bb33fb..0000000000000 --- a/src/support/pagelocker.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2009-2013 The Bitcoin developers -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -#include "support/pagelocker.h" - -#if defined(HAVE_CONFIG_H) -#include "config/pivx-config.h" -#endif - -#ifdef WIN32 -#ifdef _WIN32_WINNT -#undef _WIN32_WINNT -#endif -#define _WIN32_WINNT 0x0501 -#define WIN32_LEAN_AND_MEAN 1 -#ifndef NOMINMAX -#define NOMINMAX -#endif -#include -// This is used to attempt to keep keying material out of swap -// Note that VirtualLock does not provide this as a guarantee on Windows, -// but, in practice, memory that has been VirtualLock'd almost never gets written to -// the pagefile except in rare circumstances where memory is extremely low. -#else -#include // for PAGESIZE -#include -#include // for sysconf -#endif - -LockedPageManager* LockedPageManager::_instance = NULL; -boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT; - -/** Determine system page size in bytes */ -static inline size_t GetSystemPageSize() -{ - size_t page_size; -#if defined(WIN32) - SYSTEM_INFO sSysInfo; - GetSystemInfo(&sSysInfo); - page_size = sSysInfo.dwPageSize; -#elif defined(PAGESIZE) // defined in limits.h - page_size = PAGESIZE; -#else // assume some POSIX OS - page_size = sysconf(_SC_PAGESIZE); -#endif - return page_size; -} - -bool MemoryPageLocker::Lock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualLock(const_cast(addr), len) != 0; -#else - return mlock(addr, len) == 0; -#endif -} - -bool MemoryPageLocker::Unlock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualUnlock(const_cast(addr), len) != 0; -#else - return munlock(addr, len) == 0; -#endif -} - -LockedPageManager::LockedPageManager() : LockedPageManagerBase(GetSystemPageSize()) -{ -} diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h deleted file mode 100644 index 17c75a77c7b4f..0000000000000 --- a/src/support/pagelocker.h +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2013 The Bitcoin developers -// Distributed under the MIT/X11 software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_ALLOCATORS_PAGELOCKER_H -#define BITCOIN_ALLOCATORS_PAGELOCKER_H - -#include "support/cleanse.h" - -#include - -#include -#include - -/** - * Thread-safe class to keep track of locked (ie, non-swappable) memory pages. - * - * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock() - * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when - * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page. - * - * @note By using a map from each page base address to lock count, this class is optimized for - * small objects that span up to a few pages, mostly smaller than a page. To support large allocations, - * something like an interval tree would be the preferred data structure. 
- */ -template -class LockedPageManagerBase -{ -public: - LockedPageManagerBase(size_t page_size) : page_size(page_size) - { - // Determine bitmask for extracting page from address - assert(!(page_size & (page_size - 1))); // size must be power of two - page_mask = ~(page_size - 1); - } - - ~LockedPageManagerBase() - { - } - - - // For all pages in affected range, increase lock count - void LockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - if (it == histogram.end()) // Newly locked page - { - locker.Lock(reinterpret_cast(page), page_size); - histogram.emplace(page, 1); - } else // Page was already locked; increase counter - { - it->second += 1; - } - } - } - - // For all pages in affected range, decrease lock count - void UnlockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - assert(it != histogram.end()); // Cannot unlock an area that was not locked - // Decrease counter for page, when it is zero, the page will be unlocked - it->second -= 1; - if (it->second == 0) // Nothing on the page anymore that keeps it locked - { - // Unlock page and remove the count from histogram - locker.Unlock(reinterpret_cast(page), page_size); - histogram.erase(it); - } - } - } - - // Get number of locked pages for diagnostics - int GetLockedPageCount() - { - boost::mutex::scoped_lock lock(mutex); - return histogram.size(); - } - -private: - Locker locker; - boost::mutex mutex; - size_t page_size, page_mask; - // map of page base address to lock count - typedef std::map Histogram; - Histogram histogram; -}; - - -/** - * OS-dependent memory page locking/unlocking. - * Defined as policy class to make stubbing for test possible. - */ -class MemoryPageLocker -{ -public: - /** Lock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Lock(const void* addr, size_t len); - /** Unlock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Unlock(const void* addr, size_t len); -}; - -/** - * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in - * std::allocator templates. - * - * Some implementations of the STL allocate memory in some constructors (i.e., see - * MSVC's vector implementation where it allocates 1 byte of memory in the allocator.) - * Due to the unpredictable order of static initializers, we have to make sure the - * LockedPageManager instance exists before any other STL-based objects that use - * secure_allocator are created. So instead of having LockedPageManager also be - * static-initialized, it is created on demand. 
- */ -class LockedPageManager : public LockedPageManagerBase -{ -public: - static LockedPageManager& Instance() - { - boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag); - return *LockedPageManager::_instance; - } - -private: - LockedPageManager(); - - static void CreateInstance() - { - // Using a local static instance guarantees that the object is initialized - // when it's first needed and also deinitialized after all objects that use - // it are done with it. I can think of one unlikely scenario where we may - // have a static deinitialization order/problem, but the check in - // LockedPageManagerBase's destructor helps us detect if that ever happens. - static LockedPageManager instance; - LockedPageManager::_instance = &instance; - } - - static LockedPageManager* _instance; - static boost::once_flag init_flag; -}; - -#endif // BITCOIN_ALLOCATORS_PAGELOCKER_H diff --git a/src/test/allocator_tests.cpp b/src/test/allocator_tests.cpp index 922cd7602476e..9976268ce9c2f 100644 --- a/src/test/allocator_tests.cpp +++ b/src/test/allocator_tests.cpp @@ -11,110 +11,214 @@ BOOST_FIXTURE_TEST_SUITE(allocator_tests, BasicTestingSetup) -// Dummy memory page locker for platform independent tests -static const void *last_lock_addr, *last_unlock_addr; -static size_t last_lock_len, last_unlock_len; -class TestLocker +BOOST_AUTO_TEST_CASE(arena_tests) { -public: - bool Lock(const void *addr, size_t len) + // Fake memory base address for testing + // without actually using memory. + void *synth_base = reinterpret_cast(0x08000000); + const size_t synth_size = 1024*1024; + Arena b(synth_base, synth_size, 16); + void *chunk = b.alloc(1000); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(chunk != nullptr); + BOOST_CHECK(b.stats().used == 1008); // Aligned to 16 + BOOST_CHECK(b.stats().total == synth_size); // Nothing has disappeared? 
+ b.free(chunk); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(b.stats().used == 0); + BOOST_CHECK(b.stats().free == synth_size); + try { // Test exception on double-free + b.free(chunk); + BOOST_CHECK(0); + } catch(std::runtime_error &) { - last_lock_addr = addr; - last_lock_len = len; - return true; } - bool Unlock(const void *addr, size_t len) - { - last_unlock_addr = addr; - last_unlock_len = len; - return true; + + void *a0 = b.alloc(128); + BOOST_CHECK(a0 == synth_base); // first allocation must start at beginning + void *a1 = b.alloc(256); + void *a2 = b.alloc(512); + BOOST_CHECK(b.stats().used == 896); + BOOST_CHECK(b.stats().total == synth_size); +#ifdef ARENA_DEBUG + b.walk(); +#endif + b.free(a0); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(b.stats().used == 768); + b.free(a1); + BOOST_CHECK(b.stats().used == 512); + void *a3 = b.alloc(128); +#ifdef ARENA_DEBUG + b.walk(); +#endif + BOOST_CHECK(b.stats().used == 640); + b.free(a2); + BOOST_CHECK(b.stats().used == 128); + b.free(a3); + BOOST_CHECK(b.stats().used == 0); + BOOST_CHECK(b.stats().total == synth_size); + BOOST_CHECK(b.stats().free == synth_size); + + std::vector addr; + BOOST_CHECK(b.alloc(0) == nullptr); // allocating 0 always returns nullptr +#ifdef ARENA_DEBUG + b.walk(); +#endif + // Sweeping allocate all memory + for (int x=0; x<1024; ++x) + addr.push_back(b.alloc(1024)); + BOOST_CHECK(addr[0] == synth_base); // first allocation must start at beginning + BOOST_CHECK(b.stats().free == 0); + BOOST_CHECK(b.alloc(1024) == nullptr); // memory is full, this must return nullptr + BOOST_CHECK(b.alloc(0) == nullptr); + for (int x=0; x<1024; ++x) + b.free(addr[x]); + addr.clear(); + BOOST_CHECK(b.stats().total == synth_size); + BOOST_CHECK(b.stats().free == synth_size); + + // Now in the other direction... + for (int x=0; x<1024; ++x) + addr.push_back(b.alloc(1024)); + for (int x=0; x<1024; ++x) + b.free(addr[1023-x]); + addr.clear(); + + // Now allocate in smaller unequal chunks, then deallocate haphazardly + // Not all the chunks will succeed allocating, but freeing nullptr is + // allowed so that is no problem. + for (int x=0; x<2048; ++x) + addr.push_back(b.alloc(x+1)); + for (int x=0; x<2048; ++x) + b.free(addr[((x*23)%2048)^242]); + addr.clear(); + + // Go entirely wild: free and alloc interleaved, + // generate targets and sizes using pseudo-randomness. 
+ for (int x=0; x<2048; ++x) + addr.push_back(0); + uint32_t s = 0x12345678; + for (int x=0; x<5000; ++x) { + int idx = s & (addr.size()-1); + if (s & 0x80000000) { + b.free(addr[idx]); + addr[idx] = 0; + } else if(!addr[idx]) { + addr[idx] = b.alloc((s >> 16) & 2047); + } + bool lsb = s & 1; + s >>= 1; + if (lsb) + s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 } -}; + for (void *ptr: addr) + b.free(ptr); + addr.clear(); -BOOST_AUTO_TEST_CASE(test_LockedPageManagerBase) + BOOST_CHECK(b.stats().total == synth_size); + BOOST_CHECK(b.stats().free == synth_size); +} + +/** Mock LockedPageAllocator for testing */ +class TestLockedPageAllocator: public LockedPageAllocator { - const size_t test_page_size = 4096; - LockedPageManagerBase lpm(test_page_size); - size_t addr; - last_lock_addr = last_unlock_addr = 0; - last_lock_len = last_unlock_len = 0; - - /* Try large number of small objects */ - addr = 0; - for(int i=0; i<1000; ++i) - { - lpm.LockRange(reinterpret_cast(addr), 33); - addr += 33; - } - /* Try small number of page-sized objects, straddling two pages */ - addr = test_page_size*100 + 53; - for(int i=0; i<100; ++i) - { - lpm.LockRange(reinterpret_cast(addr), test_page_size); - addr += test_page_size; - } - /* Try small number of page-sized objects aligned to exactly one page */ - addr = test_page_size*300; - for(int i=0; i<100; ++i) - { - lpm.LockRange(reinterpret_cast(addr), test_page_size); - addr += test_page_size; - } - /* one very large object, straddling pages */ - lpm.LockRange(reinterpret_cast(test_page_size*600+1), test_page_size*500); - BOOST_CHECK(last_lock_addr == reinterpret_cast(test_page_size*(600+500))); - /* one very large object, page aligned */ - lpm.LockRange(reinterpret_cast(test_page_size*1200), test_page_size*500-1); - BOOST_CHECK(last_lock_addr == reinterpret_cast(test_page_size*(1200+500-1))); - - BOOST_CHECK(lpm.GetLockedPageCount() == ( - (1000*33+test_page_size-1)/test_page_size + // small objects - 101 + 100 + // page-sized objects - 501 + 500)); // large objects - BOOST_CHECK((last_lock_len & (test_page_size-1)) == 0); // always lock entire pages - BOOST_CHECK(last_unlock_len == 0); // nothing unlocked yet - - /* And unlock again */ - addr = 0; - for(int i=0; i<1000; ++i) +public: + TestLockedPageAllocator(int count_in, int lockedcount_in): count(count_in), lockedcount(lockedcount_in) {} + void* AllocateLocked(size_t len, bool *lockingSuccess) { - lpm.UnlockRange(reinterpret_cast(addr), 33); - addr += 33; + *lockingSuccess = false; + if (count > 0) { + --count; + + if (lockedcount > 0) { + --lockedcount; + *lockingSuccess = true; + } + + return reinterpret_cast(0x08000000 + (count<<24)); // Fake address, do not actually use this memory + } + return 0; } - addr = test_page_size*100 + 53; - for(int i=0; i<100; ++i) + void FreeLocked(void* addr, size_t len) { - lpm.UnlockRange(reinterpret_cast(addr), test_page_size); - addr += test_page_size; } - addr = test_page_size*300; - for(int i=0; i<100; ++i) + size_t GetLimit() { - lpm.UnlockRange(reinterpret_cast(addr), test_page_size); - addr += test_page_size; + return std::numeric_limits::max(); } - lpm.UnlockRange(reinterpret_cast(test_page_size*600+1), test_page_size*500); - lpm.UnlockRange(reinterpret_cast(test_page_size*1200), test_page_size*500-1); +private: + int count; + int lockedcount; +}; - /* Check that everything is released */ - BOOST_CHECK(lpm.GetLockedPageCount() == 0); +BOOST_AUTO_TEST_CASE(lockedpool_tests_mock) +{ + // Test over three virtual arenas, of which one will succeed being locked + 
std::unique_ptr x(new TestLockedPageAllocator(3, 1)); + LockedPool pool(std::move(x)); + BOOST_CHECK(pool.stats().total == 0); + BOOST_CHECK(pool.stats().locked == 0); - /* A few and unlocks of size zero (should have no effect) */ - addr = 0; - for(int i=0; i<1000; ++i) - { - lpm.LockRange(reinterpret_cast(addr), 0); - addr += 1; - } - BOOST_CHECK(lpm.GetLockedPageCount() == 0); - addr = 0; - for(int i=0; i<1000; ++i) + void *a0 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a0); + BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE); + void *a1 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a1); + void *a2 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a2); + void *a3 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a3); + void *a4 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a4); + void *a5 = pool.alloc(LockedPool::ARENA_SIZE / 2); + BOOST_CHECK(a5); + // We've passed a count of three arenas, so this allocation should fail + void *a6 = pool.alloc(16); + BOOST_CHECK(!a6); + + pool.free(a0); + pool.free(a2); + pool.free(a4); + pool.free(a1); + pool.free(a3); + pool.free(a5); + BOOST_CHECK(pool.stats().total == 3*LockedPool::ARENA_SIZE); + BOOST_CHECK(pool.stats().locked == LockedPool::ARENA_SIZE); + BOOST_CHECK(pool.stats().used == 0); +} + +// These tests used the live LockedPoolManager object, this is also used +// by other tests so the conditions are somewhat less controllable and thus the +// tests are somewhat more error-prone. +BOOST_AUTO_TEST_CASE(lockedpool_tests_live) +{ + LockedPoolManager &pool = LockedPoolManager::Instance(); + LockedPool::Stats initial = pool.stats(); + + void *a0 = pool.alloc(16); + BOOST_CHECK(a0); + // Test reading and writing the allocated memory + *((uint32_t*)a0) = 0x1234; + BOOST_CHECK(*((uint32_t*)a0) == 0x1234); + + pool.free(a0); + try { // Test exception on double-free + pool.free(a0); + BOOST_CHECK(0); + } catch(std::runtime_error &) { - lpm.UnlockRange(reinterpret_cast(addr), 0); - addr += 1; } - BOOST_CHECK(lpm.GetLockedPageCount() == 0); - BOOST_CHECK((last_unlock_len & (test_page_size-1)) == 0); // always unlock entire pages + // If more than one new arena was allocated for the above tests, something is wrong + BOOST_CHECK(pool.stats().total <= (initial.total + LockedPool::ARENA_SIZE)); + // Usage must be back to where it started + BOOST_CHECK(pool.stats().used == initial.used); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/utilstrencodings.h b/src/utilstrencodings.h index 50ace81454251..dc4fca797af98 100644 --- a/src/utilstrencodings.h +++ b/src/utilstrencodings.h @@ -11,6 +11,7 @@ #define BITCOIN_UTILSTRENCODINGS_H #include "support/allocators/secure.h" +#include #include #include #include From cd34088850170c784ff66699dbffa251636927e8 Mon Sep 17 00:00:00 2001 From: "Wladimir J. van der Laan" Date: Sun, 18 Sep 2016 10:22:30 +0200 Subject: [PATCH 3/5] rpc: Add `getmemoryinfo` call >>> backports bitcoin/bitcoin@65679990967c0fa4fc0320907d9cd33c801faf5a ``` getmemoryinfo Returns an object containing information about memory usage. Result: { "locked": { (json object) Information about locked memory manager "used": xxxxx, (numeric) Number of bytes used "free": xxxxx, (numeric) Number of bytes available in current arenas "total": xxxxxxx, (numeric) Total number of bytes managed "locked": xxxxxx, (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk. 
} } Examples: > bitcoin-cli getmemoryinfo > curl --user myusername --data-binary '{"jsonrpc": "1.0", "id":"curltest", "method": "getmemoryinfo", "params": [] }' -H 'content-type: text/plain;' http://127.0.0.1:8332/ ``` --- src/rpc/misc.cpp | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index 0033c334c04bc..1818e09161523 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -700,12 +700,55 @@ UniValue logging(const JSONRPCRequest& request) return result; } +static UniValue RPCLockedMemoryInfo() +{ + LockedPool::Stats stats = LockedPoolManager::Instance().stats(); + UniValue obj(UniValue::VOBJ); + obj.pushKV("used", uint64_t(stats.used)); + obj.pushKV("free", uint64_t(stats.free)); + obj.pushKV("total", uint64_t(stats.total)); + obj.pushKV("locked", uint64_t(stats.locked)); + obj.pushKV("chunks_used", uint64_t(stats.chunks_used)); + obj.pushKV("chunks_free", uint64_t(stats.chunks_free)); + return obj; +} + +UniValue getmemoryinfo(const JSONRPCRequest& request) +{ + /* Please, avoid using the word "pool" here in the RPC interface or help, + * as users will undoubtedly confuse it with the other "memory pool" + */ + if (request.fHelp || request.params.size() != 0) + throw std::runtime_error( + "getmemoryinfo\n" + "Returns an object containing information about memory usage.\n" + "\nResult:\n" + "{\n" + " \"locked\": { (json object) Information about locked memory manager\n" + " \"used\": xxxxx, (numeric) Number of bytes used\n" + " \"free\": xxxxx, (numeric) Number of bytes available in current arenas\n" + " \"total\": xxxxxxx, (numeric) Total number of bytes managed\n" + " \"locked\": xxxxxx, (numeric) Amount of bytes that succeeded locking. If this number is smaller than total, locking pages failed at some point and key data could be swapped to disk.\n" + " \"chunks_used\": xxxxx, (numeric) Number allocated chunks\n" + " \"chunks_free\": xxxxx, (numeric) Number unused chunks\n" + " }\n" + "}\n" + "\nExamples:\n" + + HelpExampleCli("getmemoryinfo", "") + + HelpExampleRpc("getmemoryinfo", "") + ); + UniValue obj(UniValue::VOBJ); + obj.pushKV("locked", RPCLockedMemoryInfo()); + return obj; +} + static const CRPCCommand commands[] = { // category name actor (function) okSafeMode // --------------------- ------------------------ ----------------------- ---------- { "control", "getinfo", &getinfo, true }, /* uses wallet if enabled */ { "control", "mnsync", &mnsync, true }, { "control", "spork", &spork, true }, + { "control", "getmemoryinfo", &getmemoryinfo, true }, { "util", "validateaddress", &validateaddress, true }, /* uses wallet if enabled */ { "util", "createmultisig", &createmultisig, true }, { "util", "logging", &logging, true }, From 68a3f8f665cb47c039317fdde8f46d529bb98ca7 Mon Sep 17 00:00:00 2001 From: "Wladimir J. 
van der Laan" Date: Tue, 18 Oct 2016 17:49:02 +0200 Subject: [PATCH 4/5] bench: Add benchmark for lockedpool allocation/deallocation --- src/Makefile.bench.include | 3 ++- src/bench/lockedpool.cpp | 47 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 1 deletion(-) create mode 100644 src/bench/lockedpool.cpp diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include index e55e80646146e..4c450080319e7 100644 --- a/src/Makefile.bench.include +++ b/src/Makefile.bench.include @@ -11,11 +11,12 @@ bench_bench_pivx_SOURCES = \ bench/bench_pivx.cpp \ bench/bench.cpp \ bench/bench.h \ - bench/checkblock.cpp \ bench/Examples.cpp \ bench/base58.cpp \ + bench/checkblock.cpp \ bench/checkqueue.cpp \ bench/crypto_hash.cpp \ + bench/lockedpool.cpp \ bench/perf.cpp \ bench/perf.h \ bench/prevector.cpp diff --git a/src/bench/lockedpool.cpp b/src/bench/lockedpool.cpp new file mode 100644 index 0000000000000..5df5b1ac6e640 --- /dev/null +++ b/src/bench/lockedpool.cpp @@ -0,0 +1,47 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include "bench.h" + +#include "support/lockedpool.h" + +#include +#include + +#define ASIZE 2048 +#define BITER 5000 +#define MSIZE 2048 + +static void LockedPool(benchmark::State& state) +{ + void *synth_base = reinterpret_cast(0x08000000); + const size_t synth_size = 1024*1024; + Arena b(synth_base, synth_size, 16); + + std::vector addr; + for (int x=0; x> 16) & (MSIZE-1)); + } + bool lsb = s & 1; + s >>= 1; + if (lsb) + s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0 + } + } + for (void *ptr: addr) + b.free(ptr); + addr.clear(); +} + +BENCHMARK(LockedPool); + From 1c04a3500848fb3c7513cd352b7422b4b131f42d Mon Sep 17 00:00:00 2001 From: random-zebra Date: Sun, 4 Apr 2021 09:17:24 +0200 Subject: [PATCH 5/5] [Trivial] Use C++11 nulltpr instead of NULL macro in allocators files --- src/support/allocators/secure.h | 2 +- src/support/allocators/zeroafterfree.h | 4 ++-- src/support/lockedpool.cpp | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index f88bef9c681d5..274a8661ad54d 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -46,7 +46,7 @@ struct secure_allocator : public std::allocator { void deallocate(T* p, std::size_t n) { - if (p != NULL) { + if (p != nullptr) { memory_cleanse(p, sizeof(T) * n); } LockedPoolManager::Instance().free(p); diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h index 9a65517ededfe..12aafe0a18309 100644 --- a/src/support/allocators/zeroafterfree.h +++ b/src/support/allocators/zeroafterfree.h @@ -36,7 +36,7 @@ struct zero_after_free_allocator : public std::allocator { void deallocate(T* p, std::size_t n) { - if (p != NULL) + if (p != nullptr) memory_cleanse(p, sizeof(T) * n); std::allocator::deallocate(p, n); } @@ -45,4 +45,4 @@ struct zero_after_free_allocator : public std::allocator { // Byte-vector that clears its contents before deletion. 
typedef std::vector > CSerializeData; -#endif // BITCOIN_ALLOCATORS_ZEROAFTERFREE_H \ No newline at end of file +#endif // BITCOIN_ALLOCATORS_ZEROAFTERFREE_H diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index 583ab3f7369aa..f75be1e274a70 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -26,7 +26,7 @@ #include // for sysconf #endif -LockedPoolManager* LockedPoolManager::_instance = NULL; +LockedPoolManager* LockedPoolManager::_instance = nullptr; std::once_flag LockedPoolManager::init_flag; /*******************************************************************************/ @@ -79,7 +79,7 @@ void* Arena::alloc(size_t size) void Arena::free(void *ptr) { - // Freeing the NULL pointer is OK. + // Freeing the nullptr pointer is OK. if (ptr == nullptr) { return; }
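Taken together, the series leaves two allocator typedefs with clearly separated responsibilities. An illustrative sketch of how they are consumed, assuming only the SecureString and CSerializeData definitions from the headers above:

```cpp
#include "support/allocators/secure.h"        // secure_allocator, SecureString
#include "support/allocators/zeroafterfree.h" // zero_after_free_allocator, CSerializeData

void allocator_usage_example()
{
    // Backed by secure_allocator -> LockedPoolManager: pages are locked
    // (mlock/VirtualLock) and the contents are wiped on deallocation.
    SecureString passphrase("correct horse battery staple");

    // Backed by zero_after_free_allocator: no page locking; the buffer is
    // only zeroed with memory_cleanse when it is released.
    CSerializeData buffer(64, '\0');

    (void)passphrase;
    (void)buffer;
}
```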