Merge pull request #675 from evoskuil/master
Implement linked-linear memory arena.
evoskuil committed Aug 20, 2024
2 parents ee3cd1c + 61403ae commit a138508
Showing 10 changed files with 178 additions and 96 deletions.
61 changes: 47 additions & 14 deletions include/bitcoin/node/block_arena.hpp
@@ -26,38 +26,71 @@
namespace libbitcoin {
namespace node {

/// Thread UNSAFE linear memory arena.
/// Caller must manage capacity to ensure buffer is not overflowed.
/// Thread UNSAFE linked-linear memory arena.
class BCN_API block_arena final
: public arena
{
public:
DELETE_COPY(block_arena);

block_arena(size_t size=zero) NOEXCEPT;
block_arena(size_t multiple) NOEXCEPT;
block_arena(block_arena&& other) NOEXCEPT;
~block_arena() NOEXCEPT;

block_arena& operator=(block_arena&& other) NOEXCEPT;

void* start() NOEXCEPT override;
size_t detach() NOEXCEPT override;
void release(void* ptr, size_t bytes) NOEXCEPT override;
/// Start an allocation of linked chunks.
void* start(size_t wire_size) THROWS override;

/// Finalize allocation and reset allocator, return total allocation.
size_t detach() THROWS override;

/// Release all chunks chained to the address.
void release(void* address) NOEXCEPT override;

protected:
struct record{ void* next; size_t size; };

/// Link a memory chunk to the allocated list.
void* link_new_chunk(size_t minimum=zero) THROWS;

/// Trim chunk to offset_, invalidates capacity.
void trim_to_offset() THROWS;

/// Close out chunk with link to next.
void set_record(uint8_t* next_address, size_t own_size) NOEXCEPT;

/// Get size of address chunk and address of next chunk (or nullptr).
record get_record(uint8_t* address) const NOEXCEPT;

/// Number of bytes remaining to be allocated.
size_t capacity() const NOEXCEPT;

/// Reset members (does not free).
size_t reset(size_t chunk_size=zero) NOEXCEPT;

private:
static constexpr size_t record_size = sizeof(record);
constexpr size_t to_aligned(size_t value, size_t align) NOEXCEPT
{
using namespace system;
BC_ASSERT_MSG(is_nonzero(align), "align zero");
BC_ASSERT_MSG(!is_add_overflow(value, sub1(align)), "overflow");
BC_ASSERT_MSG(power2(floored_log2(align)) == align, "align power");
BC_ASSERT_MSG(align <= alignof(std::max_align_t), "align overflow");
return (value + sub1(align)) & ~sub1(align);
}

void* do_allocate(size_t bytes, size_t align) THROWS override;
void do_deallocate(void* ptr, size_t bytes, size_t align) NOEXCEPT override;
bool do_is_equal(const arena& other) const NOEXCEPT override;

// Number of bytes remaining to be allocated.
size_t capacity() const NOEXCEPT;

// These are thread safe (set only construct).
uint8_t* memory_map_{ nullptr };
size_t size_;

// This is unprotected, caller must guard.
// These are unprotected, caller must guard.
uint8_t* memory_map_;
size_t multiple_;
size_t offset_;
size_t total_;
size_t size_;

};

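A minimal caller-side sketch of the lifecycle declared above, assuming the arena base class exposes the usual allocate(bytes, align) entry point that forwards to do_allocate; the include path comes from this header and the sizes are illustrative only, not part of this change.

#include <bitcoin/node/block_arena.hpp>

// Hypothetical usage sketch (illustrative sizes).
block_arena arena{ 20 };                          // chunks sized at 20x wire size
void* chain = arena.start(1'000'000);             // first chunk for a ~1 MB block
void* slab = arena.allocate(64, alignof(std::max_align_t)); // carve from chunk;
                                                  // a new chunk is linked on overflow
const size_t total = arena.detach();              // close the chain, reset allocator
// ...once the deserialized block is no longer needed...
arena.release(chain);                             // free every chunk in the chain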
8 changes: 4 additions & 4 deletions include/bitcoin/node/block_memory.hpp
@@ -27,17 +27,17 @@
namespace libbitcoin {
namespace node {

/// Thread SAFE linear memory allocation and tracking.
/// Thread SAFE linked-linear arena allocator.
class BCN_API block_memory final
: public network::memory
{
public:
DELETE_COPY_MOVE_DESTRUCT(block_memory);

/// Default allocate each arena to preclude allcation and locking.
block_memory(size_t bytes, size_t threads) NOEXCEPT;
/// Per thread multiple of wire size for each linear allocation chunk.
block_memory(size_t multiple, size_t threads) NOEXCEPT;

/// Each thread obtains an arena of the same size.
/// Each thread obtains an arena.
arena* get_arena() NOEXCEPT override;

protected:
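A hedged sketch of how the pool is wired, mirroring the full_node.cpp change below; the thread count and multiple are illustrative values, and per-thread affinity of get_arena() is assumed from the one-arena-per-thread construction.

#include <bitcoin/node/block_memory.hpp>

// Hypothetical wiring sketch (values illustrative).
block_memory memory{ 20, 16 };              // one arena per network thread
arena* per_thread = memory.get_arena();     // the calling thread obtains its arena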
3 changes: 1 addition & 2 deletions include/bitcoin/node/settings.hpp
@@ -74,7 +74,7 @@ class BCN_API settings
/// Properties.
bool headers_first;
float allowed_deviation;
uint64_t allocation_bytes;
uint16_t allocation_multiple;
uint64_t snapshot_bytes;
uint32_t snapshot_valid;
uint32_t maximum_height;
@@ -84,7 +84,6 @@
uint32_t threads;

/// Helpers.
virtual size_t allocation() const NOEXCEPT;
virtual size_t maximum_height_() const NOEXCEPT;
virtual size_t maximum_concurrency_() const NOEXCEPT;
virtual network::steady_clock::duration sample_period() const NOEXCEPT;
168 changes: 111 additions & 57 deletions src/block_arena.cpp
@@ -18,112 +18,172 @@
*/
#include <bitcoin/node/block_arena.hpp>

#include <algorithm>
#include <shared_mutex>
#include <bitcoin/system.hpp>

namespace libbitcoin {

BC_DEBUG_ONLY(constexpr auto max_align = alignof(std::max_align_t);)

template <typename Type, if_unsigned_integer<Type> = true>
constexpr Type to_aligned(Type value, Type alignment) NOEXCEPT
{
return (value + sub1(alignment)) & ~sub1(alignment);
}

namespace node {

using namespace system;

BC_PUSH_WARNING(NO_MALLOC_OR_FREE)
BC_PUSH_WARNING(NO_REINTERPRET_CAST)
BC_PUSH_WARNING(NO_POINTER_ARITHMETIC)
BC_PUSH_WARNING(THROW_FROM_NOEXCEPT)

// "If size is zero, the behavior of malloc is implementation-defined. For
// example, a null pointer may be returned. Alternatively, a non-null pointer
// may be returned; but such a pointer should not be dereferenced, and should
// be passed to free to avoid memory leaks."
// en.cppreference.com/w/c/memory/malloc
// construct/destruct/assign
// ----------------------------------------------------------------------------

block_arena::block_arena(size_t size) NOEXCEPT
: size_{ size },
offset_{ size }
block_arena::block_arena(size_t multiple) NOEXCEPT
: memory_map_{ nullptr },
multiple_{ multiple },
offset_{ zero },
total_{ zero },
size_{ zero }
{
}

block_arena::block_arena(block_arena&& other) NOEXCEPT
: memory_map_{ other.memory_map_ },
size_{ other.size_ },
offset_{ other.offset_ }
multiple_{ other.multiple_ },
offset_{ other.offset_ },
total_{ other.total_ },
size_{ other.size_ }
{
// Prevents free(memory_map_) as responsibility is passed to this object.
other.memory_map_ = nullptr;
}

block_arena::~block_arena() NOEXCEPT
{
release(memory_map_, offset_);
release(memory_map_);
}

block_arena& block_arena::operator=(block_arena&& other) NOEXCEPT
{
memory_map_ = other.memory_map_;
size_ = other.size_;
multiple_ = other.multiple_;
offset_ = other.offset_;
total_ = other.total_;
size_ = other.size_;

// Prevents free(memory_map_) as responsibility is passed to this object.
other.memory_map_ = nullptr;
return *this;
}

void* block_arena::start() NOEXCEPT
// public
// ----------------------------------------------------------------------------

void* block_arena::start(size_t wire_size) THROWS
{
release(memory_map_, offset_);
memory_map_ = system::pointer_cast<uint8_t>(std::malloc(size_));
if (is_null(memory_map_))
if (is_multiply_overflow(wire_size, multiple_))
throw allocation_exception{};

offset_ = zero;
return memory_map_;
release(memory_map_);
reset(multiple_ * wire_size);
return link_new_chunk();
}

size_t block_arena::detach() NOEXCEPT
size_t block_arena::detach() THROWS
{
const auto size = offset_;
const auto map = std::realloc(memory_map_, size);
trim_to_offset();
set_record(nullptr, offset_);
return reset();
}

// Memory map must not move.
if (map != memory_map_)
void block_arena::release(void* address) NOEXCEPT
{
while (!is_null(address))
{
const auto value = get_record(pointer_cast<uint8_t>(address));
std::free(address/*, value.size */);
address = value.next;
}
}

// protected
// ----------------------------------------------------------------------------

void* block_arena::link_new_chunk(size_t minimum) THROWS
{
// Ensure next allocation accommodates record plus current request.
BC_ASSERT(!is_add_overflow(minimum, record_size));
size_ = std::max(size_, minimum + record_size);

// Allocate size to temporary.
const auto map = pointer_cast<uint8_t>(std::malloc(size_));
if (is_null(map))
throw allocation_exception{};

memory_map_ = nullptr;
offset_ = size_;
return size;
// Set previous chunk record pointer to new allocation and own size.
set_record(map, offset_);
offset_ = record_size;
return memory_map_ = map;
}

void block_arena::release(void* ptr, size_t) NOEXCEPT
void block_arena::trim_to_offset() THROWS
{
// Does not affect member state.
if (!is_null(ptr))
std::free(ptr);
// Memory map must not move. Move by realloc is allowed but not expected
// for truncation. If it moves, this should drop into mmap/munmap/mremap.
////const auto map = std::realloc(memory_map_, offset_);
////if (map != memory_map_)
//// throw allocation_exception{};
}

void* block_arena::do_allocate(size_t bytes, size_t align) THROWS
void block_arena::set_record(uint8_t* next_address, size_t own_size) NOEXCEPT
{
// Don't set previous when current is the first chunk.
if (is_null(memory_map_))
return;

reinterpret_cast<record&>(*memory_map_) = { next_address, own_size };
total_ += own_size;
}

block_arena::record block_arena::get_record(uint8_t* address) const NOEXCEPT
{
return reinterpret_cast<const record&>(*address);
}

size_t block_arena::capacity() const NOEXCEPT
{
using namespace system;
BC_ASSERT_MSG(is_nonzero(align), "align zero");
BC_ASSERT_MSG(align <= max_align, "align overflow");
BC_ASSERT_MSG(power2(floored_log2(align)) == align, "align power");
BC_ASSERT_MSG(!is_add_overflow(bytes, sub1(align)), "alignment overflow");
return floored_subtract(size_, offset_);
}

size_t block_arena::reset(size_t chunk_size) NOEXCEPT
{
// Chunk resets to nullptr/full with no total allocation.
const auto total = total_;
memory_map_ = nullptr;
offset_ = chunk_size;
size_ = chunk_size;
total_ = zero;
return total;
}

// protected interface
// ----------------------------------------------------------------------------

void* block_arena::do_allocate(size_t bytes, size_t align) THROWS
{
const auto aligned_offset = to_aligned(offset_, align);
const auto padding = aligned_offset - offset_;

BC_ASSERT(!system::is_add_overflow(padding, bytes));
const auto allocation = padding + bytes;

////BC_ASSERT_MSG(allocation <= capacity(), "buffer overflow");
if (allocation > capacity())
throw allocation_exception{};

offset_ += allocation;
return memory_map_ + aligned_offset;
{
trim_to_offset();
link_new_chunk(allocation);
return do_allocate(bytes, align);
}
else
{
offset_ += allocation;
return memory_map_ + aligned_offset;
}
}

void block_arena::do_deallocate(void*, size_t, size_t) NOEXCEPT
@@ -136,12 +196,6 @@ bool block_arena::do_is_equal(const arena& other) const NOEXCEPT
return &other == this;
}

// private
size_t block_arena::capacity() const NOEXCEPT
{
return system::floored_subtract(size_, offset_);
}

BC_POP_WARNING()
BC_POP_WARNING()
BC_POP_WARNING()
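A self-contained sketch of the two mechanisms implemented above, assuming only standard malloc/free semantics: the power-of-two round-up used by do_allocate, and the per-chunk record header that release walks. The names and constants are illustrative, not part of this diff.

#include <cstddef>
#include <cstdlib>

// Round an offset up to a power-of-two alignment, e.g. 13 -> 16 for align 8.
constexpr std::size_t aligned(std::size_t value, std::size_t align) noexcept
{
    return (value + (align - 1u)) & ~(align - 1u);
}
static_assert(aligned(13, 8) == 16);
static_assert(aligned(16, 8) == 16);

// Each chunk begins with a record{ next, size }; releasing walks the chain.
struct record { void* next; std::size_t size; };
inline void release_chain(void* address) noexcept
{
    while (address != nullptr)
    {
        const auto next = static_cast<record*>(address)->next;
        std::free(address);
        address = next;
    }
}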
4 changes: 2 additions & 2 deletions src/block_memory.cpp
@@ -27,12 +27,12 @@ namespace node {

BC_PUSH_WARNING(NO_THROW_IN_NOEXCEPT)

block_memory::block_memory(size_t bytes, size_t threads) NOEXCEPT
block_memory::block_memory(size_t multiple, size_t threads) NOEXCEPT
: count_{}, arenas_{}
{
arenas_.reserve(threads);
for (auto index = zero; index < threads; ++index)
arenas_.emplace_back(bytes);
arenas_.emplace_back(multiple);
}

arena* block_memory::get_arena() NOEXCEPT
3 changes: 2 additions & 1 deletion src/full_node.cpp
@@ -41,7 +41,8 @@ full_node::full_node(query& query, const configuration& configuration,
: p2p(configuration.network, log),
config_(configuration),
query_(query),
memory_(configuration.node.allocation(), configuration.network.threads),
memory_(configuration.node.allocation_multiple,
configuration.network.threads),
chaser_block_(*this),
chaser_header_(*this),
chaser_check_(*this),
6 changes: 3 additions & 3 deletions src/parser.cpp
@@ -898,9 +898,9 @@ options_metadata parser::load_settings() THROWS
"Allowable underperformance standard deviation, defaults to 1.5 (0 disables)."
)
(
"node.allocation_bytes",
value<uint64_t>(&configured.node.allocation_bytes),
"Preallocated block buffer for each network thread, defaults to 52000000."
"node.allocation_multiple",
value<uint16_t>(&configured.node.allocation_multiple),
"Per thread block deserialization buffer multiple of wire size, defaults to 20."
)
(
"node.maximum_height",
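For scale, with node.allocation_multiple left at its default of 20, a block arriving at roughly 2 MB on the wire reserves a first chunk of about 40 MB (20 × 2 MB) for its deserialized form, growing only by further linked chunks if that proves insufficient; the removed allocation_bytes setting instead reserved a flat 52,000,000 bytes per thread regardless of block size. The 2 MB wire size is illustrative, and the configuration syntax below is assumed from the usual INI-style settings file:

[node]
allocation_multiple = 20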