#pragma once

#include "types.h"
#include "mutex.h"
#include "cond.h"
#include "util/atomic.hpp"
#include "util/typeindices.hpp"
#include "VirtualMemory.h"
#include <utility>

namespace utils
{
	class typemap;

	template <typename T>
	class typeptr;

	class typeptr_base;

	// Special tag for typemap access: request free id
	constexpr struct id_new_t{} id_new{};

	// Special tag for typemap access: unconditionally access the only object (max_count = 1 only)
	constexpr struct id_any_t{} id_any{};

	// Special tag for typemap access: like id_any but also default-construct the object if not exists
	constexpr struct id_always_t{} id_always{};

	// Aggregate with information for more accurate object retrieval, isn't accepted internally
	struct weak_typeptr
	{
		uint id;
		uint type;

		// Stamp isn't automatically stored and checked anywhere
		ullong stamp;
	};

	// Detect id transformation trait (multiplier)
	template <typename T, typename = void>
	struct typeinfo_step
	{
		static constexpr uint step = 1;
	};

	template <typename T>
	struct typeinfo_step<T, std::void_t<decltype(std::decay_t<T>::id_step)>>
	{
		static constexpr uint step = uint{std::decay_t<T>::id_step};
	};

	// Detect id transformation trait (addend)
	template <typename T, typename = void>
	struct typeinfo_bias
	{
		static constexpr uint bias = 0;
	};

	template <typename T>
	struct typeinfo_bias<T, std::void_t<decltype(std::decay_t<T>::id_base)>>
	{
		static constexpr uint bias = uint{std::decay_t<T>::id_base};
	};

	// Detect max number of objects, default = 1
	template <typename T, typename = void>
	struct typeinfo_count
	{
		static constexpr uint max_count = 1;
	};

	template <typename T>
	struct typeinfo_count<T, std::void_t<decltype(std::decay_t<T>::id_count)>>
	{
		static constexpr uint max_count = uint{std::decay_t<T>::id_count};

		static_assert(ullong{max_count} * typeinfo_step<T>::step <= 0x1'0000'0000ull);
	};

	// Detect operator ->
	template <typename T, typename = void>
	struct typeinfo_pointer
	{
		static constexpr bool is_ptr = false;
	};

	template <typename T>
	struct typeinfo_pointer<T, std::void_t<decltype(&std::decay_t<T>::operator->)>>
	{
		static constexpr bool is_ptr = true;
	};
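
	// Trait usage sketch (illustrative only; "sample_object" is a hypothetical type,
	// not part of this header). The traits above detect these optional static members
	// via SFINAE:
	//
	//   struct sample_object
	//   {
	//       static constexpr uint id_base  = 0x100; // typeinfo_bias: ids start at 0x100
	//       static constexpr uint id_step  = 4;     // typeinfo_step: ids are 0x100, 0x104, ...
	//       static constexpr uint id_count = 64;    // typeinfo_count: up to 64 live objects
	//   };
	//
	// A type that defines none of them gets the defaults: bias 0, step 1, max_count 1.
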
	// Type information
	struct typeinfo_base
	{
		uint size = 0;
		uint align = 0;
		uint count = 0;
		void(*clean)(class typemap_block*) = 0;

		constexpr typeinfo_base() noexcept = default;

		template <typename T>
		static void call_destructor(typemap_block* ptr) noexcept;

		template <typename T>
		static constexpr typeinfo_base make_typeinfo() noexcept
		{
			static_assert(alignof(T) < 4096);

			typeinfo_base r;
			r.size = uint{sizeof(T)};
			r.align = uint{alignof(T)};
			r.count = typeinfo_count<T>::max_count;
			r.clean = &call_destructor<T>;
			return r;
		}
	};

	// Internal, control block for a particular object
	class typemap_block
	{
		friend typemap;

		template <typename T>
		friend class typeptr;
		friend class typeptr_base;

		shared_mutex m_mutex;
		atomic_t<uint> m_type;

	public:
		typemap_block() = default;

		// Get pointer to the object of type T, with respect to alignment
		template <typename T, uint SelfSize = 8>
		T* get_ptr()
		{
			constexpr uint offset = alignof(T) < SelfSize ? ::align(SelfSize, alignof(T)) : alignof(T);
			return reinterpret_cast<T*>(reinterpret_cast<uchar*>(this) + offset);
		}
	};

	static_assert(std::is_standard_layout_v<typemap_block>);
	static_assert(sizeof(typemap_block) == 8);

	template <typename T>
	void typeinfo_base::call_destructor(typemap_block* ptr) noexcept
	{
		ptr->get_ptr<T>()->~T();
	}

	// An object of type T paired with atomic refcounter
	template <typename T>
	class refctr final
	{
		atomic_t<std::size_t> m_ref{1};

	public:
		T object;

		template <typename... Args>
		refctr(Args&&... args)
			: object(std::forward<Args>(args)...)
		{
		}

		void add_ref() noexcept
		{
			m_ref++;
		}

		std::size_t remove_ref() noexcept
		{
			return --m_ref;
		}
	};

	// Simplified "shared" ptr making use of refctr<T> class
	template <typename T>
	class refptr final
	{
		refctr<T>* m_ptr = nullptr;

		void destroy()
		{
			if (m_ptr && !m_ptr->remove_ref())
				delete m_ptr;
		}

	public:
		constexpr refptr() = default;

		// Construct directly from refctr<T> pointer
		explicit refptr(refctr<T>* ptr) noexcept
			: m_ptr(ptr)
		{
		}

		refptr(const refptr& rhs) noexcept
			: m_ptr(rhs.m_ptr)
		{
			if (m_ptr)
				m_ptr->add_ref();
		}

		refptr(refptr&& rhs) noexcept
			: m_ptr(rhs.m_ptr)
		{
			rhs.m_ptr = nullptr;
		}

		~refptr()
		{
			destroy();
		}

		refptr& operator =(const refptr& rhs) noexcept
		{
			// Add the new reference first so self-assignment remains safe
			if (rhs.m_ptr)
				rhs.m_ptr->add_ref();

			destroy();
			m_ptr = rhs.m_ptr;
			return *this;
		}

		refptr& operator =(refptr&& rhs) noexcept
		{
			std::swap(m_ptr, rhs.m_ptr);
			return *this;
		}

		void reset() noexcept
		{
			destroy();
			m_ptr = nullptr;
		}

		refctr<T>* release() noexcept
		{
			return std::exchange(m_ptr, nullptr);
		}

		void swap(refptr&& rhs) noexcept
		{
			std::swap(m_ptr, rhs.m_ptr);
		}

		refctr<T>* get() const noexcept
		{
			return m_ptr;
		}

		T& operator *() const noexcept
		{
			return m_ptr->object;
		}

		T* operator ->() const noexcept
		{
			return &m_ptr->object;
		}

		explicit operator bool() const noexcept
		{
			return !!m_ptr;
		}
	};
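
	// Usage sketch for refctr/refptr (illustrative only):
	//
	//   refptr<int> a(new refctr<int>(42)); // refcount starts at 1
	//   refptr<int> b = a;                  // copy adds a reference (refcount 2)
	//   *b = 43;                            // access the shared object
	//   a.reset();                          // drops one reference (refcount 1)
	//                                       // b's destructor deletes the refctr<int>
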
	// Internal, typemap control block for a particular type
	struct alignas(64) typemap_head
	{
		// Pointer to the uninitialized storage
		uchar* m_ptr = nullptr;

		// Free ID counter
		atomic_t<uint> m_sema{0};

		// Max ID ever used + 1
		atomic_t<uint> m_limit{0};

		// Increased on each constructor call
		atomic_t<ullong> m_create_count{0};

		// Increased on each destructor call
		atomic_t<ullong> m_destroy_count{0};

		// Aligned size of the storage for each object
		uint m_ssize = 0;

		// Total object count in the storage
		uint m_count = 0;

		// Destructor caller; related to particular type, not the current storage
		void(*clean)(typemap_block*) = 0;
	};

	class typeptr_base
	{
		typemap_head* m_head;
		typemap_block* m_block;

		template <typename T>
		friend class typeptr;

		friend typemap;
	};

	// Pointer + lock object, possible states:
	// 1) Invalid - bad id, no space, or after release()
	// 2) Null - locked, but the object does not exist
	// 3) OK - locked and the object exists
	template <typename T>
	class typeptr : typeptr_base
	{
		using typeptr_base::m_head;
		using typeptr_base::m_block;

		friend typemap;

		void release()
		{
			if constexpr (type_const() && type_volatile())
			{
			}
			else if constexpr (type_const() || type_volatile())
			{
				m_block->m_mutex.unlock_shared();
			}
			else
			{
				m_block->m_mutex.unlock();
			}

			if (m_block->m_type == 0)
			{
				if constexpr (typeinfo_count<T>::max_count > 1)
				{
					// Return semaphore
					m_head->m_sema--;
				}
			}
		}

	public:
		constexpr typeptr(typeptr_base base) noexcept
			: typeptr_base(base)
		{
		}

		typeptr(const typeptr&) = delete;

		typeptr& operator=(const typeptr&) = delete;

		~typeptr()
		{
			if (m_block)
			{
				release();
			}
		}

		// Verify the object exists
		bool exists() const noexcept
		{
			return m_block->m_type != 0;
		}

		// Verify the state is valid
		explicit operator bool() const noexcept
		{
			return m_block != nullptr;
		}

		// Get the pointer to the existing object
		template <typename D = std::decay_t<T>>
		auto get() const noexcept
		{
			ASSUME(m_block->m_type != 0);
			return m_block->get_ptr<D>();
		}

		auto operator->() const noexcept
		{
			// Invoke object's operator -> if available
			if constexpr (typeinfo_pointer<T>::is_ptr)
			{
				return get()->operator->();
			}
			else
			{
				return get();
			}
		}

		// Release the lock and set invalid state
		void unlock()
		{
			if (m_block)
			{
				release();
				m_block = nullptr;
			}
		}

		// Call the constructor, return the stamp
		template <typename New = std::decay_t<T>, typename... Args>
		ullong create(Args&&... args)
		{
			static_assert(!type_const());
			static_assert(!type_volatile());

			const ullong result = ++m_head->m_create_count;

			if constexpr (typeinfo_count<T>::max_count > 1)
			{
				// Update hints only if the object is not being recreated
				if (!m_block->m_type)
				{
					const uint this_id = this->get_id();

					// Update max count
					m_head->m_limit.fetch_op([this_id](uint& limit)
					{
						if (limit <= this_id)
						{
							limit = this_id + 1;
							return true;
						}

						return false;
					});
				}
			}

			if constexpr (true)
			{
				static_assert(std::is_same_v<New, T>);

				// Set type; zero value shall not be observed in the case of recreation
				if (m_block->m_type.exchange(1) != 0)
				{
					// Destroy object if it exists
					m_block->get_ptr<T>()->~T();
					m_head->m_destroy_count++;
				}

				new (m_block->get_ptr<New>()) New(std::forward<Args>(args)...);
			}

			return result;
		}

		// Call the destructor if object exists
		void destroy() noexcept
		{
			static_assert(!type_const());

			if (!m_block->m_type.exchange(0))
			{
				return;
			}

			m_block->get_ptr<T>()->~T();
			m_head->m_destroy_count++;
		}

		// Get the ID
		uint get_id() const
		{
			// It's not often needed so figure it out instead of storing it
			const std::size_t diff = reinterpret_cast<uchar*>(m_block) - m_head->m_ptr;
			const std::size_t quot = diff / m_head->m_ssize;

			if (diff % m_head->m_ssize || quot >= typeinfo_count<T>::max_count)
			{
				return -1;
			}

			constexpr uint bias = typeinfo_bias<T>::bias;
			constexpr uint step = typeinfo_step<T>::step;
			return static_cast<uint>(quot) * step + bias;
		}

		static constexpr bool type_const()
		{
			return std::is_const_v<std::remove_reference_t<T>>;
		}

		static constexpr bool type_volatile()
		{
			return std::is_volatile_v<std::remove_reference_t<T>>;
		}
	};
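
	// Usage sketch (illustrative only; assumes a typemap instance "g_map" and a registered,
	// default-constructible type "T" with a hypothetical method "update"). The three states
	// listed above map to these checks:
	//
	//   if (auto ptr = g_map.lock<T>(id_any)) // valid state: the block is locked
	//   {
	//       if (!ptr.exists())                // null state: locked, object not constructed
	//       {
	//           ptr.create();                 // construct in place, returns the stamp
	//       }
	//
	//       ptr->update();                    // OK state: access via operator ->
	//   }                                     // the destructor releases the lock
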
	// Dynamic object collection, one or more per any type; shall not be initialized before main()
	class typemap
	{
		// Pointer to the dynamic array
		typemap_head* m_map = nullptr;

		// Pointer to the virtual memory
		void* m_memory = nullptr;

		// Virtual memory size
		std::size_t m_total = 0;

		template <typename T>
		typemap_head* get_head() const
		{
			return &m_map[stx::typeindex<typeinfo_base, std::decay_t<T>>()];
		}

	public:
		typemap(const typemap&) = delete;

		typemap& operator=(const typemap&) = delete;

		// Construct without initialization (suitable for global typemap)
		explicit constexpr typemap(std::nullptr_t) noexcept
		{
		}

		// Construct with initialization
		typemap()
		{
			init();
		}

		~typemap()
		{
			delete[] m_map;

			if (m_memory)
			{
				utils::memory_release(m_memory, m_total);
			}
		}

		// Recreate, also required if constructed without initialization.
		void init()
		{
			if (!stx::typelist_v<typeinfo_base>.count())
			{
				return;
			}

			// Recreate and copy some type information
			if (m_map == nullptr)
			{
				m_map = new typemap_head[stx::typelist_v<typeinfo_base>.count()]();
			}
			else
			{
				auto type = stx::typelist_v<typeinfo_base>.begin();
				auto _end = stx::typelist_v<typeinfo_base>.end();

				for (uint i = 0; type != _end; i++, ++type)
				{
					// Delete objects (there shall be no threads accessing them)
					const uint lim = m_map[i].m_count != 1 ? +m_map[i].m_limit : 1;

					for (std::size_t j = 0; j < lim; j++)
					{
						const auto block = reinterpret_cast<typemap_block*>(m_map[i].m_ptr + j * m_map[i].m_ssize);

						if (block->m_type)
						{
							m_map[i].clean(block);
						}
					}

					// Reset mutable fields
					m_map[i].m_sema.raw() = 0;
					m_map[i].m_limit.raw() = 0;
					m_map[i].m_create_count.raw() = 0;
					m_map[i].m_destroy_count.raw() = 0;
				}
			}

			// Initialize virtual memory if necessary
			if (m_memory == nullptr)
			{
				// Determine total size, copy typeinfo
				auto type = stx::typelist_v<typeinfo_base>.begin();
				auto _end = stx::typelist_v<typeinfo_base>.end();

				for (uint i = 0; type != _end; i++, ++type)
				{
					const uint align = type->align;
					const uint ssize = ::align(sizeof(typemap_block), align) + ::align(type->size, align);
					const auto total = std::size_t{ssize} * type->count;
					const auto start = std::uintptr_t{::align(m_total, align)};

					if (total)
					{
						// Move forward hoping there are no usable gaps wasted
						m_total = start + total;

						// Store storage size and object count
						m_map[i].m_ssize = ssize;
						m_map[i].m_count = type->count;
						m_map[i].m_ptr = reinterpret_cast<uchar*>(start);
					}

					// Copy destructor for indexed access
					m_map[i].clean = type->clean;
				}

				// Allocate virtual memory
				m_memory = utils::memory_reserve(m_total);
				utils::memory_commit(m_memory, m_total);

				// Update pointers
				for (uint i = 0, n = stx::typelist_v<typeinfo_base>.count(); i < n; i++)
				{
					if (m_map[i].m_count)
					{
						m_map[i].m_ptr = static_cast<uchar*>(m_memory) + reinterpret_cast<std::uintptr_t>(m_map[i].m_ptr);
					}
				}
			}
			else
			{
				// Reinitialize virtual memory at the same location
				utils::memory_reset(m_memory, m_total);
			}
		}

		// Return allocated virtual memory block size (not aligned)
		std::size_t get_memory_size() const
		{
			return m_total;
		}
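
		// Layout sketch (illustrative only): for a hypothetical type with sizeof == 24,
		// alignof == 16 and max_count == 4, init() above computes
		//   ssize = ::align(sizeof(typemap_block), 16) + ::align(24, 16) = 16 + 32 = 48
		// so the type owns a 48 * 4 = 192 byte slice of the single virtual memory block,
		// and each typemap_block header is followed by its aligned object storage.
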
	private:
		// Prepare pointers
		template <typename Type, typename Arg>
		typeptr_base init_ptr(Arg&& id) const
		{
			if constexpr (typeinfo_count<Type>::max_count == 0)
			{
				return {};
			}

			using id_tag = std::decay_t<Arg>;

			typemap_head* head = get_head<Type>();
			typemap_block* block = nullptr;

			if constexpr (std::is_same_v<id_tag, id_new_t> || std::is_same_v<id_tag, id_any_t> || std::is_same_v<id_tag, id_always_t>)
			{
				if constexpr (constexpr uint last = typeinfo_count<Type>::max_count - 1)
				{
					// If max_count > 1 only id_new is supported
					static_assert(std::is_same_v<id_tag, id_new_t>);
					static_assert(!std::is_const_v<std::remove_reference_t<Type>>);
					static_assert(!std::is_volatile_v<std::remove_reference_t<Type>>);

					// Try to acquire the semaphore
					if (!head->m_sema.try_inc(last + 1)) [[unlikely]]
					{
						block = nullptr;
					}
					else
					{
						// Find empty location and lock it, starting from hint index
						for (uint lim = head->m_limit, i = (lim > last ? 0 : lim);; i = (i == last ? 0 : i + 1))
						{
							block = reinterpret_cast<typemap_block*>(head->m_ptr + std::size_t{i} * head->m_ssize);

							if (block->m_type == 0 && block->m_mutex.try_lock())
							{
								if (block->m_type == 0) [[likely]]
								{
									break;
								}

								block->m_mutex.unlock();
							}
						}
					}
				}
				else
				{
					// Always access first element
					block = reinterpret_cast<typemap_block*>(head->m_ptr);

					if constexpr (std::is_same_v<id_tag, id_new_t>)
					{
						static_assert(!std::is_const_v<std::remove_reference_t<Type>>);
						static_assert(!std::is_volatile_v<std::remove_reference_t<Type>>);

						if (block->m_type != 0 || !block->m_mutex.try_lock())
						{
							block = nullptr;
						}
						else if (block->m_type != 0) [[unlikely]]
						{
							block->m_mutex.unlock();
							block = nullptr;
						}
					}
				}
			}
			else if constexpr (std::is_invocable_r_v<bool, Arg, const Type&>)
			{
				// Access with a lookup function
				for (std::size_t j = 0; j < (typeinfo_count<Type>::max_count != 1 ? +head->m_limit : 1); j++)
				{
					block = reinterpret_cast<typemap_block*>(head->m_ptr + j * head->m_ssize);

					if (block->m_type)
					{
						std::lock_guard lock(block->m_mutex);

						if (block->m_type)
						{
							if (std::invoke(std::forward<Arg>(id), std::as_const(*block->get_ptr<Type>())))
							{
								break;
							}
						}
					}

					block = nullptr;
				}
			}
			else
			{
				// Access by transformed id
				constexpr uint bias = typeinfo_bias<Type>::bias;
				constexpr uint step = typeinfo_step<Type>::step;
				const uint unbiased = static_cast<uint>(std::forward<Arg>(id)) - bias;
				const uint unscaled = unbiased / step;

				block = reinterpret_cast<typemap_block*>(head->m_ptr + std::size_t{head->m_ssize} * unscaled);

				// Check id range and type
				if (unscaled >= typeinfo_count<Type>::max_count || unbiased % step) [[unlikely]]
				{
					block = nullptr;
				}
				else
				{
					if (block->m_type == 0) [[unlikely]]
					{
						block = nullptr;
					}
				}
			}

			typeptr_base result;
			result.m_head = head;
			result.m_block = block;
			return result;
		}

		template <typename Type, typename Arg>
		void check_ptr(typemap_block*& block, Arg&& id) const
		{
			using id_tag = std::decay_t<Arg>;

			if constexpr (std::is_same_v<id_tag, id_new_t>)
			{
				// No action for id_new
				return;
			}
			else if constexpr (std::is_same_v<id_tag, id_any_t>)
			{
				// No action for id_any
				return;
			}
			else if constexpr (std::is_same_v<id_tag, id_always_t>)
			{
				if (block->m_type == 0 && block->m_type.compare_and_swap_test(0, 1))
				{
					// Initialize object if necessary
					static_assert(!std::is_const_v<std::remove_reference_t<Type>>);
					static_assert(!std::is_volatile_v<std::remove_reference_t<Type>>);

					new (block->get_ptr<Type>()) Type();
				}

				return;
			}
			else if constexpr (std::is_invocable_r_v<bool, Arg, const Type&>)
			{
				if (!block) [[unlikely]]
				{
					return;
				}

				if (block->m_type) [[likely]]
				{
					if (std::invoke(std::forward<Arg>(id), std::as_const(*block->get_ptr<Type>())))
					{
						return;
					}
				}
			}
			else if (block)
			{
				if (block->m_type) [[likely]]
				{
					return;
				}
			}
			else
			{
				return;
			}

			// Fallback: unlock and invalidate
			block->m_mutex.unlock();
			block = nullptr;
		}

		template <bool Lock, typename Type, bool Try = false>
		bool lock_ptr(typemap_block* block) const
		{
			// Use reader lock for const access
			constexpr bool is_const = std::is_const_v<std::remove_reference_t<Type>>;
			constexpr bool is_volatile = std::is_volatile_v<std::remove_reference_t<Type>>;

			// Already locked or lock is unnecessary
			if constexpr (!Lock)
			{
				return true;
			}
			else
			{
				// Skip failed ids
				if (!block)
				{
					return true;
				}

				if constexpr (Try)
				{
					if constexpr (is_const || is_volatile)
					{
						return block->m_mutex.try_lock_shared();
					}
					else
					{
						return block->m_mutex.try_lock();
					}
				}
				else if constexpr (is_const || is_volatile)
				{
					if (block->m_mutex.is_lockable()) [[likely]]
					{
						return true;
					}

					block->m_mutex.lock_shared();
					return false;
				}
				else
				{
					if (block->m_mutex.is_free()) [[likely]]
					{
						return true;
					}

					block->m_mutex.lock();
					return false;
				}
			}
		}

		template <uint I, typename Type, typename... Types, bool Lock, bool... Locks, std::size_t N>
		bool try_lock(const std::array<typeptr_base, N>& array, uint locked, std::integer_sequence<bool, Lock, Locks...>) const
		{
			// Try to lock mutex if not locked from the previous step
			if (I == locked || lock_ptr<Lock, Type, true>(array[I].m_block))
			{
				if constexpr (I + 1 < N)
				{
					// Proceed recursively
					if (try_lock<I + 1, Types...>(array, locked, std::integer_sequence<bool, Locks...>{})) [[likely]]
					{
						return true;
					}

					// Retire: unlock everything, including (I == locked) case
					if constexpr (Lock)
					{
						if (array[I].m_block)
						{
							if constexpr (std::is_const_v<std::remove_reference_t<Type>> || std::is_volatile_v<std::remove_reference_t<Type>>)
							{
								array[I].m_block->m_mutex.unlock_shared();
							}
							else
							{
								array[I].m_block->m_mutex.unlock();
							}
						}
					}
				}
				else
				{
					return true;
				}
			}

			return false;
		}

		template <typename... Types, bool... Locks, std::size_t N, std::size_t... I>
		uint lock_array(const std::array<typeptr_base, N>& array, std::integer_sequence<std::size_t, I...>, std::integer_sequence<bool, Locks...>) const
		{
			// Verify all mutexes are free or wait for one of them and return its index
			uint locked = 0;
			((lock_ptr<Locks, Types>(array[I].m_block) && ++locked) && ...);
			return locked;
		}
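
		// Locking scheme note: lock_array and try_lock above cooperate like std::lock to
		// acquire any number of mutexes without a global lock order and without deadlock:
		//   1) lock_array scans all blocks and blocks on the first busy mutex, returning
		//      its index ("locked"); mutexes that merely looked free are not yet held.
		//   2) try_lock then try-locks every other mutex in index order.
		//   3) On any failure everything is released and the loop in lock() retries, so
		//      at most one blocking wait is in progress at any time.
		// Slots acquired via id_new are already locked by init_ptr and are excluded
		// through does_need_lock/locks_t.
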
		template <typename... Types, std::size_t N, std::size_t... I, typename... Args>
		void check_array(std::array<typeptr_base, N>& array, std::integer_sequence<std::size_t, I...>, Args&&... ids) const
		{
			// Check types and unlock on mismatch
			(check_ptr<Types>(array[I].m_block, std::forward<Args>(ids)), ...);
		}

		template <typename... Types, std::size_t N, std::size_t... I>
		std::tuple<typeptr<Types>...> array_to_tuple(const std::array<typeptr_base, N>& array, std::integer_sequence<std::size_t, I...>) const
		{
			return {array[I]...};
		}

		template <typename Type, typename Arg>
		static constexpr bool does_need_lock()
		{
			if constexpr (std::is_same_v<std::decay_t<Arg>, id_new_t>)
			{
				return false;
			}

			if constexpr (std::is_const_v<std::remove_reference_t<Type>> && std::is_volatile_v<std::remove_reference_t<Type>>)
			{
				return false;
			}

			return true;
		}

		// Transform T&& into refptr<T>, moving const qualifier from T to refptr<T>
		template <typename T, typename U = std::remove_reference_t<T>>
		using decode_t = std::conditional_t<std::is_same_v<T, U>, T,
			std::conditional_t<std::is_const_v<U>, const refptr<std::decay_t<T>>, refptr<std::decay_t<T>>>>;

	public:
		// Lock any objects by their identifiers, special tags id_new/id_any/id_always, or search predicates
		template <typename... Types, typename... Args, typename = std::enable_if_t<sizeof...(Types) == sizeof...(Args)>>
		auto lock(Args&&... ids) const
		{
			static_assert(((!std::is_lvalue_reference_v<Types>) && ...));
			static_assert(((!std::is_array_v<Types>) && ...));
			static_assert(((!std::is_void_v<Types>) && ...));

			// Initialize pointers
			std::array<typeptr_base, sizeof...(Types)> result{this->init_ptr<decode_t<Types>>(std::forward<Args>(ids))...};

			// Whether requires locking after init_ptr
			using locks_t = std::integer_sequence<bool, does_need_lock<decode_t<Types>, Args>()...>;

			// Array index helper
			using seq_t = std::index_sequence_for<decode_t<Types>...>;

			// Lock any number of objects in safe manner
			while (true)
			{
				const uint locked = lock_array<decode_t<Types>...>(result, seq_t{}, locks_t{});

				if (try_lock<0, decode_t<Types>...>(result, locked, locks_t{})) [[likely]]
				{
					break;
				}
			}

			// Verify object types
			check_array<decode_t<Types>...>(result, seq_t{}, std::forward<Args>(ids)...);

			// Return tuple of possibly locked pointers, or a single pointer
			if constexpr (sizeof...(Types) != 1)
			{
				return array_to_tuple<decode_t<Types>...>(result, seq_t{});
			}
			else
			{
				return typeptr<decode_t<Types>...>(result[0]);
			}
		}

		// Apply a function to all objects of one or more types
		template <typename Type, typename... Types, typename F>
		ullong apply(F&& func)
		{
			static_assert(!std::is_lvalue_reference_v<Type>);
			static_assert(!std::is_array_v<Type>);
			static_assert(!std::is_void_v<Type>);

			typemap_head* head = get_head<decode_t<Type>>();

			const ullong ix = head->m_create_count;

			for (std::size_t j = 0; j < (typeinfo_count<decode_t<Type>>::max_count != 1 ? +head->m_limit : 1); j++)
			{
				const auto block = reinterpret_cast<typemap_block*>(head->m_ptr + j * head->m_ssize);

				if (block->m_type)
				{
					std::lock_guard lock(block->m_mutex);

					if (block->m_type)
					{
						std::invoke(std::forward<F>(func), *block->get_ptr<decode_t<Type>>());
					}
				}
			}

			// Return "unsigned negative" value if the creation index has increased
			const ullong result = ix - head->m_create_count;

			if constexpr (sizeof...(Types) > 0)
			{
				return (result + ... + apply<Types>(func));
			}
			else
			{
				return result;
			}
		}

		template <typename Type>
		ullong get_create_count() const
		{
			return get_head<Type>()->m_create_count;
		}

		template <typename Type>
		ullong get_destroy_count() const
		{
			return get_head<Type>()->m_destroy_count;
		}
	};
} // namespace utils
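
// Overall usage sketch (illustrative only): "named_object" and "use" are hypothetical,
// and type registration happens through the stx type list (util/typeindices.hpp) when a
// type is first used with a typemap.
//
//   struct named_object
//   {
//       static constexpr uint id_count = 16; // up to 16 live instances (trait detected above)
//
//       std::string name;
//
//       explicit named_object(std::string n) : name(std::move(n)) {}
//   };
//
//   utils::typemap map; // default constructor calls init()
//
//   // Acquire a free slot (locked, not yet constructed), construct it, read back the id
//   if (auto ptr = map.lock<named_object>(utils::id_new))
//   {
//       ptr.create("first");
//       const uint id = ptr.get_id();
//   }
//
//   // Lock by id; a const type takes the shared (reader) lock
//   if (auto ptr = map.lock<const named_object>(0))
//   {
//       use(ptr->name);
//   }
//
//   // Search by predicate instead of id, then visit all live objects
//   auto found = map.lock<named_object>([](const named_object& obj) { return obj.name == "first"; });
//   map.apply<named_object>([](named_object& obj) { obj.name += '!'; });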