mirror of https://github.com/snes9xgit/snes9x.git

commit 0e03a36847
parent 9be1ee83b3

Update Vulkan Memory Allocator and .hpp.
@@ -126,9 +126,7 @@ See documentation chapter: \ref statistics.
 extern "C" {
 #endif
 
-#ifndef VULKAN_H_
-#include <vulkan/vulkan.h>
-#endif
+#include <vulkan/vulkan.h>
 
 #if !defined(VMA_VULKAN_VERSION)
 #if defined(VK_VERSION_1_3)
@@ -234,6 +232,12 @@ extern "C" {
 #define VMA_CALL_POST
 #endif
 
+// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
+// structure that will be extended via the pNext chain.
+#ifndef VMA_EXTENDS_VK_STRUCT
+#define VMA_EXTENDS_VK_STRUCT(vkStruct)
+#endif
+
 // Define this macro to decorate pointers with an attribute specifying the
 // length of the array they point to if they are not null.
 //
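Note: the new VMA_EXTENDS_VK_STRUCT hook expands to nothing by default; a build that wants static checking of pNext chains can map it to a project-specific annotation before including the header. A minimal sketch (the annotation macro name is hypothetical):

    // Hypothetical project-side annotation; by default the hook is a no-op.
    #define VMA_EXTENDS_VK_STRUCT(vkStruct) /* MY_ANALYZER_EXTENDS(vkStruct) */
    #include "vk_mem_alloc.h"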
@@ -1319,7 +1323,7 @@ typedef struct VmaPoolCreateInfo
 Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
 can be attached automatically by this library when using other, more convenient of its features.
 */
-void* VMA_NULLABLE pMemoryAllocateNext;
+void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
 } VmaPoolCreateInfo;
 
 /** @} */
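Note: the annotation documents what was already the contract here: whatever is chained through pMemoryAllocateNext must extend VkMemoryAllocateInfo. A hedged sketch of attaching a memory priority to a custom pool (memTypeIndex is assumed to be chosen elsewhere, e.g. via vmaFindMemoryTypeIndex):

    VkMemoryPriorityAllocateInfoEXT priorityInfo = {};
    priorityInfo.sType = VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT;
    priorityInfo.priority = 1.0f;

    VmaPoolCreateInfo poolInfo = {};
    poolInfo.memoryTypeIndex = memTypeIndex;      // assumed chosen elsewhere
    poolInfo.pMemoryAllocateNext = &priorityInfo; // extends VkMemoryAllocateInfo
    VmaPool pool;
    VkResult res = vmaCreatePool(allocator, &poolInfo, &pool);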
@@ -1389,6 +1393,12 @@ typedef struct VmaAllocationInfo
 const char* VMA_NULLABLE pName;
 } VmaAllocationInfo;
 
+/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass.
+
+Should return true if the defragmentation needs to stop current pass.
+*/
+typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
+
 /** \brief Parameters for defragmentation.
 
 To be used with function vmaBeginDefragmentation().
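Note: the new callback type is a plain C function pointer. A minimal sketch of one possible criterion, a wall-clock deadline passed through pUserData (names are illustrative):

    #include <chrono>

    static VkBool32 VKAPI_PTR BreakOnDeadline(void* pUserData)
    {
        const auto* deadline =
            static_cast<const std::chrono::steady_clock::time_point*>(pUserData);
        // Returning VK_TRUE asks VMA to stop the current defragmentation pass.
        return std::chrono::steady_clock::now() >= *deadline ? VK_TRUE : VK_FALSE;
    }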
@@ -1412,6 +1422,13 @@ typedef struct VmaDefragmentationInfo
 `0` means no limit.
 */
 uint32_t maxAllocationsPerPass;
+/** \brief Optional custom callback for stopping vmaBeginDefragmentation().
+
+Have to return true for breaking current defragmentation pass.
+*/
+PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
+/// \brief Optional data to pass to custom callback for stopping pass of defragmentation.
+void* VMA_NULLABLE pBreakCallbackUserData;
 } VmaDefragmentationInfo;
 
 /// Single move of an allocation to be done for defragmentation.
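Note: wiring the callback into the new fields might look like this (continuing the BreakOnDeadline sketch above; error handling omitted):

    auto deadline = std::chrono::steady_clock::now() + std::chrono::milliseconds(2);

    VmaDefragmentationInfo defragInfo = {};
    defragInfo.pfnBreakCallback = BreakOnDeadline;
    defragInfo.pBreakCallbackUserData = &deadline;

    VmaDefragmentationContext defragCtx;
    VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);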
@@ -2227,7 +2244,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
 VmaAllocation VMA_NOT_NULL allocation,
 VkDeviceSize allocationLocalOffset,
 VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-const void* VMA_NULLABLE pNext);
+const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
 
 /** \brief Binds image to allocation.
 
@@ -2264,7 +2281,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
 VmaAllocation VMA_NOT_NULL allocation,
 VkDeviceSize allocationLocalOffset,
 VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
-const void* VMA_NULLABLE pNext);
+const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
 
 /** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
 
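Note: both bind functions keep their signatures; the annotation only documents what pNext may point to (a structure extending VkBindBufferMemoryInfoKHR or VkBindImageMemoryInfoKHR respectively). A trivial call is unchanged:

    // Passing no extension structure is still valid.
    VkResult res = vmaBindBufferMemory2(allocator, allocation,
                                        /*allocationLocalOffset=*/0, buffer,
                                        /*pNext=*/nullptr);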
@@ -2607,10 +2624,18 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
 #include <utility>
 #include <type_traits>
 
+#if !defined(VMA_CPP20)
+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+#define VMA_CPP20 1
+#else
+#define VMA_CPP20 0
+#endif
+#endif
+
 #ifdef _MSC_VER
 #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
 #endif
-#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+#if VMA_CPP20
 #include <bit> // For std::popcount
 #endif
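Note: because the C++20 test now sits behind a single macro, a build can override the detection, e.g. to force the pre-C++20 fallbacks on a toolchain whose <bit> header is unusable. A sketch:

    // Force the non-C++20 code paths even when the compiler reports C++20.
    #define VMA_CPP20 0
    #include "vk_mem_alloc.h"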
@@ -2806,7 +2831,7 @@ static void vma_aligned_free(void* VMA_NULLABLE ptr)
 #endif
 
 #ifndef VMA_ALIGN_OF
-#define VMA_ALIGN_OF(type) (__alignof(type))
+#define VMA_ALIGN_OF(type) (alignof(type))
 #endif
 
 #ifndef VMA_SYSTEM_ALIGNED_MALLOC
@@ -3246,7 +3271,7 @@ But you need to check in runtime whether user's CPU supports these, as some old
 */
 static inline uint32_t VmaCountBitsSet(uint32_t v)
 {
-#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+#if VMA_CPP20
 return std::popcount(v);
 #else
 uint32_t c = v - ((v >> 1) & 0x55555555);
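Note: the hunk cuts off inside the fallback; for reference, the classic SWAR bit-count it continues with (standard bit-twiddling, shown here as a self-contained sketch) is:

    #include <cstdint>

    static inline uint32_t PopCount32(uint32_t v)
    {
        uint32_t c = v - ((v >> 1) & 0x55555555);        // 2-bit partial sums
        c = ((c >> 2) & 0x33333333) + (c & 0x33333333);  // 4-bit sums
        c = ((c >> 4) + c) & 0x0F0F0F0F;                 // 8-bit sums
        c = ((c >> 8) + c) & 0x00FF00FF;                 // 16-bit sums
        c = ((c >> 16) + c) & 0x0000FFFF;                // final count
        return c;
    }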
@@ -3677,7 +3702,7 @@ static bool FindMemoryPreferences(
 return false;
 }
 // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*.
-const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
+const bool deviceAccess = (bufImgUsage & ~static_cast<VkFlags>(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
 const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
 const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
 const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
@@ -3687,18 +3712,21 @@ static bool FindMemoryPreferences(
 // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
 if(hostAccessRandom)
 {
-if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+// Prefer cached. Cannot require it, because some platforms don't have it (e.g. Raspberry Pi - see #362)!
+outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+
+if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
 {
 // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
 // Omitting HOST_VISIBLE here is intentional.
 // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
 // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
-outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 }
 else
 {
-// Always CPU memory, cached.
-outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+// Always CPU memory.
+outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
 }
 }
 // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
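Note: this is the branch taken for readback-style allocations. From the API side, the pattern it serves looks roughly like the sketch below; since HOST_CACHED is now only preferred rather than required, the same request also succeeds on platforms without cached host memory:

    VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
    bufInfo.size = 64 * 1024;
    bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;

    VmaAllocationCreateInfo allocInfo = {};
    allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
    allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
                      VMA_ALLOCATION_CREATE_MAPPED_BIT;

    VkBuffer buf;
    VmaAllocation alloc;
    VmaAllocationInfo info;
    VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocInfo, &buf, &alloc, &info);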
@@ -3737,19 +3765,18 @@ static bool FindMemoryPreferences(
 // No CPU access
 else
 {
-// GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
-if(deviceAccess)
-{
-// ...unless there is a clear preference from the user not to do so.
-if(preferHost)
-outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-else
-outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-}
+// if(deviceAccess)
+//
+// GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
+// unless there is a clear preference from the user not to do so.
+//
+// else:
+//
+// No direct GPU access, no CPU access, just transfers.
 // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
 // a "swap file" copy to free some GPU memory (then better CPU memory).
 // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
 
 if(preferHost)
 outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 else
@@ -10295,8 +10322,8 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
 else
 sizeForNextList += smallSizeStep;
 
-uint32_t nextListIndex = 0;
-uint32_t prevListIndex = 0;
+uint32_t nextListIndex = m_ListsCount;
+uint32_t prevListIndex = m_ListsCount;
 Block* nextListBlock = VMA_NULL;
 Block* prevListBlock = VMA_NULL;
 
@@ -11038,6 +11065,8 @@ private:
 
 const VkDeviceSize m_MaxPassBytes;
 const uint32_t m_MaxPassAllocations;
+const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
+void* m_BreakCallbackUserData;
 
 VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
 VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
@@ -12968,6 +12997,8 @@ VmaDefragmentationContext_T::VmaDefragmentationContext_T(
 const VmaDefragmentationInfo& info)
 : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
 m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
+m_BreakCallback(info.pfnBreakCallback),
+m_BreakCallbackUserData(info.pBreakCallbackUserData),
 m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
 m_Moves(m_MoveAllocator)
 {
@@ -13363,6 +13394,10 @@ VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::Get
 
 VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
 {
+// Check custom criteria if exists
+if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
+return CounterStatus::End;
+
 // Ignore allocation if will exceed max size for copy
 if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
 {
@@ -13371,6 +13406,8 @@ VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCou
 else
 return CounterStatus::End;
 }
+else
+m_IgnoredAllocs = 0;
 return CounterStatus::Pass;
 }
 
@@ -13730,7 +13767,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto
 {
 // Full clear performed already
 if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
-reinterpret_cast<size_t*>(m_AlgorithmState)[index] = last;
+vectorState.firstFreeBlock = last;
 return true;
 }
 }
@@ -2,7 +2,7 @@
 #define VULKAN_MEMORY_ALLOCATOR_HPP
 
 #if !defined(AMD_VULKAN_MEMORY_ALLOCATOR_H)
-#include "vk_mem_alloc.h"
+#include <vk_mem_alloc.h>
 #endif
 
 #include <vulkan/vulkan.hpp>
@@ -59,10 +59,12 @@ namespace VMA_HPP_NAMESPACE {
 };
 }
 namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<Buffer, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<Buffer, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<Buffer, VMA_HPP_NAMESPACE::Allocator>;
 };
-template<> struct UniqueHandleTraits<Image, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<Image, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<Image, VMA_HPP_NAMESPACE::Allocator>;
 };
 }
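Note: these struct-to-class changes presumably track the class-key of the UniqueHandleTraits primary template in recent vulkan.hpp releases; MSVC in particular warns (C4099) when a specialization uses a different class-key than the primary, hence also the added public: for the member that struct made implicit. Schematically (illustrative names):

    template<typename T, typename D> class UniqueHandleTraits;      // primary, as in vulkan.hpp
    template<> struct UniqueHandleTraits<int, void> { /* ... */ };  // key mismatch -> MSVC C4099
    template<> class UniqueHandleTraits<long, void> { public: };    // matching key, explicit public: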
@@ -13,6 +13,7 @@ namespace VMA_HPP_NAMESPACE {
 eExtMemoryPriority = VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(AllocatorCreateFlagBits value) {
 if (value == AllocatorCreateFlagBits::eExternallySynchronized) return "ExternallySynchronized";
 if (value == AllocatorCreateFlagBits::eKhrDedicatedAllocation) return "KhrDedicatedAllocation";
@@ -23,6 +24,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value == AllocatorCreateFlagBits::eExtMemoryPriority) return "ExtMemoryPriority";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VULKAN_HPP_NAMESPACE {
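Note: the new guards (repeated for every flag type below) let these bindings honor the same switch vulkan.hpp uses to compile out its to_string helpers. A sketch (the include name is an assumption):

    // Compile out all to_string(...) overloads in both vulkan.hpp and these bindings.
    #define VULKAN_HPP_NO_TO_STRING
    #include "vk_mem_alloc.hpp" // assumed include name for these bindings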
@@ -59,6 +61,7 @@ namespace VMA_HPP_NAMESPACE {
 return ~(AllocatorCreateFlags(bits));
 }
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(AllocatorCreateFlags value) {
 if (!value) return "{}";
 std::string result;
@@ -71,6 +74,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value & AllocatorCreateFlagBits::eExtMemoryPriority) result += "ExtMemoryPriority | ";
 return "{ " + result.substr( 0, result.size() - 3 ) + " }";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -88,6 +92,7 @@ namespace VMA_HPP_NAMESPACE {
 eAutoPreferHost = VMA_MEMORY_USAGE_AUTO_PREFER_HOST
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(MemoryUsage value) {
 if (value == MemoryUsage::eUnknown) return "Unknown";
 if (value == MemoryUsage::eGpuOnly) return "GpuOnly";
@@ -101,6 +106,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value == MemoryUsage::eAutoPreferHost) return "AutoPreferHost";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -124,6 +130,7 @@ namespace VMA_HPP_NAMESPACE {
 eStrategyFirstFit = VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(AllocationCreateFlagBits value) {
 if (value == AllocationCreateFlagBits::eDedicatedMemory) return "DedicatedMemory";
 if (value == AllocationCreateFlagBits::eNeverAllocate) return "NeverAllocate";
@@ -143,6 +150,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value == AllocationCreateFlagBits::eStrategyFirstFit) return "StrategyFirstFit";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VULKAN_HPP_NAMESPACE {
@@ -188,6 +196,7 @@ namespace VMA_HPP_NAMESPACE {
 return ~(AllocationCreateFlags(bits));
 }
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(AllocationCreateFlags value) {
 if (!value) return "{}";
 std::string result;
@@ -209,6 +218,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value & AllocationCreateFlagBits::eStrategyFirstFit) result += "StrategyFirstFit | ";
 return "{ " + result.substr( 0, result.size() - 3 ) + " }";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -218,11 +228,13 @@ namespace VMA_HPP_NAMESPACE {
 eLinearAlgorithm = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(PoolCreateFlagBits value) {
 if (value == PoolCreateFlagBits::eIgnoreBufferImageGranularity) return "IgnoreBufferImageGranularity";
 if (value == PoolCreateFlagBits::eLinearAlgorithm) return "LinearAlgorithm";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VULKAN_HPP_NAMESPACE {
@@ -254,6 +266,7 @@ namespace VMA_HPP_NAMESPACE {
 return ~(PoolCreateFlags(bits));
 }
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(PoolCreateFlags value) {
 if (!value) return "{}";
 std::string result;
@@ -261,6 +274,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value & PoolCreateFlagBits::eLinearAlgorithm) result += "LinearAlgorithm | ";
 return "{ " + result.substr( 0, result.size() - 3 ) + " }";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -272,6 +286,7 @@ namespace VMA_HPP_NAMESPACE {
 eFlagAlgorithmExtensive = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(DefragmentationFlagBits value) {
 if (value == DefragmentationFlagBits::eFlagAlgorithmFast) return "FlagAlgorithmFast";
 if (value == DefragmentationFlagBits::eFlagAlgorithmBalanced) return "FlagAlgorithmBalanced";
@@ -279,6 +294,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value == DefragmentationFlagBits::eFlagAlgorithmExtensive) return "FlagAlgorithmExtensive";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VULKAN_HPP_NAMESPACE {
@@ -312,6 +328,7 @@ namespace VMA_HPP_NAMESPACE {
 return ~(DefragmentationFlags(bits));
 }
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(DefragmentationFlags value) {
 if (!value) return "{}";
 std::string result;
@@ -321,6 +338,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value & DefragmentationFlagBits::eFlagAlgorithmExtensive) result += "FlagAlgorithmExtensive | ";
 return "{ " + result.substr( 0, result.size() - 3 ) + " }";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -331,12 +349,14 @@ namespace VMA_HPP_NAMESPACE {
 eDestroy = VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(DefragmentationMoveOperation value) {
 if (value == DefragmentationMoveOperation::eCopy) return "Copy";
 if (value == DefragmentationMoveOperation::eIgnore) return "Ignore";
 if (value == DefragmentationMoveOperation::eDestroy) return "Destroy";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -345,10 +365,12 @@ namespace VMA_HPP_NAMESPACE {
 eLinearAlgorithm = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(VirtualBlockCreateFlagBits value) {
 if (value == VirtualBlockCreateFlagBits::eLinearAlgorithm) return "LinearAlgorithm";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VULKAN_HPP_NAMESPACE {
@@ -379,12 +401,14 @@ namespace VMA_HPP_NAMESPACE {
 return ~(VirtualBlockCreateFlags(bits));
 }
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(VirtualBlockCreateFlags value) {
 if (!value) return "{}";
 std::string result;
 if (value & VirtualBlockCreateFlagBits::eLinearAlgorithm) result += "LinearAlgorithm | ";
 return "{ " + result.substr( 0, result.size() - 3 ) + " }";
 }
+# endif
 }
 
 namespace VMA_HPP_NAMESPACE {
@@ -396,6 +420,7 @@ namespace VMA_HPP_NAMESPACE {
 eStrategyMinOffset = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT
 };
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(VirtualAllocationCreateFlagBits value) {
 if (value == VirtualAllocationCreateFlagBits::eUpperAddress) return "UpperAddress";
 if (value == VirtualAllocationCreateFlagBits::eStrategyMinMemory) return "StrategyMinMemory";
@@ -403,6 +428,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value == VirtualAllocationCreateFlagBits::eStrategyMinOffset) return "StrategyMinOffset";
 return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast<uint32_t>(value)) + " )";
 }
+# endif
 }
 
 namespace VULKAN_HPP_NAMESPACE {
@@ -436,6 +462,7 @@ namespace VMA_HPP_NAMESPACE {
 return ~(VirtualAllocationCreateFlags(bits));
 }
 
+# if !defined( VULKAN_HPP_NO_TO_STRING )
 VULKAN_HPP_INLINE std::string to_string(VirtualAllocationCreateFlags value) {
 if (!value) return "{}";
 std::string result;
@@ -445,6 +472,7 @@ namespace VMA_HPP_NAMESPACE {
 if (value & VirtualAllocationCreateFlagBits::eStrategyMinOffset) result += "StrategyMinOffset | ";
 return "{ " + result.substr( 0, result.size() - 3 ) + " }";
 }
+# endif
 }
 
 #endif
@@ -387,12 +387,12 @@ namespace VMA_HPP_NAMESPACE {
 }
 
 #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
-VULKAN_HPP_INLINE void Allocator::freeMemory(const Allocation allocation) const {
-vmaFreeMemory(m_allocator, static_cast<const VmaAllocation>(allocation));
+VULKAN_HPP_INLINE void Allocator::freeMemory(Allocation allocation) const {
+vmaFreeMemory(m_allocator, static_cast<VmaAllocation>(allocation));
 }
 #else
-VULKAN_HPP_INLINE void Allocator::freeMemory(const Allocation allocation) const {
-vmaFreeMemory(m_allocator, static_cast<const VmaAllocation>(allocation));
+VULKAN_HPP_INLINE void Allocator::freeMemory(Allocation allocation) const {
+vmaFreeMemory(m_allocator, static_cast<VmaAllocation>(allocation));
 }
 #endif
 
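Note: dropping the const here is cosmetic: top-level const on a by-value parameter is not part of the function type, so both spellings declare the same function, and the cast target likewise needs no const. For illustration:

    void freeMemory(Allocation allocation);
    void freeMemory(const Allocation allocation); // redeclares the same function;
                                                  // the const only affects the body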
@@ -22,11 +22,11 @@ namespace VMA_HPP_NAMESPACE {
 struct VirtualAllocationCreateInfo;
 struct VirtualAllocationInfo;
 
-class Allocator;
 class Pool;
 class Allocation;
 class DefragmentationContext;
 class VirtualAllocation;
+class Allocator;
 class VirtualBlock;
 }
 
@@ -81,7 +81,8 @@ namespace VMA_HPP_NAMESPACE {
 }
 #ifndef VULKAN_HPP_NO_SMART_HANDLE
 namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::Pool, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::Pool, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Pool, VMA_HPP_NAMESPACE::Allocator>;
 };
 }
@@ -138,7 +139,8 @@ namespace VMA_HPP_NAMESPACE {
 }
 #ifndef VULKAN_HPP_NO_SMART_HANDLE
 namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocation, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocation, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Allocation, VMA_HPP_NAMESPACE::Allocator>;
 };
 }
@@ -193,14 +195,6 @@ namespace VMA_HPP_NAMESPACE {
 VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationContext) == sizeof(VmaDefragmentationContext),
 "handle and wrapper have different size!");
 }
-#ifndef VULKAN_HPP_NO_SMART_HANDLE
-namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::DefragmentationContext, VMA_HPP_NAMESPACE::Dispatcher> {
-using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::DefragmentationContext, void>;
-};
-}
-namespace VMA_HPP_NAMESPACE { using UniqueDefragmentationContext = VULKAN_HPP_NAMESPACE::UniqueHandle<DefragmentationContext, Dispatcher>; }
-#endif
 
 namespace VMA_HPP_NAMESPACE {
 class Allocator {
@@ -444,9 +438,9 @@ namespace VMA_HPP_NAMESPACE {
 AllocationInfo* allocationInfo) const;
 
 #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
-void freeMemory(const Allocation allocation) const;
+void freeMemory(Allocation allocation) const;
 #else
-void freeMemory(const Allocation allocation) const;
+void freeMemory(Allocation allocation) const;
 #endif
 
 #ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
@@ -731,7 +725,8 @@ namespace VMA_HPP_NAMESPACE {
 }
 #ifndef VULKAN_HPP_NO_SMART_HANDLE
 namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocator, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocator, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Allocator, void>;
 };
 }
@@ -788,7 +783,8 @@ namespace VMA_HPP_NAMESPACE {
 }
 #ifndef VULKAN_HPP_NO_SMART_HANDLE
 namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualAllocation, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualAllocation, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::VirtualAllocation, VMA_HPP_NAMESPACE::VirtualBlock>;
 };
 }
@@ -919,7 +915,8 @@ namespace VMA_HPP_NAMESPACE {
 }
 #ifndef VULKAN_HPP_NO_SMART_HANDLE
 namespace VULKAN_HPP_NAMESPACE {
-template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualBlock, VMA_HPP_NAMESPACE::Dispatcher> {
+template<> class UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualBlock, VMA_HPP_NAMESPACE::Dispatcher> {
+public:
 using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::VirtualBlock, void>;
 };
 }
@@ -1306,11 +1306,15 @@ namespace VMA_HPP_NAMESPACE {
 , Pool pool_ = {}
 , VULKAN_HPP_NAMESPACE::DeviceSize maxBytesPerPass_ = {}
 , uint32_t maxAllocationsPerPass_ = {}
+, PFN_vmaCheckDefragmentationBreakFunction pfnBreakCallback_ = {}
+, void* pBreakCallbackUserData_ = {}
 ) VULKAN_HPP_NOEXCEPT
 : flags(flags_)
 , pool(pool_)
 , maxBytesPerPass(maxBytesPerPass_)
 , maxAllocationsPerPass(maxAllocationsPerPass_)
+, pfnBreakCallback(pfnBreakCallback_)
+, pBreakCallbackUserData(pBreakCallbackUserData_)
 {}
 
 VULKAN_HPP_CONSTEXPR DefragmentationInfo(DefragmentationInfo const &) VULKAN_HPP_NOEXCEPT = default;
@@ -1339,6 +1343,8 @@ namespace VMA_HPP_NAMESPACE {
 && pool == rhs.pool
 && maxBytesPerPass == rhs.maxBytesPerPass
 && maxAllocationsPerPass == rhs.maxAllocationsPerPass
+&& pfnBreakCallback == rhs.pfnBreakCallback
+&& pBreakCallbackUserData == rhs.pBreakCallbackUserData
 ;
 }
 #endif
@@ -1364,6 +1370,16 @@ namespace VMA_HPP_NAMESPACE {
 maxAllocationsPerPass = maxAllocationsPerPass_;
 return *this;
 }
+
+VULKAN_HPP_CONSTEXPR_14 DefragmentationInfo& setPfnBreakCallback(PFN_vmaCheckDefragmentationBreakFunction pfnBreakCallback_) VULKAN_HPP_NOEXCEPT {
+pfnBreakCallback = pfnBreakCallback_;
+return *this;
+}
+
+VULKAN_HPP_CONSTEXPR_14 DefragmentationInfo& setPBreakCallbackUserData(void* pBreakCallbackUserData_) VULKAN_HPP_NOEXCEPT {
+pBreakCallbackUserData = pBreakCallbackUserData_;
+return *this;
+}
 #endif
 
 public:
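Note: the new setters follow the chainable pattern of the existing ones, so a DefragmentationInfo can be built fluently. A sketch reusing the BreakOnDeadline callback from the C-API example above:

    VMA_HPP_NAMESPACE::DefragmentationInfo info{};
    info.setMaxAllocationsPerPass(64)
        .setPfnBreakCallback(BreakOnDeadline)
        .setPBreakCallbackUserData(&deadline);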
@@ -1371,6 +1387,8 @@ namespace VMA_HPP_NAMESPACE {
 Pool pool = {};
 VULKAN_HPP_NAMESPACE::DeviceSize maxBytesPerPass = {};
 uint32_t maxAllocationsPerPass = {};
+PFN_vmaCheckDefragmentationBreakFunction pfnBreakCallback = {};
+void* pBreakCallbackUserData = {};
 };
 VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationInfo) == sizeof(VmaDefragmentationInfo),
 "struct and wrapper have different size!");