diff --git a/.cirrus.yml b/.cirrus.yml index a7eb92bc..bf37b67c 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -13,7 +13,7 @@ snes9x_linux-gtk-amd64_task: - apt-get update && DEBIAN_FRONTEND=noninteractive apt-get -y install meson gettext libsdl2-dev libgtkmm-3.0-dev libgtk-3-dev libminizip-dev portaudio19-dev glslang-dev cmake compile_script: - - meson build gtk --buildtype=release --strip + - cmake -G Ninja -B build -S gtk -DCMAKE_BUILD_TYPE=Release - ninja -j2 -C build package_script: @@ -195,12 +195,3 @@ libretro_nintendo-ngc_task: build_artifacts: path: "libretro/snes9x_libretro_ngc.a" - -libretro_playstation-psp_task: - container: - image: bkcsoft/psptoolchain - compile_script: - - make -j2 -C libretro platform=unix - build_artifacts: - path: "libretro/snes9x_libretro.so" - diff --git a/.gitmodules b/.gitmodules index b79d1aec..def1eca2 100644 --- a/.gitmodules +++ b/.gitmodules @@ -4,15 +4,9 @@ [submodule "win32/zlib/src"] path = win32/zlib/src url = https://github.com/madler/zlib.git -[submodule "win32/DirectXMath"] - path = win32/DirectXMath - url = https://github.com/Microsoft/DirectXMath [submodule "shaders/SPIRV-Cross"] path = external/SPIRV-Cross url = https://github.com/KhronosGroup/SPIRV-Cross.git [submodule "shaders/glslang"] path = external/glslang url = https://github.com/KhronosGroup/glslang.git -[submodule "vulkan/vulkan"] - path = external/vulkan-headers - url = https://github.com/KhronosGroup/Vulkan-Headers.git diff --git a/appveyor.yml b/appveyor.yml index 59c2f172..b7583700 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,36 +1,36 @@ version: 1.61-{build} -image: Visual Studio 2017 +image: Visual Studio 2022 environment: matrix: - - generator: "Visual Studio 15" + - generator: "Visual Studio 17 2022" config: Release Unicode platform: Win32 arch: win32 output: win32\snes9x.exe - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022 - - generator: "Visual Studio 15" + - generator: "Visual Studio 17 2022" config: Release Unicode platform: x64 arch: win32-x64 output: win32\snes9x-x64.exe - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022 - - generator: "Visual Studio 15" + - generator: "Visual Studio 17 2022" config: libretro Release platform: Win32 arch: libretro output: libretro\Win32\libretro Release\snes9x_libretro.dll - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022 - - generator: "Visual Studio 15" + - generator: "Visual Studio 17 2022" config: libretro Release platform: x64 arch: libretro-x64 output: libretro\x64\libretro Release\snes9x_libretro.dll - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 + APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2022 init: - git config --global core.autocrlf input diff --git a/apu/apu.cpp b/apu/apu.cpp index 86af9c65..f9f2fd0a 100644 --- a/apu/apu.cpp +++ b/apu/apu.cpp @@ -5,6 +5,7 @@ \*****************************************************************************/ #include <math.h> +#include <vector> #include "../snes9x.h" #include "apu.h" #include "../msu1.h" @@ -36,10 +37,10 @@ namespace spc { static apu_callback callback = NULL; static void *callback_data = NULL; -static bool8 sound_in_sync = TRUE; -static bool8 sound_enabled = FALSE; +static bool8 sound_in_sync = true; +static bool8 sound_enabled = false; -static Resampler *resampler = NULL; +static Resampler resampler; static int32 reference_time; static uint32 remainder; @@ -56,9 +57,8 @@ static double dynamic_rate_multiplier = 1.0; namespace msu { // Always
16-bit, Stereo; 1.5x dsp buffer to never overflow -static Resampler *resampler = NULL; -static int16 *resample_buffer = NULL; -static int resample_buffer_size = 0; +static Resampler resampler; +static std::vector<int16> resampler_buffer; } // namespace msu static void UpdatePlaybackRate(void); @@ -74,44 +74,32 @@ bool8 S9xMixSamples(uint8 *dest, int sample_count) { memset(out, 0, sample_count << 1); S9xClearSamples(); + spc::sound_in_sync = true; + return true; } - else + + if (spc::resampler.avail() < sample_count) { - if (spc::resampler->avail() >= sample_count) - { - spc::resampler->read((short *)out, sample_count); + memset(out, 0, sample_count << 1); + return false; + } - if (Settings.MSU1) - { - if (msu::resampler->avail() >= sample_count) - { - if (msu::resample_buffer_size < sample_count) - { - if (msu::resample_buffer) - delete[] msu::resample_buffer; - msu::resample_buffer = new int16[sample_count]; - msu::resample_buffer_size = sample_count; - } - msu::resampler->read(msu::resample_buffer, - sample_count); - for (int i = 0; i < sample_count; ++i) - { - int32 mixed = (int32)out[i] + msu::resample_buffer[i]; - out[i] = ((int16)mixed != mixed) ? (mixed >> 31) ^ 0x7fff : mixed; - } - } - else // should never occur - assert(0); - } - } - else + spc::resampler.read((short *)out, sample_count); + + if (Settings.MSU1) + { + if ((int)msu::resampler_buffer.size() < sample_count) + msu::resampler_buffer.resize(sample_count); + + msu::resampler.read(msu::resampler_buffer.data(), sample_count); + for (int i = 0; i < sample_count; ++i) { - memset(out, 0, sample_count << 1); - return false; + int32 mixed = (int32)out[i] + msu::resampler_buffer[i]; + out[i] = ((int16)mixed != mixed) ? (mixed >> 31) ^ 0x7fff : mixed; } } - if (spc::resampler->space_empty() >= 535 * 2 || !Settings.SoundSync || + if (spc::resampler.space_empty() >= 535 * 2 || !Settings.SoundSync || Settings.TurboMode || Settings.Mute) spc::sound_in_sync = true; else @@ -122,9 +110,9 @@ bool8 S9xMixSamples(uint8 *dest, int sample_count) int S9xGetSampleCount(void) { - int avail = spc::resampler->avail(); + int avail = spc::resampler.avail(); if (Settings.MSU1) // return minimum available samples, otherwise we can run into the assert above due to partial sample generation in msu1 - avail = Resampler::min(avail, msu::resampler->avail()); + avail = Resampler::min(avail, msu::resampler.avail()); return avail; } @@ -133,7 +121,7 @@ void S9xLandSamples(void) if (spc::callback != NULL) spc::callback(spc::callback_data); - if (spc::resampler->space_empty() >= 535 * 2 || !Settings.SoundSync || + if (spc::resampler.space_empty() >= 535 * 2 || !Settings.SoundSync || Settings.TurboMode || Settings.Mute) spc::sound_in_sync = true; else @@ -142,15 +130,15 @@ void S9xLandSamples(void) void S9xClearSamples(void) { - spc::resampler->clear(); + spc::resampler.clear(); if (Settings.MSU1) - msu::resampler->clear(); + msu::resampler.clear(); } bool8 S9xSyncSound(void) { if (!Settings.SoundSync || spc::sound_in_sync) - return (TRUE); + return true; S9xLandSamples(); @@ -183,12 +171,12 @@ static void UpdatePlaybackRate(void) time_ratio *= spc::dynamic_rate_multiplier; } - spc::resampler->time_ratio(time_ratio); + spc::resampler.time_ratio(time_ratio); if (Settings.MSU1) { - time_ratio = (44100.0 / Settings.SoundPlaybackRate) * (Settings.SoundInputRate / 32040.0); - msu::resampler->time_ratio(time_ratio); + time_ratio = time_ratio * 44100 / 32040; + msu::resampler.time_ratio(time_ratio); } } @@ -201,27 +189,11 @@ bool8 S9xInitSound(int buffer_ms) if
(requested_buffer_size_samples > buffer_size_samples) buffer_size_samples = requested_buffer_size_samples; - if (!spc::resampler) - { - spc::resampler = new Resampler(buffer_size_samples); - if (!spc::resampler) - return (FALSE); - } - else - spc::resampler->resize(buffer_size_samples); + spc::resampler.resize(buffer_size_samples); + msu::resampler.resize(buffer_size_samples * 3 / 2); - - if (!msu::resampler) - { - msu::resampler = new Resampler(buffer_size_samples * 3 / 2); - if (!msu::resampler) - return (FALSE); - } - else - msu::resampler->resize(buffer_size_samples * 3 / 2); - - SNES::dsp.spc_dsp.set_output(spc::resampler); - S9xMSU1SetOutput(msu::resampler); + SNES::dsp.spc_dsp.set_output(&spc::resampler); + S9xMSU1SetOutput(&msu::resampler); UpdatePlaybackRate(); @@ -239,7 +211,7 @@ void S9xSetSoundMute(bool8 mute) { Settings.Mute = mute; if (!spc::sound_enabled) - Settings.Mute = TRUE; + Settings.Mute = true; } void S9xDumpSPCSnapshot(void) @@ -249,33 +221,22 @@ void S9xDumpSPCSnapshot(void) static void SPCSnapshotCallback(void) { - S9xSPCDump(S9xGetFilenameInc((".spc"), SPC_DIR)); + S9xSPCDump(S9xGetFilenameInc((".spc"), SPC_DIR).c_str()); printf("Dumped key-on triggered spc snapshot.\n"); } bool8 S9xInitAPU(void) { - spc::resampler = NULL; - msu::resampler = NULL; + spc::resampler.clear(); + msu::resampler.clear(); - return (TRUE); + return true; } void S9xDeinitAPU(void) { - if (spc::resampler) - { - delete spc::resampler; - spc::resampler = NULL; - } - - if (msu::resampler) - { - delete msu::resampler; - msu::resampler = NULL; - } - S9xMSU1DeInit(); + msu::resampler_buffer.clear(); } static inline int S9xAPUGetClock(int32 cpucycles) @@ -309,10 +270,10 @@ void S9xAPUSetReferenceTime(int32 cpucycles) void S9xAPUExecute(void) { - SNES::smp.clock -= S9xAPUGetClock(CPU.Cycles); - SNES::smp.enter(); - + int cycles = S9xAPUGetClock(CPU.Cycles); spc::remainder = S9xAPUGetClockRemainder(CPU.Cycles); + SNES::smp.clock -= cycles; + SNES::smp.enter(); S9xAPUSetReferenceTime(CPU.Cycles); } @@ -322,7 +283,7 @@ void S9xAPUEndScanline(void) S9xAPUExecute(); SNES::dsp.synchronize(); - if (spc::resampler->space_filled() >= APU_SAMPLE_BLOCK || !spc::sound_in_sync) + if (spc::resampler.space_filled() >= APU_SAMPLE_BLOCK) S9xLandSamples(); } @@ -518,9 +479,9 @@ bool8 S9xSPCDump(const char *filename) fs = fopen(filename, "wb"); if (!fs) - return (FALSE); + return false; - S9xSetSoundMute(TRUE); + S9xSetSoundMute(true); SNES::smp.save_spc(buf); @@ -533,7 +494,7 @@ bool8 S9xSPCDump(const char *filename) fclose(fs); - S9xSetSoundMute(FALSE); + S9xSetSoundMute(false); - return (TRUE); + return true; } diff --git a/bsx.cpp b/bsx.cpp index 2881dbed..7bc0df94 100644 --- a/bsx.cpp +++ b/bsx.cpp @@ -738,16 +738,14 @@ void S9xBSXSetStream1 (uint8 count) if (BSX.sat_stream1.is_open()) BSX.sat_stream1.close(); //If Stream already opened for one file: Close it. 
- char path[PATH_MAX + 1], name[PATH_MAX + 1]; + std::string path = S9xGetDirectory(SAT_DIR) + SLASH_STR; - strcpy(path, S9xGetDirectory(SAT_DIR)); - strcat(path, SLASH_STR); - - snprintf(name, PATH_MAX + 1, "BSX%04X-%d.bin", (BSX.PPU[0x2188 - BSXPPUBASE] | (BSX.PPU[0x2189 - BSXPPUBASE] * 256)), count); //BSXHHHH-DDD.bin - strcat(path, name); + char name[PATH_MAX]; + snprintf(name, PATH_MAX, "BSX%04X-%d.bin", (BSX.PPU[0x2188 - BSXPPUBASE] | (BSX.PPU[0x2189 - BSXPPUBASE] * 256)), count); //BSXHHHH-DDD.bin + path += name; BSX.sat_stream1.clear(); - BSX.sat_stream1.open(path, std::ios::in | std::ios::binary); + BSX.sat_stream1.open(path.c_str(), std::ios::in | std::ios::binary); if (BSX.sat_stream1.good()) { BSX.sat_stream1.seekg(0, BSX.sat_stream1.end); @@ -770,16 +768,15 @@ void S9xBSXSetStream2 (uint8 count) if (BSX.sat_stream2.is_open()) BSX.sat_stream2.close(); //If Stream already opened for one file: Close it. - char path[PATH_MAX + 1], name[PATH_MAX + 1]; - strcpy(path, S9xGetDirectory(SAT_DIR)); - strcat(path, SLASH_STR); + std::string path = S9xGetDirectory(SAT_DIR) + SLASH_STR; - snprintf(name, PATH_MAX + 1, "BSX%04X-%d.bin", (BSX.PPU[0x218E - BSXPPUBASE] | (BSX.PPU[0x218F - BSXPPUBASE] * 256)), count); //BSXHHHH-DDD.bin - strcat(path, name); + char name[PATH_MAX]; + snprintf(name, PATH_MAX, "BSX%04X-%d.bin", (BSX.PPU[0x218E - BSXPPUBASE] | (BSX.PPU[0x218F - BSXPPUBASE] * 256)), count); //BSXHHHH-DDD.bin + path += name; BSX.sat_stream2.clear(); - BSX.sat_stream2.open(path, std::ios::in | std::ios::binary); + BSX.sat_stream2.open(path.c_str(), std::ios::in | std::ios::binary); if (BSX.sat_stream2.good()) { BSX.sat_stream2.seekg(0, BSX.sat_stream2.end); @@ -1207,20 +1204,15 @@ uint8 * S9xGetBasePointerBSX (uint32 address) static bool8 BSX_LoadBIOS (void) { FILE *fp; - char path[PATH_MAX + 1], name[PATH_MAX + 1]; bool8 r = FALSE; - strcpy(path, S9xGetDirectory(BIOS_DIR)); - strcat(path, SLASH_STR); - strcpy(name, path); - strcat(name, "BS-X.bin"); + std::string name = S9xGetDirectory(BIOS_DIR) + SLASH_STR + "BS-X.bin"; - fp = fopen(name, "rb"); + fp = fopen(name.c_str(), "rb"); if (!fp) { - strcpy(name, path); - strcat(name, "BS-X.bios"); - fp = fopen(name, "rb"); + name = S9xGetDirectory(BIOS_DIR) + SLASH_STR + "BS-X.bios"; + fp = fopen(name.c_str(), "rb"); } if (fp) @@ -1255,7 +1247,7 @@ void S9xInitBSX (void) { Settings.BS = FALSE; - if (is_BSX_BIOS(Memory.ROM,Memory.CalculatedSize)) + if (is_BSX_BIOS(Memory.ROM,Memory.CalculatedSize)) { // BS-X itself diff --git a/cheats.cpp b/cheats.cpp index 0b54a0f7..46347c16 100644 --- a/cheats.cpp +++ b/cheats.cpp @@ -37,112 +37,6 @@ (s) == S9X_24_BITS ? 
(((int32) ((*((m) + (o)) + (*((m) + (o) + 1) << 8) + (*((m) + (o) + 2) << 16)) << 8)) >> 8): \ ((int32) (*((m) + (o)) + (*((m) + (o) + 1) << 8) + (*((m) + (o) + 2) << 16) + (*((m) + (o) + 3) << 24)))) -static bool8 S9xAllHex (const char *, int); - - -static bool8 S9xAllHex (const char *code, int len) -{ - for (int i = 0; i < len; i++) - if ((code[i] < '0' || code[i] > '9') && (code[i] < 'a' || code[i] > 'f') && (code[i] < 'A' || code[i] > 'F')) - return (FALSE); - - return (TRUE); -} - -const char * S9xProActionReplayToRaw (const char *code, uint32 &address, uint8 &byte) -{ - uint32 data = 0; - - if (strlen(code) != 8 || !S9xAllHex(code, 8) || sscanf(code, "%x", &data) != 1) - return ("Invalid Pro Action Replay code - should be 8 hex digits in length."); - - address = data >> 8; - byte = (uint8) data; - - return (NULL); -} - -const char * S9xGoldFingerToRaw (const char *code, uint32 &address, bool8 &sram, uint8 &num_bytes, uint8 bytes[3]) -{ - char tmp[15]; - int i; - - if (strlen(code) != 14) - return ("Invalid Gold Finger code - should be 14 hex digits in length."); - - strncpy(tmp, code, 5); - tmp[5] = 0; - if (sscanf(tmp, "%x", &address) != 1) - return ("Invalid Gold Finger code."); - - // Correct GoldFinger Address - address = (address & 0x7FFF) | ((address & 0x7F8000) << 1) | 0x8000; - - for (i = 0; i < 3; i++) - { - unsigned int byte; - - strncpy(tmp, code + 5 + i * 2, 2); - tmp[2] = 0; - if (sscanf(tmp, "%x", &byte) != 1) - break; - bytes[i] = (uint8) byte; - } - - num_bytes = i; - sram = code[13] == '1'; - - return (NULL); -} - -const char * S9xGameGenieToRaw (const char *code, uint32 &address, uint8 &byte) -{ - char new_code[12]; - - if (strlen(code) != 9 || *(code + 4) != '-' || !S9xAllHex(code, 4) || !S9xAllHex(code + 5, 4)) - return ("Invalid Game Genie(tm) code - should be 'xxxx-xxxx'."); - - strcpy(new_code, "0x"); - strncpy(new_code + 2, code, 4); - strcpy(new_code + 6, code + 5); - - static const char *real_hex = "0123456789ABCDEF"; - static const char *genie_hex = "DF4709156BC8A23E"; - - for (int i = 2; i < 10; i++) - { - if (islower(new_code[i])) - new_code[i] = toupper(new_code[i]); - - int j; - for (j = 0; j < 16; j++) - { - if (new_code[i] == genie_hex[j]) - { - new_code[i] = real_hex[j]; - break; - } - } - - if (j == 16) - return ("Invalid hex-character in Game Genie(tm) code."); - } - - uint32 data = 0; - sscanf(new_code, "%x", &data); - byte = (uint8) (data >> 24); - address = data & 0xffffff; - address = ((address & 0x003c00) << 10) + - ((address & 0x00003c) << 14) + - ((address & 0xf00000) >> 8) + - ((address & 0x000003) << 10) + - ((address & 0x00c000) >> 6) + - ((address & 0x0f0000) >> 12) + - ((address & 0x0003c0) >> 6); - - return (NULL); -} - void S9xStartCheatSearch (SCheatData *d) { memmove(d->CWRAM, d->RAM, 0x20000); diff --git a/cheats.h b/cheats.h index 7fbabd75..fbc15982 100644 --- a/cheats.h +++ b/cheats.h @@ -23,14 +23,14 @@ struct SCheat struct SCheatGroup { - char *name; + std::string name; bool8 enabled; - std::vector c; + std::vector cheat; }; struct SCheatData { - std::vector g; + std::vector group; bool8 enabled; uint8 CWRAM[0x20000]; uint8 CSRAM[0x80000]; @@ -73,20 +73,20 @@ typedef enum extern SCheatData Cheat; extern Watch watches[16]; -int S9xAddCheatGroup (const char *name, const char *cheat); -int S9xModifyCheatGroup (uint32 index, const char *name, const char *cheat); -void S9xEnableCheatGroup (uint32 index); -void S9xDisableCheatGroup (uint32 index); -void S9xDeleteCheats (void); -char *S9xCheatGroupToText (uint32 index); -void 
S9xDeleteCheatGroup (uint32 index); -bool8 S9xLoadCheatFile (const char *filename); -bool8 S9xSaveCheatFile (const char *filename); -void S9xUpdateCheatsInMemory (void); -int S9xImportCheatsFromDatabase(const char *filename); -void S9xCheatsDisable (void); -void S9xCheatsEnable (void); -char *S9xCheatValidate (const char *cheat); +int S9xAddCheatGroup(const std::string &name, const std::string &cheat); +int S9xModifyCheatGroup(uint32 index, const std::string &name, const std::string &cheat); +void S9xEnableCheatGroup(uint32 index); +void S9xDisableCheatGroup(uint32 index); +void S9xDeleteCheats(void); +std::string S9xCheatGroupToText(uint32 index); +void S9xDeleteCheatGroup(uint32 index); +bool8 S9xLoadCheatFile(const std::string &filename); +bool8 S9xSaveCheatFile(const std::string &filename); +void S9xUpdateCheatsInMemory(void); +int S9xImportCheatsFromDatabase(const std::string &filename); +void S9xCheatsDisable(void); +void S9xCheatsEnable(void); +std::string S9xCheatValidate(const std::string &cheat); void S9xInitCheatData (void); void S9xInitWatchedAddress (void); @@ -96,8 +96,4 @@ void S9xSearchForValue (SCheatData *, S9xCheatComparisonType, S9xCheatDataSize, void S9xSearchForAddress (SCheatData *, S9xCheatComparisonType, S9xCheatDataSize, uint32, bool8); void S9xOutputCheatSearchResults (SCheatData *); -const char * S9xGameGenieToRaw (const char *, uint32 &, uint8 &); -const char * S9xProActionReplayToRaw (const char *, uint32 &, uint8 &); -const char * S9xGoldFingerToRaw (const char *, uint32 &, bool8 &, uint8 &, uint8 bytes[3]); - #endif diff --git a/cheats2.cpp b/cheats2.cpp index 83a29693..997d58c2 100644 --- a/cheats2.cpp +++ b/cheats2.cpp @@ -4,37 +4,24 @@ For further information, consult the LICENSE file in the root directory. \*****************************************************************************/ -#include <ctype.h> - +#include "bml.h" +#include "cheats.h" #include "snes9x.h" #include "memmap.h" -#include "cheats.h" -#include "bml.h" -static inline char *trim (char *string) +static inline uint8 S9xGetByteFree(uint32 Address) { - int start; - int end; - - for (start = 0; string[start] && isspace (string[start]); start++) {} - for (end = start; string[end] && !isspace (string[end]); end++) {} - string[end] = '\0'; - return &string[start]; -} - -static inline uint8 S9xGetByteFree (uint32 Address) -{ - int block = (Address & 0xffffff) >> MEMMAP_SHIFT; + int block = (Address & 0xffffff) >> MEMMAP_SHIFT; uint8 *GetAddress = Memory.Map[block]; uint8 byte; - if (GetAddress >= (uint8 *) CMemory::MAP_LAST) + if (GetAddress >= (uint8 *)CMemory::MAP_LAST) { byte = *(GetAddress + (Address & 0xffff)); return (byte); } - switch ((pint) GetAddress) + switch ((pint)GetAddress) { case CMemory::MAP_CPU: byte = S9xGetCPU(Address & 0xffff); @@ -108,18 +95,18 @@ static inline uint8 S9xGetByteFree (uint32 Address) } } -static inline void S9xSetByteFree (uint8 Byte, uint32 Address) +static inline void S9xSetByteFree(uint8 Byte, uint32 Address) { int block = (Address & 0xffffff) >> MEMMAP_SHIFT; uint8 *SetAddress = Memory.Map[block]; - if (SetAddress >= (uint8 *) CMemory::MAP_LAST) + if (SetAddress >= (uint8 *)CMemory::MAP_LAST) { *(SetAddress + (Address & 0xffff)) = Byte; return; } - switch ((pint) SetAddress) + switch ((pint)SetAddress) { case CMemory::MAP_CPU: S9xSetCPU(Byte, Address & 0xffff); @@ -197,165 +184,229 @@ static inline void S9xSetByteFree (uint8 Byte, uint32 Address) } } -void S9xInitWatchedAddress (void) +void S9xInitWatchedAddress(void) { for (unsigned int i = 0; i < sizeof(watches)
/ sizeof(watches[0]); i++) watches[i].on = false; } -void S9xInitCheatData (void) +void S9xInitCheatData(void) { Cheat.RAM = Memory.RAM; Cheat.SRAM = Memory.SRAM; Cheat.FillRAM = Memory.FillRAM; } +static inline std::string trim(const std::string &&string) +{ + auto start = string.find_first_not_of(" \t\n\r"); + auto end = string.find_last_not_of(" \t\n\r"); + if (start != std::string::npos && end != std::string::npos) + return string.substr(start, end - start + 1); + return ""; +} -void S9xUpdateCheatInMemory (SCheat *c) +void S9xUpdateCheatInMemory(SCheat &c) { uint8 byte; - if (!c->enabled) + if (!c.enabled) return; - byte = S9xGetByteFree (c->address); + byte = S9xGetByteFree(c.address); - if (byte != c->byte) + if (byte != c.byte) { /* The game wrote a different byte to the address, update saved_byte */ - c->saved_byte = byte; + c.saved_byte = byte; - if (c->conditional) + if (c.conditional) { - if (c->saved_byte != c->cond_byte && c->cond_true) + if (c.saved_byte != c.cond_byte && c.cond_true) { /* Condition is now false, let the byte stand */ - c->cond_true = false; + c.cond_true = false; } - else if (c->saved_byte == c->cond_byte && !c->cond_true) + else if (c.saved_byte == c.cond_byte && !c.cond_true) { - c->cond_true = true; - S9xSetByteFree (c->byte, c->address); + c.cond_true = true; + S9xSetByteFree(c.byte, c.address); } } else - S9xSetByteFree (c->byte, c->address); + S9xSetByteFree(c.byte, c.address); } - else if (c->conditional) + else if (c.conditional) { - if (byte == c->cond_byte) + if (byte == c.cond_byte) { - c->cond_true = true; - c->saved_byte = byte; - S9xSetByteFree (c->byte, c->address); + c.cond_true = true; + c.saved_byte = byte; + S9xSetByteFree(c.byte, c.address); } } } -void S9xDisableCheat (SCheat *c) +void S9xDisableCheat(SCheat &c) { - if (!c->enabled) + if (!c.enabled) return; if (!Cheat.enabled) { - c->enabled = false; + c.enabled = false; return; } /* Make sure we restore the up-to-date written byte */ - S9xUpdateCheatInMemory (c); - c->enabled = false; + S9xUpdateCheatInMemory(c); + c.enabled = false; - if (c->conditional && !c->cond_true) + if (c.conditional && !c.cond_true) return; - S9xSetByteFree (c->saved_byte, c->address); - c->cond_true = false; + S9xSetByteFree (c.saved_byte, c.address); + c.cond_true = false; } -void S9xDeleteCheatGroup (uint32 g) +void S9xDeleteCheatGroup(uint32 g) { unsigned int i; - if (g >= Cheat.g.size ()) + if (g >= Cheat.group.size()) return; - for (i = 0; i < Cheat.g[g].c.size (); i++) + for (i = 0; i < Cheat.group[g].cheat.size(); i++) { - S9xDisableCheat (&Cheat.g[g].c[i]); + S9xDisableCheat(Cheat.group[g].cheat[i]); } - delete[] Cheat.g[g].name; - - Cheat.g.erase (Cheat.g.begin () + g); + Cheat.group.erase(Cheat.group.begin() + g); } -void S9xDeleteCheats (void) +void S9xDeleteCheats(void) { - unsigned int i; - - for (i = 0; i < Cheat.g.size (); i++) + for (size_t i = 0; i < Cheat.group.size(); i++) { - S9xDisableCheatGroup (i); - - delete[] Cheat.g[i].name; + S9xDisableCheatGroup(i); } - Cheat.g.clear (); + Cheat.group.clear(); } -void S9xEnableCheat (SCheat *c) +void S9xEnableCheat(SCheat &c) { uint8 byte; - if (c->enabled) + if (c.enabled) return; - c->enabled = true; + c.enabled = true; if (!Cheat.enabled) return; - byte = S9xGetByteFree(c->address); + byte = S9xGetByteFree(c.address); - if (c->conditional) + if (c.conditional) { - if (byte != c->cond_byte) + if (byte != c.cond_byte) return; - c->cond_true = true; + c.cond_true = true; } - c->saved_byte = byte; - S9xSetByteFree (c->byte, c->address); + 
c.saved_byte = byte; + S9xSetByteFree(c.byte, c.address); } -void S9xEnableCheatGroup (uint32 num) +void S9xEnableCheatGroup(uint32 num) { - unsigned int i; + for (auto &c : Cheat.group[num].cheat) + S9xEnableCheat(c); - for (i = 0; i < Cheat.g[num].c.size (); i++) - { - S9xEnableCheat (&Cheat.g[num].c[i]); - } - - Cheat.g[num].enabled = true; + Cheat.group[num].enabled = true; } -void S9xDisableCheatGroup (uint32 num) +void S9xDisableCheatGroup(uint32 num) { - unsigned int i; + for (auto &c : Cheat.group[num].cheat) + S9xDisableCheat(c); - for (i = 0; i < Cheat.g[num].c.size (); i++) - { - S9xDisableCheat (&Cheat.g[num].c[i]); - } - - Cheat.g[num].enabled = false; + Cheat.group[num].enabled = false; } -SCheat S9xTextToCheat (char *text) +static bool is_all_hex(const std::string &code) +{ + for (const auto &c : code) + { + if ((c < '0' || c > '9') && + (c < 'a' || c > 'f') && + (c < 'A' || c > 'F')) + return false; + } + + return true; +} + +bool S9xProActionReplayToRaw(const std::string &code, uint32 &address, uint8 &byte) +{ + if (code.length() != 8 || !is_all_hex(code)) + return false; + + uint32 data = std::strtoul(code.c_str(), nullptr, 16); + + address = data >> 8; + byte = (uint8)data; + + return true; +} + +bool S9xGameGenieToRaw(const std::string &code, uint32 &address, uint8 &byte) +{ + if (code.length() != 9) + return false; + if (code[4] != '-') + return false; + if (!is_all_hex(code.substr(0, 4))) + return false; + if (!is_all_hex(code.substr(5, 4))) + return false; + + auto new_code = code.substr(0, 4) + code.substr(5, 4); + + static const char *real_hex = "0123456789ABCDEF"; + static const char *genie_hex = "DF4709156BC8A23E"; + + for (auto &c : new_code) + { + c = toupper(c); + + for (int i = 0; i < 16; i++) + { + if (genie_hex[i] == c) + { + c = real_hex[i]; + break; + } + } + } + + uint32 data = strtoul(new_code.c_str(), nullptr, 16); + byte = (uint8)(data >> 24); + address = data & 0xffffff; + address = ((address & 0x003c00) << 10) + + ((address & 0x00003c) << 14) + + ((address & 0xf00000) >> 8) + + ((address & 0x000003) << 10) + + ((address & 0x00c000) >> 6) + + ((address & 0x0f0000) >> 12) + + ((address & 0x0003c0) >> 6); + + return true; +} + +SCheat S9xTextToCheat(const std::string &text) { SCheat c; unsigned int byte = 0; @@ -364,34 +415,28 @@ SCheat S9xTextToCheat (char *text) c.enabled = false; c.conditional = false; - if (!S9xGameGenieToRaw (text, c.address, c.byte)) + if (S9xGameGenieToRaw(text, c.address, c.byte)) { byte = c.byte; } - - else if (!S9xProActionReplayToRaw (text, c.address, c.byte)) + else if (S9xProActionReplayToRaw(text, c.address, c.byte)) { byte = c.byte; } - - else if (sscanf (text, "%x = %x ? %x", &c.address, &cond_byte, &byte) == 3) + else if (sscanf(text.c_str(), "%x = %x ? 
%x", &c.address, &cond_byte, &byte) == 3) { c.conditional = true; } - - else if (sscanf (text, "%x = %x", &c.address, &byte) == 2) + else if (sscanf(text.c_str(), "%x = %x", &c.address, &byte) == 2) { } - - else if (sscanf (text, "%x / %x / %x", &c.address, &cond_byte, &byte) == 3) + else if (sscanf(text.c_str(), "%x / %x / %x", &c.address, &cond_byte, &byte) == 3) { c.conditional = true; } - - else if (sscanf (text, "%x / %x", &c.address, &byte) == 2) + else if (sscanf(text.c_str(), "%x / %x", &c.address, &byte) == 2) { } - else { c.address = 0; @@ -404,313 +449,287 @@ SCheat S9xTextToCheat (char *text) return c; } -SCheatGroup S9xCreateCheatGroup (const char *name, const char *cheat) +std::vector split_string(const std::string &str, unsigned char delim) { - SCheatGroup g; - char *code_string = strdup (cheat); - char *code_ptr = code_string; - int len; + std::vector tokens; + size_t pos = 0; + size_t index; - g.name = strdup (name); - g.enabled = false; - - for (len = strcspn (code_ptr, "+"); len; len = strcspn (code_ptr, "+")) + while (pos < str.length()) { - char *code = code_ptr; - code_ptr += len + (code_ptr[len] == '\0' ? 0 : 1); - code[len] = '\0'; - code = trim (code); + index = str.find(delim, pos); + if (index == std::string::npos) + { + if (pos < str.length()) + { + tokens.push_back(trim(str.substr(pos))); + } - SCheat c = S9xTextToCheat (code); - if (c.address) - g.c.push_back (c); + break; + } + else if (index > pos) + { + tokens.push_back(trim(str.substr(pos, index - pos))); + } + + pos = index + 1; } - free(code_string); + return tokens; +} + +SCheatGroup S9xCreateCheatGroup(const std::string &name, const std::string &cheat) +{ + SCheatGroup g; + + g.name = name; + g.enabled = false; + + auto cheats = split_string(cheat, '+'); + for (const auto &c : cheats) + { + SCheat new_cheat = S9xTextToCheat(c); + if (new_cheat.address) + g.cheat.push_back(new_cheat); + } return g; } -int S9xAddCheatGroup (const char *name, const char *cheat) +int S9xAddCheatGroup(const std::string &name, const std::string &cheat) { - SCheatGroup g = S9xCreateCheatGroup (name, cheat); - if (g.c.size () == 0) + SCheatGroup g = S9xCreateCheatGroup(name, cheat); + if (g.cheat.size() == 0) return -1; - Cheat.g.push_back (g); + Cheat.group.push_back(g); - return Cheat.g.size () - 1; + return Cheat.group.size() - 1; } -int S9xModifyCheatGroup (uint32 num, const char *name, const char *cheat) +int S9xModifyCheatGroup(uint32 num, const std::string &name, const std::string &cheat) { - if (num >= Cheat.g.size()) + if (num >= Cheat.group.size()) return -1; - S9xDisableCheatGroup (num); - delete[] Cheat.g[num].name; + S9xDisableCheatGroup(num); - Cheat.g[num] = S9xCreateCheatGroup (name, cheat); + Cheat.group[num] = S9xCreateCheatGroup(name, cheat); return num; } -char *S9xCheatToText (SCheat *c) +std::string S9xCheatToText(const SCheat &c) { - int size = 10; /* 6 address, 1 =, 2 byte, 1 NUL */ - char *text; + char output[256]{}; - if (c->conditional) - size += 3; /* additional 2 byte, 1 ? 
*/ - - text = new char[size]; - - if (c->conditional) - snprintf (text, size, "%06x=%02x?%02x", c->address, c->cond_byte, c->byte); + if (c.conditional) + sprintf(output, "%06x=%02x?%02x", c.address, c.cond_byte, c.byte); else - snprintf (text, size, "%06x=%02x", c->address, c->byte); + sprintf(output, "%06x=%02x", c.address, c.byte); + + return std::string(output); +} + +std::string S9xCheatGroupToText(SCheatGroup &g) +{ + std::string text = ""; + + for (size_t i = 0; i < g.cheat.size(); i++) + { + text += S9xCheatToText(g.cheat[i]); + if (i != g.cheat.size() - 1) + text += "+"; + } return text; } -char *S9xCheatGroupToText (SCheatGroup *g) +std::string S9xCheatValidate(const std::string &code_string) { - std::string text = ""; - unsigned int i; + SCheatGroup g = S9xCreateCheatGroup("temp", code_string); - if (g->c.size () == 0) - return NULL; - - for (i = 0; i < g->c.size (); i++) + if (g.cheat.size() > 0) { - char *tmp = S9xCheatToText (&g->c[i]); - if (i != 0) - text += " + "; - text += tmp; - delete[] tmp; + return S9xCheatGroupToText(g); } - return strdup (text.c_str ()); + return ""; } -char *S9xCheatValidate (const char *code_string) +std::string S9xCheatGroupToText(uint32 num) { - SCheatGroup g = S9xCreateCheatGroup ("temp", code_string); + if (num >= Cheat.group.size()) + return ""; - delete[] g.name; - - if (g.c.size() > 0) - { - return S9xCheatGroupToText (&g); - } - - return NULL; + return S9xCheatGroupToText(Cheat.group[num]); } -char *S9xCheatGroupToText (uint32 num) +void S9xUpdateCheatsInMemory(void) { - if (num >= Cheat.g.size ()) - return NULL; - - return S9xCheatGroupToText (&Cheat.g[num]); -} - -void S9xUpdateCheatsInMemory (void) -{ - unsigned int i; - unsigned int j; - if (!Cheat.enabled) return; - for (i = 0; i < Cheat.g.size (); i++) - { - for (j = 0; j < Cheat.g[i].c.size (); j++) - { - S9xUpdateCheatInMemory (&Cheat.g[i].c[j]); - } - } + for (auto &group : Cheat.group) + for (auto &cheat : group.cheat) + S9xUpdateCheatInMemory(cheat); } -static int S9xCheatIsDuplicate (const char *name, const char *code) +static bool S9xCheatIsDuplicate(const std::string &name, const std::string &code) { - unsigned int i; - - for (i = 0; i < Cheat.g.size(); i++) + for (size_t i = 0; i < Cheat.group.size(); i++) { - if (!strcmp (name, Cheat.g[i].name)) + if (Cheat.group[i].name == name) { - char *code_string = S9xCheatGroupToText (i); - char *validated = S9xCheatValidate (code); + auto code_string = S9xCheatGroupToText(i); + auto validated_string = S9xCheatValidate(code); - if (validated && !strcmp (code_string, validated)) - { - free (code_string); - free (validated); - return TRUE; - } - - free (code_string); - free (validated); + if (validated_string == code_string) + return true; } } - return FALSE; + return false; } -static void S9xLoadCheatsFromBMLNode (bml_node *n) +static void S9xLoadCheatsFromBMLNode(bml_node &n) { - unsigned int i; - - for (i = 0; i < n->child.size (); i++) + for (auto &c : n.child) { - if (!strcasecmp (n->child[i].name.c_str(), "cheat")) - { - const char *desc = NULL; - const char *code = NULL; - bool8 enabled = false; + if (strcasecmp(c.name.c_str(), "cheat")) + continue; - bml_node *c = &n->child[i]; - bml_node *tmp = NULL; + auto subnode = c.find_subnode("code"); + if (!subnode) + continue; + std::string code = subnode->data; - tmp = c->find_subnode("name"); - if (!tmp) - desc = (char *) ""; - else - desc = tmp->data.c_str(); + std::string name; + subnode = c.find_subnode("name"); + if (subnode) + name = subnode->data; - tmp = 
c->find_subnode("code"); - if (tmp) - code = tmp->data.c_str(); + bool enable = false; + if (c.find_subnode("enable")) + enable = true; - if (c->find_subnode("enable")) - enabled = true; + if (S9xCheatIsDuplicate(name, code)) + continue; - if (code && !S9xCheatIsDuplicate (desc, code)) - { - int index = S9xAddCheatGroup (desc, code); - - if (enabled) - S9xEnableCheatGroup (index); - } - } + auto index = S9xAddCheatGroup(name, code); + if (enable) + S9xEnableCheatGroup(index); } return; } -static bool8 S9xLoadCheatFileClassic (const char *filename) +static bool8 S9xLoadCheatFileClassic(const std::string &filename) { FILE *fs; uint8 data[28]; - fs = fopen(filename, "rb"); + fs = fopen(filename.c_str(), "rb"); if (!fs) return (FALSE); - while (fread ((void *) data, 1, 28, fs) == 28) + while (fread(data, 1, 28, fs) == 28) { SCheat c; - char name[21]; - char cheat[10]; c.enabled = (data[0] & 4) == 0; c.byte = data[1]; - c.address = data[2] | (data[3] << 8) | (data[4] << 16); - memcpy (name, &data[8], 20); - name[20] = 0; + c.address = data[2] | (data[3] << 8) | (data[4] << 16); - snprintf (cheat, 10, "%x=%x", c.address, c.byte); - S9xAddCheatGroup (name, cheat); + std::string name((const char *)&data[8], 20); + char code[32]{}; + sprintf(code, "%x=%x", c.address, c.byte); + std::string cheat(code); + S9xAddCheatGroup(name, cheat); if (c.enabled) - S9xEnableCheatGroup (Cheat.g.size () - 1); + S9xEnableCheatGroup(Cheat.group.size() - 1); } fclose(fs); - return (TRUE); + return TRUE; } -bool8 S9xLoadCheatFile (const char *filename) +bool8 S9xLoadCheatFile(const std::string &filename) { bml_node bml; if (!bml.parse_file(filename)) { - return S9xLoadCheatFileClassic (filename); + return S9xLoadCheatFileClassic(filename); } bml_node *n = bml.find_subnode("cheat"); if (n) { - S9xLoadCheatsFromBMLNode (&bml); + S9xLoadCheatsFromBMLNode(bml); } if (!n) { - return S9xLoadCheatFileClassic (filename); + return S9xLoadCheatFileClassic(filename); } return (TRUE); } -bool8 S9xSaveCheatFile (const char *filename) +bool8 S9xSaveCheatFile(const std::string &filename) { unsigned int i; FILE *file = NULL; - if (Cheat.g.size () == 0) + if (Cheat.group.size() == 0) { - remove (filename); + remove(filename.c_str()); return TRUE; } - file = fopen (filename, "w"); + file = fopen(filename.c_str(), "w"); if (!file) return FALSE; - for (i = 0; i < Cheat.g.size (); i++) + for (i = 0; i < Cheat.group.size(); i++) { - char *txt = S9xCheatGroupToText (i); - - fprintf (file, - "cheat\n" - " name: %s\n" - " code: %s\n" - "%s\n", - Cheat.g[i].name ? Cheat.g[i].name : "", - txt, - Cheat.g[i].enabled ? " enable\n" : "" - ); - - delete[] txt; + fprintf(file, + "cheat\n" + " name: %s\n" + " code: %s\n" + "%s\n", + Cheat.group[i].name.c_str(), + S9xCheatGroupToText(i).c_str(), + Cheat.group[i].enabled ? 
" enable\n" : ""); } - fclose (file); + fclose(file); return TRUE; } -void S9xCheatsDisable (void) +void S9xCheatsDisable(void) { unsigned int i; if (!Cheat.enabled) return; - for (i = 0; i < Cheat.g.size (); i++) + for (i = 0; i < Cheat.group.size(); i++) { - if (Cheat.g[i].enabled) + if (Cheat.group[i].enabled) { - S9xDisableCheatGroup (i); - Cheat.g[i].enabled = TRUE; + S9xDisableCheatGroup(i); + Cheat.group[i].enabled = TRUE; } } Cheat.enabled = FALSE; } -void S9xCheatsEnable (void) +void S9xCheatsEnable(void) { unsigned int i; @@ -719,17 +738,17 @@ void S9xCheatsEnable (void) Cheat.enabled = TRUE; - for (i = 0; i < Cheat.g.size (); i++) + for (i = 0; i < Cheat.group.size(); i++) { - if (Cheat.g[i].enabled) + if (Cheat.group[i].enabled) { - Cheat.g[i].enabled = FALSE; - S9xEnableCheatGroup (i); + Cheat.group[i].enabled = FALSE; + S9xEnableCheatGroup(i); } } } -int S9xImportCheatsFromDatabase (const char *filename) +int S9xImportCheatsFromDatabase(const std::string &filename) { char sha256_txt[65]; char hextable[] = "0123456789abcdef"; @@ -746,19 +765,16 @@ int S9xImportCheatsFromDatabase (const char *filename) } sha256_txt[64] = '\0'; - for (i = 0; i < bml.child.size (); i++) + for (auto &c : bml.child) { - if (!strcasecmp (bml.child[i].name.c_str(), "cartridge")) + if (!strcasecmp(c.name.c_str(), "cartridge")) { - bml_node *n; + auto n = c.find_subnode("sha256"); - if ((n = bml.child[i].find_subnode ("sha256"))) + if (n && !strcasecmp(n->data.c_str(), sha256_txt)) { - if (!strcasecmp (n->data.c_str(), sha256_txt)) - { - S9xLoadCheatsFromBMLNode (&bml.child[i]); - return 0; - } + S9xLoadCheatsFromBMLNode(c); + return 0; } } } diff --git a/compat.cpp b/compat.cpp deleted file mode 100644 index e03d3d83..00000000 --- a/compat.cpp +++ /dev/null @@ -1,67 +0,0 @@ -#include -#include "port.h" - -void _splitpath(const char *path, char *drive, char *dir, char *fname, char *ext) -{ - char *slash = strrchr((char *)path, SLASH_CHAR); - char *dot = strrchr((char *)path, '.'); - - *drive = '\0'; - - if (dot && slash && dot < slash) - { - dot = 0; - } - - if (!slash) - { - *dir = '\0'; - strcpy(fname, path); - - if (dot) - { - fname[dot - path] = '\0'; - strcpy(ext, dot + 1); - } - else - { - *ext = '\0'; - } - } - else - { - strcpy(dir, path); - dir[slash - path] = '\0'; - strcpy(fname, slash + 1); - - if (dot) - { - fname[(dot - slash) - 1] = '\0'; - strcpy(ext, dot + 1); - } - else - { - *ext = '\0'; - } - } -} - -void _makepath(char *path, const char *drive, const char *dir, const char *fname, const char *ext) -{ - if (dir && *dir) - { - strcpy(path, dir); - strcat(path, "/"); - } - else - *path = '\0'; - - strcat(path, fname); - - if (ext && *ext) - { - if (*ext != '.') - strcat(path, "."); - strcat(path, ext); - } -} diff --git a/controls.cpp b/controls.cpp index eaac35ad..878c809f 100644 --- a/controls.cpp +++ b/controls.cpp @@ -328,14 +328,12 @@ static void DoGunLatch (int x, int y) if (x > 295) x = 295; - else - if (x < 40) + else if (x < 40) x = 40; if (y > PPU.ScreenHeight - 1) y = PPU.ScreenHeight - 1; - else - if (y < 0) + else if (y < 0) y = 0; PPU.GunVLatch = (uint16) (y + 1); @@ -1164,8 +1162,7 @@ s9xcommand_t S9xGetCommandT (const char *name) if (!strcmp(name, "None")) cmd.type = S9xNoMapping; - else - if (!strncmp(name, "Joypad", 6)) + else if (!strncmp(name, "Joypad", 6)) { if (name[6] < '1' || name[6] > '8' || name[7] != ' ') return (cmd); @@ -1176,24 +1173,15 @@ s9xcommand_t S9xGetCommandT (const char *name) s = name + 13; if (!strncmp(s, "Left/Right ", 11)) { j = 0; i = 0; s += 
11; } - else - if (!strncmp(s, "Right/Left ", 11)) { j = 0; i = 1; s += 11; } - else - if (!strncmp(s, "Up/Down ", 8)) { j = 1; i = 0; s += 8; } - else - if (!strncmp(s, "Down/Up ", 8)) { j = 1; i = 1; s += 8; } - else - if (!strncmp(s, "Y/A ", 4)) { j = 2; i = 0; s += 4; } - else - if (!strncmp(s, "A/Y ", 4)) { j = 2; i = 1; s += 4; } - else - if (!strncmp(s, "X/B ", 4)) { j = 3; i = 0; s += 4; } - else - if (!strncmp(s, "B/X ", 4)) { j = 3; i = 1; s += 4; } - else - if (!strncmp(s, "L/R ", 4)) { j = 4; i = 0; s += 4; } - else - if (!strncmp(s, "R/L ", 4)) { j = 4; i = 1; s += 4; } + else if (!strncmp(s, "Right/Left ", 11)) { j = 0; i = 1; s += 11; } + else if (!strncmp(s, "Up/Down ", 8)) { j = 1; i = 0; s += 8; } + else if (!strncmp(s, "Down/Up ", 8)) { j = 1; i = 1; s += 8; } + else if (!strncmp(s, "Y/A ", 4)) { j = 2; i = 0; s += 4; } + else if (!strncmp(s, "A/Y ", 4)) { j = 2; i = 1; s += 4; } + else if (!strncmp(s, "X/B ", 4)) { j = 3; i = 0; s += 4; } + else if (!strncmp(s, "B/X ", 4)) { j = 3; i = 1; s += 4; } + else if (!strncmp(s, "L/R ", 4)) { j = 4; i = 0; s += 4; } + else if (!strncmp(s, "R/L ", 4)) { j = 4; i = 1; s += 4; } else return (cmd); @@ -1374,8 +1362,7 @@ s9xcommand_t S9xGetCommandT (const char *name) if (*s == 'h') cmd.axis.pointer.HV = 0; - else - if (*s == 'v') + else if (*s == 'v') cmd.axis.pointer.HV = 1; else return (cmd); @@ -1523,8 +1510,7 @@ s9xcommand_t S9xGetCommandT (const char *name) j = i; } - else - if (name[i] == ',') + else if (name[i] == ',') { free(c); return (cmd); @@ -1537,8 +1523,7 @@ s9xcommand_t S9xGetCommandT (const char *name) { if (name[i] == '+') press = 1; - else - if (name[i] == '-') + else if (name[i] == '-') press = 2; else { @@ -2007,14 +1992,12 @@ void S9xApplyCommand (s9xcommand_t cmd, int16 data1, int16 data2) uint16 x = r; r = st; st = x; x = s; s = t; t = x; } - else - if (cmd.button.joypad.turbo) + else if (cmd.button.joypad.turbo) { uint16 x = r; r = t; t = x; x = s; s = st; st = x; } - else - if (cmd.button.joypad.sticky) + else if (cmd.button.joypad.sticky) { uint16 x = r; r = s; s = x; x = t; t = st; st = x; @@ -2276,16 +2259,12 @@ void S9xApplyCommand (s9xcommand_t cmd, int16 data1, int16 data2) case LoadOopsFile: { - char filename[PATH_MAX + 1]; - char drive[_MAX_DRIVE + 1], dir[_MAX_DIR + 1], def[_MAX_FNAME + 1], ext[_MAX_EXT + 1]; + std::string filename = S9xGetFilename("oops", SNAPSHOT_DIR); - _splitpath(Memory.ROMFilename, drive, dir, def, ext); - snprintf(filename, PATH_MAX + 1, "%s%s%s.%.*s", S9xGetDirectory(SNAPSHOT_DIR), SLASH_STR, def, _MAX_EXT - 1, "oops"); - - if (S9xUnfreezeGame(filename)) + if (S9xUnfreezeGame(filename.c_str())) { - snprintf(buf, 256, "%s.%.*s loaded", def, _MAX_EXT - 1, "oops"); - S9xSetInfoString (buf); + snprintf(buf, 256, "%.240s.oops loaded", S9xBasename(Memory.ROMFilename).c_str()); + S9xSetInfoString(buf); } else S9xMessage(S9X_ERROR, S9X_FREEZE_FILE_NOT_FOUND, "Oops file not found"); @@ -2313,15 +2292,15 @@ void S9xApplyCommand (s9xcommand_t cmd, int16 data1, int16 data2) case QuickLoad009: case QuickLoad010: { - char filename[PATH_MAX + 1]; - char drive[_MAX_DRIVE + 1], dir[_MAX_DIR + 1], def[_MAX_FNAME + 1], ext[_MAX_EXT + 1]; + std::string ext = std::to_string(i - QuickLoad000); + while (ext.length() < 3) + ext = '0' + ext; - _splitpath(Memory.ROMFilename, drive, dir, def, ext); - snprintf(filename, PATH_MAX + 1, "%s%s%s.%03d", S9xGetDirectory(SNAPSHOT_DIR), SLASH_STR, def, i - QuickLoad000); + auto filename = S9xGetFilename(ext, SNAPSHOT_DIR); - if (S9xUnfreezeGame(filename)) + if 
(S9xUnfreezeGame(filename.c_str())) { - snprintf(buf, 256, "%s.%03d loaded", def, i - QuickLoad000); + snprintf(buf, 256, "Quick save-state %s loaded", ext.c_str()); S9xSetInfoString(buf); } else @@ -2342,16 +2321,16 @@ void S9xApplyCommand (s9xcommand_t cmd, int16 data1, int16 data2) case QuickSave009: case QuickSave010: { - char filename[PATH_MAX + 1]; - char drive[_MAX_DRIVE + 1], dir[_MAX_DIR + 1], def[_MAX_FNAME + 1], ext[_MAX_EXT + 1]; + std::string ext = std::to_string(i - QuickSave000); + while (ext.length() < 3) + ext = '0' + ext; - _splitpath(Memory.ROMFilename, drive, dir, def, ext); - snprintf(filename, PATH_MAX + 1, "%s%s%s.%03d", S9xGetDirectory(SNAPSHOT_DIR), SLASH_STR, def, i - QuickSave000); + auto filename = S9xGetFilename(ext, SNAPSHOT_DIR); - snprintf(buf, 256, "%s.%03d saved", def, i - QuickSave000); + snprintf(buf, 256, "Quick save-state %s saved", ext.c_str()); S9xSetInfoString(buf); - S9xFreezeGame(filename); + S9xFreezeGame(filename.c_str()); break; } @@ -2771,14 +2750,12 @@ static void UpdatePolledMouse (int i) mouse[i - MOUSE0].delta_x = 0xff; mouse[i - MOUSE0].old_x -= 127; } - else - if (j < 0) + else if (j < 0) { mouse[i - MOUSE0].delta_x = 0x80 | -j; mouse[i - MOUSE0].old_x = mouse[i - MOUSE0].cur_x; } - else - if (j > 127) + else if (j > 127) { mouse[i - MOUSE0].delta_x = 0x7f; mouse[i - MOUSE0].old_x += 127; @@ -2796,14 +2773,12 @@ static void UpdatePolledMouse (int i) mouse[i - MOUSE0].delta_y = 0xff; mouse[i - MOUSE0].old_y -= 127; } - else - if (j < 0) + else if (j < 0) { mouse[i - MOUSE0].delta_y = 0x80 | -j; mouse[i - MOUSE0].old_y = mouse[i - MOUSE0].cur_y; } - else - if (j > 127) + else if (j > 127) { mouse[i - MOUSE0].delta_y = 0x7f; mouse[i - MOUSE0].old_y += 127; @@ -3265,8 +3240,7 @@ void S9xControlEOF (void) pseudopointer[n].x += pseudopointer[n].H_adj; if (pseudopointer[n].x < 0) pseudopointer[n].x = 0; - else - if (pseudopointer[n].x > 255) + else if (pseudopointer[n].x > 255) pseudopointer[n].x = 255; if (pseudopointer[n].H_var) @@ -3289,8 +3263,7 @@ void S9xControlEOF (void) pseudopointer[n].y += pseudopointer[n].V_adj; if (pseudopointer[n].y < 0) pseudopointer[n].y = 0; - else - if (pseudopointer[n].y > PPU.ScreenHeight - 1) + else if (pseudopointer[n].y > PPU.ScreenHeight - 1) pseudopointer[n].y = PPU.ScreenHeight - 1; if (pseudopointer[n].V_var) diff --git a/cpu.cpp b/cpu.cpp index 4187496f..2c8d07d6 100644 --- a/cpu.cpp +++ b/cpu.cpp @@ -75,7 +75,6 @@ static void S9xSoftResetCPU (void) SetFlags(MemoryFlag | IndexFlag | IRQ | Emulation); ClearFlags(Decimal); - Timings.InterlaceField = FALSE; Timings.H_Max = Timings.H_Max_Master; Timings.V_Max = Timings.V_Max_Master; Timings.NMITriggerPos = 0xffff; @@ -99,8 +98,8 @@ void S9xReset (void) { S9xResetSaveTimer(FALSE); - memset(Memory.RAM, 0x55, 0x20000); - memset(Memory.VRAM, 0x00, 0x10000); + memset(Memory.RAM, 0x55, sizeof(Memory.RAM)); + memset(Memory.VRAM, 0x00, sizeof(Memory.VRAM)); memset(Memory.FillRAM, 0, 0x8000); S9xResetBSX(); diff --git a/cpuexec.cpp b/cpuexec.cpp index cd2b1ba1..9f220219 100644 --- a/cpuexec.cpp +++ b/cpuexec.cpp @@ -276,7 +276,6 @@ void S9xDoHEventProcessing (void) if (CPU.V_Counter >= Timings.V_Max) // V ranges from 0 to Timings.V_Max - 1 { CPU.V_Counter = 0; - Timings.InterlaceField ^= 1; // From byuu: // [NTSC] // interlace mode has 525 scanlines: 263 on the even frame, and 262 on the odd. // non-interlace mode has 524 scanlines: 262 scanlines on both even and odd frames. // [PAL] // interlace mode has 625 scanlines: 313 on the even frame, and 312 on the odd. // non-interlace mode has 624 scanlines: 312 scanlines on both even and odd frames.
- if (IPPU.Interlace && !Timings.InterlaceField) + if (IPPU.Interlace && S9xInterlaceField()) Timings.V_Max = Timings.V_Max_Master + 1; // 263 (NTSC), 313?(PAL) else Timings.V_Max = Timings.V_Max_Master; // 262 (NTSC), 312?(PAL) @@ -306,14 +305,14 @@ void S9xDoHEventProcessing (void) // In interlace mode, there are always 341 dots per scanline. Even frames have 263 scanlines, // and odd frames have 262 scanlines. // Interlace mode scanline 240 on odd frames is not missing a dot. - if (CPU.V_Counter == 240 && !IPPU.Interlace && Timings.InterlaceField) // V=240 + if (CPU.V_Counter == 240 && !IPPU.Interlace && S9xInterlaceField()) // V=240 Timings.H_Max = Timings.H_Max_Master - ONE_DOT_CYCLE; // HC=1360 else Timings.H_Max = Timings.H_Max_Master; // HC=1364 if (Model->_5A22 == 2) { - if (CPU.V_Counter != 240 || IPPU.Interlace || !Timings.InterlaceField) // V=240 + if (CPU.V_Counter != 240 || IPPU.Interlace || !S9xInterlaceField()) // V=240 { if (Timings.WRAMRefreshPos == SNES_WRAM_REFRESH_HC_v2 - ONE_DOT_CYCLE) // HC=534 Timings.WRAMRefreshPos = SNES_WRAM_REFRESH_HC_v2; // HC=538 diff --git a/display.h b/display.h index a2f12670..58a9341a 100644 --- a/display.h +++ b/display.h @@ -9,24 +9,6 @@ #include "snes9x.h" -enum s9x_getdirtype -{ - DEFAULT_DIR = 0, - HOME_DIR, - ROMFILENAME_DIR, - ROM_DIR, - SRAM_DIR, - SNAPSHOT_DIR, - SCREENSHOT_DIR, - SPC_DIR, - CHEAT_DIR, - PATCH_DIR, - BIOS_DIR, - LOG_DIR, - SAT_DIR, - LAST_DIR -}; - void S9xUsage (void); char * S9xParseArgs (char **, int); void S9xParseArgsForCheats (char **, int); @@ -44,10 +26,6 @@ void S9xToggleSoundChannel (int); bool8 S9xOpenSnapshotFile (const char *, bool8, STREAM *); void S9xCloseSnapshotFile (STREAM); const char * S9xStringInput (const char *); -const char * S9xGetDirectory (enum s9x_getdirtype); -const char * S9xGetFilename (const char *, enum s9x_getdirtype); -const char * S9xGetFilenameInc (const char *, enum s9x_getdirtype); -const char * S9xBasename (const char *); // Routines the port has to implement if it uses command-line diff --git a/external/SPIRV-Cross b/external/SPIRV-Cross index 1458bae6..197a273f 160000 --- a/external/SPIRV-Cross +++ b/external/SPIRV-Cross @@ -1 +1 @@ -Subproject commit 1458bae62ec67ea7d12c5a13b740e23ed4bb226c +Subproject commit 197a273fd494321157f40a962c51b5fa8c9c3581 diff --git a/external/VulkanMemoryAllocator-Hpp/LICENSE b/external/VulkanMemoryAllocator-Hpp/LICENSE new file mode 100644 index 00000000..670154e3 --- /dev/null +++ b/external/VulkanMemoryAllocator-Hpp/LICENSE @@ -0,0 +1,116 @@ +CC0 1.0 Universal + +Statement of Purpose + +The laws of most jurisdictions throughout the world automatically confer +exclusive Copyright and Related Rights (defined below) upon the creator and +subsequent owner(s) (each and all, an "owner") of an original work of +authorship and/or a database (each, a "Work"). + +Certain owners wish to permanently relinquish those rights to a Work for the +purpose of contributing to a commons of creative, cultural and scientific +works ("Commons") that the public can reliably and without fear of later +claims of infringement build upon, modify, incorporate in other works, reuse +and redistribute as freely as possible in any form whatsoever and for any +purposes, including without limitation commercial purposes. 
These owners may +contribute to the Commons to promote the ideal of a free culture and the +further production of creative, cultural and scientific works, or to gain +reputation or greater distribution for their Work in part through the use and +efforts of others. + +For these and/or other purposes and motivations, and without any expectation +of additional consideration or compensation, the person associating CC0 with a +Work (the "Affirmer"), to the extent that he or she is an owner of Copyright +and Related Rights in the Work, voluntarily elects to apply CC0 to the Work +and publicly distribute the Work under its terms, with knowledge of his or her +Copyright and Related Rights in the Work and the meaning and intended legal +effect of CC0 on those rights. + +1. Copyright and Related Rights. A Work made available under CC0 may be +protected by copyright and related or neighboring rights ("Copyright and +Related Rights"). Copyright and Related Rights include, but are not limited +to, the following: + + i. the right to reproduce, adapt, distribute, perform, display, communicate, + and translate a Work; + + ii. moral rights retained by the original author(s) and/or performer(s); + + iii. publicity and privacy rights pertaining to a person's image or likeness + depicted in a Work; + + iv. rights protecting against unfair competition in regards to a Work, + subject to the limitations in paragraph 4(a), below; + + v. rights protecting the extraction, dissemination, use and reuse of data in + a Work; + + vi. database rights (such as those arising under Directive 96/9/EC of the + European Parliament and of the Council of 11 March 1996 on the legal + protection of databases, and under any national implementation thereof, + including any amended or successor version of such directive); and + + vii. other similar, equivalent or corresponding rights throughout the world + based on applicable law or treaty, and any national implementations thereof. + +2. Waiver. To the greatest extent permitted by, but not in contravention of, +applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and +unconditionally waives, abandons, and surrenders all of Affirmer's Copyright +and Related Rights and associated claims and causes of action, whether now +known or unknown (including existing as well as future claims and causes of +action), in the Work (i) in all territories worldwide, (ii) for the maximum +duration provided by applicable law or treaty (including future time +extensions), (iii) in any current or future medium and for any number of +copies, and (iv) for any purpose whatsoever, including without limitation +commercial, advertising or promotional purposes (the "Waiver"). Affirmer makes +the Waiver for the benefit of each member of the public at large and to the +detriment of Affirmer's heirs and successors, fully intending that such Waiver +shall not be subject to revocation, rescission, cancellation, termination, or +any other legal or equitable action to disrupt the quiet enjoyment of the Work +by the public as contemplated by Affirmer's express Statement of Purpose. + +3. Public License Fallback. Should any part of the Waiver for any reason be +judged legally invalid or ineffective under applicable law, then the Waiver +shall be preserved to the maximum extent permitted taking into account +Affirmer's express Statement of Purpose. 
In addition, to the extent the Waiver +is so judged Affirmer hereby grants to each affected person a royalty-free, +non transferable, non sublicensable, non exclusive, irrevocable and +unconditional license to exercise Affirmer's Copyright and Related Rights in +the Work (i) in all territories worldwide, (ii) for the maximum duration +provided by applicable law or treaty (including future time extensions), (iii) +in any current or future medium and for any number of copies, and (iv) for any +purpose whatsoever, including without limitation commercial, advertising or +promotional purposes (the "License"). The License shall be deemed effective as +of the date CC0 was applied by Affirmer to the Work. Should any part of the +License for any reason be judged legally invalid or ineffective under +applicable law, such partial invalidity or ineffectiveness shall not +invalidate the remainder of the License, and in such case Affirmer hereby +affirms that he or she will not (i) exercise any of his or her remaining +Copyright and Related Rights in the Work or (ii) assert any associated claims +and causes of action with respect to the Work, in either case contrary to +Affirmer's express Statement of Purpose. + +4. Limitations and Disclaimers. + + a. No trademark or patent rights held by Affirmer are waived, abandoned, + surrendered, licensed or otherwise affected by this document. + + b. Affirmer offers the Work as-is and makes no representations or warranties + of any kind concerning the Work, express, implied, statutory or otherwise, + including without limitation warranties of title, merchantability, fitness + for a particular purpose, non infringement, or the absence of latent or + other defects, accuracy, or the present or absence of errors, whether or not + discoverable, all to the greatest extent permissible under applicable law. + + c. Affirmer disclaims responsibility for clearing rights of other persons + that may apply to the Work or any use thereof, including without limitation + any person's Copyright and Related Rights in the Work. Further, Affirmer + disclaims responsibility for obtaining any necessary consents, permissions + or other rights required for any use of the Work. + + d. Affirmer understands and acknowledges that Creative Commons is not a + party to this document and has no duty or obligation with respect to this + CC0 or use of the Work. + +For more information, please see + diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc.h b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc.h new file mode 100644 index 00000000..844bbff7 --- /dev/null +++ b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc.h @@ -0,0 +1,19612 @@ +// +// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. +// + +#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H +#define AMD_VULKAN_MEMORY_ALLOCATOR_H + +/** \mainpage Vulkan Memory Allocator + +Version 3.1.0-development + +Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n +License: MIT + +API documentation divided into groups: [Modules](modules.html) + +\section main_table_of_contents Table of contents + +- User guide + - \subpage quick_start + - [Project setup](@ref quick_start_project_setup) + - [Initialization](@ref quick_start_initialization) + - [Resource allocation](@ref quick_start_resource_allocation) + - \subpage choosing_memory_type + - [Usage](@ref choosing_memory_type_usage) + - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags) + - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types) + - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools) + - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations) + - \subpage memory_mapping + - [Mapping functions](@ref memory_mapping_mapping_functions) + - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory) + - [Cache flush and invalidate](@ref memory_mapping_cache_control) + - \subpage staying_within_budget + - [Querying for budget](@ref staying_within_budget_querying_for_budget) + - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage) + - \subpage resource_aliasing + - \subpage custom_memory_pools + - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex) + - [Linear allocation algorithm](@ref linear_algorithm) + - [Free-at-once](@ref linear_algorithm_free_at_once) + - [Stack](@ref linear_algorithm_stack) + - [Double stack](@ref linear_algorithm_double_stack) + - [Ring buffer](@ref linear_algorithm_ring_buffer) + - \subpage defragmentation + - \subpage statistics + - [Numeric statistics](@ref statistics_numeric_statistics) + - [JSON dump](@ref statistics_json_dump) + - \subpage allocation_annotation + - [Allocation user data](@ref allocation_user_data) + - [Allocation names](@ref allocation_names) + - \subpage virtual_allocator + - \subpage debugging_memory_usage + - [Memory initialization](@ref debugging_memory_usage_initialization) + - [Margins](@ref debugging_memory_usage_margins) + - [Corruption detection](@ref debugging_memory_usage_corruption_detection) + - \subpage opengl_interop +- \subpage usage_patterns + - [GPU-only resource](@ref usage_patterns_gpu_only) + - [Staging copy for upload](@ref usage_patterns_staging_copy_upload) + - [Readback](@ref usage_patterns_readback) + - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading) + - [Other use cases](@ref usage_patterns_other_use_cases) +- \subpage configuration + - [Pointers to Vulkan functions](@ref config_Vulkan_functions) + - [Custom host memory allocator](@ref custom_memory_allocator) + - [Device memory allocation callbacks](@ref allocation_callbacks) + - [Device heap memory limit](@ref heap_memory_limit) +- Extension support + - \subpage vk_khr_dedicated_allocation + 
- \subpage enabling_buffer_device_address + - \subpage vk_ext_memory_priority + - \subpage vk_amd_device_coherent_memory +- \subpage general_considerations + - [Thread safety](@ref general_considerations_thread_safety) + - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility) + - [Validation layer warnings](@ref general_considerations_validation_layer_warnings) + - [Allocation algorithm](@ref general_considerations_allocation_algorithm) + - [Features not supported](@ref general_considerations_features_not_supported) + +\section main_see_also See also + +- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/) +- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator) + +\defgroup group_init Library initialization + +\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object. + +\defgroup group_alloc Memory allocation + +\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images. +Most basic ones being: vmaCreateBuffer(), vmaCreateImage(). + +\defgroup group_virtual Virtual allocator + +\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm +for user-defined purpose without allocating any real GPU memory. + +\defgroup group_stats Statistics + +\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format. +See documentation chapter: \ref statistics. +*/ + + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef VULKAN_H_ + #include +#endif + +#if !defined(VMA_VULKAN_VERSION) + #if defined(VK_VERSION_1_3) + #define VMA_VULKAN_VERSION 1003000 + #elif defined(VK_VERSION_1_2) + #define VMA_VULKAN_VERSION 1002000 + #elif defined(VK_VERSION_1_1) + #define VMA_VULKAN_VERSION 1001000 + #else + #define VMA_VULKAN_VERSION 1000000 + #endif +#endif + +#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS + extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr; + extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr; + extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties; + extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties; + extern PFN_vkAllocateMemory vkAllocateMemory; + extern PFN_vkFreeMemory vkFreeMemory; + extern PFN_vkMapMemory vkMapMemory; + extern PFN_vkUnmapMemory vkUnmapMemory; + extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges; + extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges; + extern PFN_vkBindBufferMemory vkBindBufferMemory; + extern PFN_vkBindImageMemory vkBindImageMemory; + extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements; + extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements; + extern PFN_vkCreateBuffer vkCreateBuffer; + extern PFN_vkDestroyBuffer vkDestroyBuffer; + extern PFN_vkCreateImage vkCreateImage; + extern PFN_vkDestroyImage vkDestroyImage; + extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; + #if VMA_VULKAN_VERSION >= 1001000 + extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; + extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; + extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; + extern PFN_vkBindImageMemory2 vkBindImageMemory2; + extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; + #endif // 
#if VMA_VULKAN_VERSION >= 1001000 +#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES + +#if !defined(VMA_DEDICATED_ALLOCATION) + #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation + #define VMA_DEDICATED_ALLOCATION 1 + #else + #define VMA_DEDICATED_ALLOCATION 0 + #endif +#endif + +#if !defined(VMA_BIND_MEMORY2) + #if VK_KHR_bind_memory2 + #define VMA_BIND_MEMORY2 1 + #else + #define VMA_BIND_MEMORY2 0 + #endif +#endif + +#if !defined(VMA_MEMORY_BUDGET) + #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) + #define VMA_MEMORY_BUDGET 1 + #else + #define VMA_MEMORY_BUDGET 0 + #endif +#endif + +// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. +#if !defined(VMA_BUFFER_DEVICE_ADDRESS) + #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 + #define VMA_BUFFER_DEVICE_ADDRESS 1 + #else + #define VMA_BUFFER_DEVICE_ADDRESS 0 + #endif +#endif + +// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers. +#if !defined(VMA_MEMORY_PRIORITY) + #if VK_EXT_memory_priority + #define VMA_MEMORY_PRIORITY 1 + #else + #define VMA_MEMORY_PRIORITY 0 + #endif +#endif + +// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers. +#if !defined(VMA_EXTERNAL_MEMORY) + #if VK_KHR_external_memory + #define VMA_EXTERNAL_MEMORY 1 + #else + #define VMA_EXTERNAL_MEMORY 0 + #endif +#endif + +// Define these macros to decorate all public functions with additional code, +// before and after returned type, appropriately. This may be useful for +// exporting the functions when compiling VMA as a separate library. Example: +// #define VMA_CALL_PRE __declspec(dllexport) +// #define VMA_CALL_POST __cdecl +#ifndef VMA_CALL_PRE + #define VMA_CALL_PRE +#endif +#ifndef VMA_CALL_POST + #define VMA_CALL_POST +#endif + +// Define this macro to decorate pointers with an attribute specifying the +// length of the array they point to if they are not null. +// +// The length may be one of +// - The name of another parameter in the argument list where the pointer is declared +// - The name of another member in the struct where the pointer is declared +// - The name of a member of a struct type, meaning the value of that member in +// the context of the call. For example +// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"), +// this means the number of memory heaps available in the device associated +// with the VmaAllocator being dealt with. +#ifndef VMA_LEN_IF_NOT_NULL + #define VMA_LEN_IF_NOT_NULL(len) +#endif + +// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang. +// see: https://clang.llvm.org/docs/AttributeReference.html#nullable +#ifndef VMA_NULLABLE + #ifdef __clang__ + #define VMA_NULLABLE _Nullable + #else + #define VMA_NULLABLE + #endif +#endif + +// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang. 
+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
+#ifndef VMA_NOT_NULL
+    #ifdef __clang__
+        #define VMA_NOT_NULL _Nonnull
+    #else
+        #define VMA_NOT_NULL
+    #endif
+#endif
+
+// If non-dispatchable handles are represented as pointers then we can give
+// them nullability annotations
+#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
+    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+    #else
+        #define VMA_NOT_NULL_NON_DISPATCHABLE
+    #endif
+#endif
+
+#ifndef VMA_NULLABLE_NON_DISPATCHABLE
+    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+    #else
+        #define VMA_NULLABLE_NON_DISPATCHABLE
+    #endif
+#endif
+
+#ifndef VMA_STATS_STRING_ENABLED
+    #define VMA_STATS_STRING_ENABLED 1
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+//    INTERFACE
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+// Sections for managing code placement in the file, only for development purposes, e.g. for convenient folding inside an IDE.
+#ifndef _VMA_ENUM_DECLARATIONS
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Flags for created #VmaAllocator.
+typedef enum VmaAllocatorCreateFlagBits
+{
+    /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
+
+    Using this flag may increase performance because internal mutexes are not used.
+    */
+    VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
+    /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+    When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
+
+    Using this extension will automatically allocate dedicated blocks of memory for
+    some buffers and images instead of suballocating place for them out of bigger
+    memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+    flag) when it is recommended by the driver. It may improve performance on some
+    GPUs.
+
+    You may set this flag only if you found out that the following device extensions are
+    supported, you enabled them while creating the Vulkan device passed as
+    VmaAllocatorCreateInfo::device, and you want them to be used internally by this
+    library:
+
+    - VK_KHR_get_memory_requirements2 (device extension)
+    - VK_KHR_dedicated_allocation (device extension)
+
+    When this flag is set, you can experience the following warnings reported by the Vulkan
+    validation layer. You can ignore them.
+
+    > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
+    */
+    VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
+    /**
+    Enables usage of VK_KHR_bind_memory2 extension.
+
+    The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
+ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. + + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. + + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. + + You may set this flag only if you: + + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. + + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. + + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + + You may set this flag only if you: + + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. + The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. + + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + /** + Enables usage of VK_EXT_memory_priority extension in the library. 
+ + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + Larger values are higher priority. The granularity of the priorities is implementation-dependent. + It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. + The value to be used for default priority is 0.5. + For more details, see the documentation of the VK_EXT_memory_priority extension. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, + + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocatorCreateFlagBits; +/// See #VmaAllocatorCreateFlagBits. +typedef VkFlags VmaAllocatorCreateFlags; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// \brief Intended usage of the allocated memory. +typedef enum VmaMemoryUsage +{ + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. + */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. + + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. + + Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. 
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, + + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF +} VmaMemoryUsage; + +/// Flags to be passed as VmaAllocationCreateInfo::flags. +typedef enum VmaAllocationCreateFlagBits +{ + /** \brief Set this flag if the allocation should have its own memory block. + + Use it for special, big resources, like fullscreen images used as attachments. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. + + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). + */ + VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, + /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. + + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + null-terminated string. Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pName`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). 
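+
+    A minimal sketch of both approaches (`allocator` and `alloc` are assumed to
+    be an existing #VmaAllocator and #VmaAllocation):
+
+    \code
+    // Legacy: pass the name through pUserData together with this flag.
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT;
+    allocCreateInfo.pUserData = (void*)"MyBuffer";
+
+    // Preferred: name an existing allocation directly.
+    vmaSetAllocationName(allocator, alloc, "MyBuffer");
+    \endcode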
+ */ + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, + /** Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, + /** Create both buffer/image and allocation, but don't bind them together. + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + + If you want to make sure the new buffer/image is not tied to the new memory allocation + through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, + use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. + */ + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, + /** \brief Set this flag if the allocated memory will have aliasing resources. + + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. + Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. + */ + VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, + never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. + + \warning Violating this declaration may work correctly, but will likely be very slow. + Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` + Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. + + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is required. 
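+
+    For example, a buffer for reading back results, which the CPU will then
+    inspect in arbitrary order, could be requested like this (a sketch; error
+    handling omitted, `allocator` assumed to be an existing #VmaAllocator):
+
+    \code
+    VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    bufCreateInfo.size = 65536;
+    bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+    VmaAllocationCreateInfo allocCreateInfo = {};
+    allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
+        VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+    VkBuffer buf;
+    VmaAllocation alloc;
+    VmaAllocationInfo allocInfo;
+    vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+    // allocInfo.pMappedData can now be read directly by the CPU.
+    \endcode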
+ */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, + /** Allocation strategy that chooses smallest possible free range for the allocation + to minimize memory usage and fragmentation, possibly at the expense of allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, + /** Allocation strategy that chooses first suitable free range for the allocation - + not necessarily in terms of the smallest offset but the one that is easiest and fastest to find + to minimize allocation time, possibly at the expense of allocation quality. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recommended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaAllocationCreateFlagBits; +/// See #VmaAllocationCreateFlagBits. +typedef VkFlags VmaAllocationCreateFlags; + +/// Flags to be passed as VmaPoolCreateInfo::flags. +typedef enum VmaPoolCreateFlagBits +{ + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. + + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). 
In that case, if you can make sure you always allocate only
+    buffers and linear images or only optimal images out of this pool, use this flag
+    to make allocator disregard Buffer-Image Granularity and so make allocations
+    faster and more optimal.
+    */
+    VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
+
+    /** \brief Enables alternative, linear allocation algorithm in this pool.
+
+    Specify this flag to enable linear allocation algorithm, which always creates
+    new allocations after last one and doesn't reuse space from allocations freed in
+    between. It trades memory consumption for simplified algorithm and data
+    structure, which has better performance and uses less memory for metadata.
+
+    By using this flag, you can achieve behavior of free-at-once, stack,
+    ring buffer, and double stack.
+    For details, see documentation chapter \ref linear_algorithm.
+    */
+    VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
+
+    /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    */
+    VMA_POOL_CREATE_ALGORITHM_MASK =
+        VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
+
+    VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaPoolCreateFlagBits;
+/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
+typedef VkFlags VmaPoolCreateFlags;
+
+/// Flags to be passed as VmaDefragmentationInfo::flags.
+typedef enum VmaDefragmentationFlagBits
+{
+    /** \brief Use simple but fast algorithm for defragmentation.
+    May not achieve best results but will require least time to compute and least allocations to copy.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
+    /** \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
+    Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
+    /** \brief Perform full defragmentation of memory.
+    Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
+    /** \brief Use the most robust algorithm at the cost of time to compute and number of copies to make.
+    Only available when bufferImageGranularity is greater than 1, since it aims to reduce
+    alignment issues between different types of resources.
+    Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
+    */
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
+
+    /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
+    VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
+        VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
+
+    VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
+} VmaDefragmentationFlagBits;
+/// See #VmaDefragmentationFlagBits.
+typedef VkFlags VmaDefragmentationFlags;
+
+/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
+typedef enum VmaDefragmentationMoveOperation
+{
+    /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
+    VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
+    /// Set this value if you cannot move the allocation.
New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. + VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, + /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. + VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, +} VmaDefragmentationMoveOperation; + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. +typedef enum VmaVirtualBlockCreateFlagBits +{ + /** \brief Enables alternative, linear allocation algorithm in this virtual block. + + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. + + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, + + /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, + + VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualBlockCreateFlagBits; +/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. +typedef VkFlags VmaVirtualBlockCreateFlags; + +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. +typedef enum VmaVirtualAllocationCreateFlagBits +{ + /** \brief Allocation will be created from upper stack in a double stack pool. + + This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + /** \brief Allocation strategy that tries to minimize memory usage. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** \brief Allocation strategy that tries to minimize allocation time. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. + + These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, + + VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF +} VmaVirtualAllocationCreateFlagBits; +/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. +typedef VkFlags VmaVirtualAllocationCreateFlags; + +/** @} */ + +#endif // _VMA_ENUM_DECLARATIONS + +#ifndef _VMA_DATA_TYPES_DECLARATIONS + +/** +\addtogroup group_init +@{ */ + +/** \struct VmaAllocator +\brief Represents main object of this library initialized. + +Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. 
+Call function vmaDestroyAllocator() to destroy it. + +It is recommended to create just one object of this type per `VkDevice` object, +right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. +*/ +VK_DEFINE_HANDLE(VmaAllocator) + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \struct VmaPool +\brief Represents custom memory pool + +Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. +Call function vmaDestroyPool() to destroy it. + +For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). +*/ +VK_DEFINE_HANDLE(VmaPool) + +/** \struct VmaAllocation +\brief Represents single memory allocation. + +It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type +plus unique offset. + +There are multiple ways to create such object. +You need to fill structure VmaAllocationCreateInfo. +For more information see [Choosing memory type](@ref choosing_memory_type). + +Although the library provides convenience functions that create Vulkan buffer or image, +allocate memory for it and bind them together, +binding of the allocation to a buffer or an image is out of scope of the allocation itself. +Allocation object can exist without buffer/image bound, +binding can be done manually by the user, and destruction of it can be done +independently of destruction of the allocation. + +The object also remembers its size and some other information. +To retrieve this information, use function vmaGetAllocationInfo() and inspect +returned structure VmaAllocationInfo. +*/ +VK_DEFINE_HANDLE(VmaAllocation) + +/** \struct VmaDefragmentationContext +\brief An opaque object that represents started defragmentation process. + +Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. +Call function vmaEndDefragmentation() to destroy it. +*/ +VK_DEFINE_HANDLE(VmaDefragmentationContext) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualAllocation +\brief Represents single memory allocation done inside VmaVirtualBlock. + +Use it as a unique identifier to virtual allocation within the single block. + +Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. +*/ +VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation) + +/** @} */ + +/** +\addtogroup group_virtual +@{ +*/ + +/** \struct VmaVirtualBlock +\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. + +Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. +For more information, see documentation chapter \ref virtual_allocator. + +This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. +*/ +VK_DEFINE_HANDLE(VmaVirtualBlock) + +/** @} */ + +/** +\addtogroup group_init +@{ +*/ + +/// Callback function called after successful vkAllocateMemory. +typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/// Callback function called before vkFreeMemory. 
+typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void* VMA_NULLABLE pUserData); + +/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + +Provided for informative purpose, e.g. to gather statistics about number of +allocations or total amount of memory allocated in Vulkan. + +Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. +*/ +typedef struct VmaDeviceMemoryCallbacks +{ + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void* VMA_NULLABLE pUserData; +} VmaDeviceMemoryCallbacks; + +/** \brief Pointers to some Vulkan functions - a subset used by the library. + +Used in VmaAllocatorCreateInfo::pVulkanFunctions. +*/ +typedef struct VmaVulkanFunctions +{ + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. 
+    PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
+#endif
+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
+    PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
+#endif
+#if VMA_VULKAN_VERSION >= 1003000
+    /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+    PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
+    /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
+    PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
+#endif
+} VmaVulkanFunctions;
+
+/// Description of an Allocator to be created.
+typedef struct VmaAllocatorCreateInfo
+{
+    /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
+    VmaAllocatorCreateFlags flags;
+    /// Vulkan physical device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkPhysicalDevice VMA_NOT_NULL physicalDevice;
+    /// Vulkan device.
+    /** It must be valid throughout whole lifetime of created allocator. */
+    VkDevice VMA_NOT_NULL device;
+    /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
+    /** Set to 0 to use default, which is currently 256 MiB. */
+    VkDeviceSize preferredLargeHeapBlockSize;
+    /// Custom CPU memory allocation callbacks. Optional.
+    /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
+    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+    /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
+    /** Optional, can be null. */
+    const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
+    /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
+
+    If not NULL, it must be a pointer to an array of
+    `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
+    maximum number of bytes that can be allocated out of particular Vulkan memory
+    heap.
+
+    Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
+    heap. This is also the default in case of `pHeapSizeLimit` = NULL.
+
+    If there is a limit defined for a heap:
+
+    - If the user tries to allocate more memory from that heap using this allocator,
+      the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+    - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
+      value of this limit will be reported instead when using vmaGetMemoryProperties().
+
+    Warning! Using this feature may not be equivalent to installing a GPU with a
+    smaller amount of memory, because the graphics driver doesn't necessarily fail new
+    allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
+    exceeded. It may return success and just silently migrate some device memory
+    blocks to system RAM. This driver behavior can also be controlled using
+    VK_AMD_memory_overallocation_behavior extension.
+    */
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
+
+    /** \brief Pointers to Vulkan functions. Can be null.
+
+    For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
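+
+    For instance, when VMA_DYNAMIC_VULKAN_FUNCTIONS is enabled, it is typically
+    enough to provide the two entry points and let the allocator fetch the rest.
+    A minimal sketch, assuming `instance`, `physicalDevice`, and `device` were
+    created beforehand:
+
+    \code
+    VmaVulkanFunctions vulkanFunctions = {};
+    vulkanFunctions.vkGetInstanceProcAddr = vkGetInstanceProcAddr;
+    vulkanFunctions.vkGetDeviceProcAddr = vkGetDeviceProcAddr;
+
+    VmaAllocatorCreateInfo allocatorCreateInfo = {};
+    allocatorCreateInfo.physicalDevice = physicalDevice;
+    allocatorCreateInfo.device = device;
+    allocatorCreateInfo.instance = instance;
+    allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
+
+    VmaAllocator allocator;
+    vmaCreateAllocator(&allocatorCreateInfo, &allocator);
+    \endcode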
+ */ + const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; + /** \brief Handle to Vulkan instance object. + + Starting from version 3.0.0 this member is no longer optional, it must be set! + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Optional. The highest version of Vulkan that the application is designed to use. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. + It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. + Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + */ + uint32_t vulkanApiVersion; +#if VMA_EXTERNAL_MEMORY + /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. + + If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` + elements, defining external memory handle types of particular Vulkan memory type, + to be passed using `VkExportMemoryAllocateInfoKHR`. + + Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. + This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. + */ + const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; +#endif // #if VMA_EXTERNAL_MEMORY +} VmaAllocatorCreateInfo; + +/// Information about existing #VmaAllocator object. +typedef struct VmaAllocatorInfo +{ + /** \brief Handle to Vulkan instance object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. + + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; +} VmaAllocatorInfo; + +/** @} */ + +/** +\addtogroup group_stats +@{ +*/ + +/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + +These are fast to calculate. +See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). +*/ +typedef struct VmaStatistics +{ + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ + uint32_t blockCount; + /** \brief Number of #VmaAllocation objects allocated. + + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ + uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. + + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object + (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls + "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + */ + VkDeviceSize blockBytes; + /** \brief Total number of bytes occupied by all #VmaAllocation objects. + + Always less or equal than `blockBytes`. 
+ Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan + but unused by any #VmaAllocation. + */ + VkDeviceSize allocationBytes; +} VmaStatistics; + +/** \brief More detailed statistics than #VmaStatistics. + +These are slower to calculate. Use for debugging purposes. +See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). + +Previous version of the statistics API provided averages, but they have been removed +because they can be easily calculated as: + +\code +VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; +VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; +VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; +\endcode +*/ +typedef struct VmaDetailedStatistics +{ + /// Basic statistics. + VmaStatistics statistics; + /// Number of free ranges of memory between allocations. + uint32_t unusedRangeCount; + /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. + VkDeviceSize allocationSizeMin; + /// Largest allocation size. 0 if there are 0 allocations. + VkDeviceSize allocationSizeMax; + /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMin; + /// Largest empty range size. 0 if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMax; +} VmaDetailedStatistics; + +/** \brief General statistics from current state of the Allocator - +total memory usage across all memory heaps and types. + +These are slower to calculate. Use for debugging purposes. +See function vmaCalculateStatistics(). +*/ +typedef struct VmaTotalStatistics +{ + VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; + VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaDetailedStatistics total; +} VmaTotalStatistics; + +/** \brief Statistics of current memory usage and available budget for a specific memory heap. + +These are fast to calculate. +See function vmaGetHeapBudgets(). +*/ +typedef struct VmaBudget +{ + /** \brief Statistics fetched from the library. + */ + VmaStatistics statistics; + /** \brief Estimated current memory usage of the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + /** \brief Estimated amount of memory available to the program, in bytes. + + Fetched from system using VK_EXT_memory_budget extension if enabled. + + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, decided by the operating system. + Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; +} VmaBudget; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/** \brief Parameters of new #VmaAllocation. + +To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. +*/ +typedef struct VmaAllocationCreateInfo +{ + /// Use #VmaAllocationCreateFlagBits enum. + VmaAllocationCreateFlags flags; + /** \brief Intended usage of memory. 
+ + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ + VmaMemoryUsage usage; + /** \brief Flags that must be set in a Memory Type chosen for an allocation. + + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ + VkMemoryPropertyFlags requiredFlags; + /** \brief Flags that preferably should be set in a memory type chosen for an allocation. + + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ + VkMemoryPropertyFlags preferredFlags; + /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. + + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ + uint32_t memoryTypeBits; + /** \brief Pool that this allocation should be created in. + + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). + + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void* VMA_NULLABLE pUserData; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object + and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. + */ + float priority; +} VmaAllocationCreateInfo; + +/// Describes parameter of created #VmaPool. +typedef struct VmaPoolCreateInfo +{ + /** \brief Vulkan memory type index to allocate this pool from. + */ + uint32_t memoryTypeIndex; + /** \brief Use combination of #VmaPoolCreateFlagBits. + */ + VmaPoolCreateFlags flags; + /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. + + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + In this case, the pool will also support dedicated allocations. + */ + VkDeviceSize blockSize; + /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. + + Set to 0 to have no preallocated blocks and allow the pool be completely empty. + */ + size_t minBlockCount; + /** \brief Maximum number of blocks that can be allocated in this pool. Optional. + + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. 
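+
+    For example, a pool of fixed size (two preallocated 32 MiB blocks, never grown
+    or shrunk) could be described as follows (a sketch; `memTypeIndex` stands for
+    an index found e.g. with vmaFindMemoryTypeIndexForBufferInfo()):
+
+    \code
+    VmaPoolCreateInfo poolCreateInfo = {};
+    poolCreateInfo.memoryTypeIndex = memTypeIndex;
+    poolCreateInfo.blockSize = 32ull * 1024 * 1024;
+    poolCreateInfo.minBlockCount = 2;
+    poolCreateInfo.maxBlockCount = 2; // Equal to minBlockCount: fixed amount of memory.
+
+    VmaPool pool;
+    vmaCreatePool(allocator, &poolCreateInfo, &pool);
+    \endcode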
+ */ + size_t maxBlockCount; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. + + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. + Otherwise, this variable is ignored. + */ + float priority; + /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. + + Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. + It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, + e.g. when doing interop with OpenGL. + */ + VkDeviceSize minAllocationAlignment; + /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. + + Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. + It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. + Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. + + Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, + can be attached automatically by this library when using other, more convenient of its features. + */ + void* VMA_NULLABLE pMemoryAllocateNext; +} VmaPoolCreateInfo; + +/** @} */ + +/** +\addtogroup group_alloc +@{ +*/ + +/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). +typedef struct VmaAllocationInfo +{ + /** \brief Memory type index that this allocation was allocated from. + + It never changes. + */ + uint32_t memoryType; + /** \brief Handle to Vulkan memory object. + + Same memory object can be shared by multiple allocations. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; + /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. + + You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function + vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, + not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation + and apply this offset automatically. + + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceSize offset; + /** \brief Size of this allocation, in bytes. + + It never changes. + + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. + + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. 
+
+    It can change after a call to vmaMapMemory(), vmaUnmapMemory().
+    It can also change after the allocation is moved during \ref defragmentation.
+    */
+    void* VMA_NULLABLE pMappedData;
+    /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
+
+    It can change after a call to vmaSetAllocationUserData() for this allocation.
+    */
+    void* VMA_NULLABLE pUserData;
+    /** \brief Custom allocation name that was set with vmaSetAllocationName().
+
+    It can change after a call to vmaSetAllocationName() for this allocation.
+
+    Another way to set a custom name is to pass it in VmaAllocationCreateInfo::pUserData with
+    additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
+    */
+    const char* VMA_NULLABLE pName;
+} VmaAllocationInfo;
+
+/** \brief Parameters for defragmentation.
+
+To be used with function vmaBeginDefragmentation().
+*/
+typedef struct VmaDefragmentationInfo
+{
+    /// \brief Use combination of #VmaDefragmentationFlagBits.
+    VmaDefragmentationFlags flags;
+    /** \brief Custom pool to be defragmented.
+
+    If null, the default pools will undergo the defragmentation process.
+    */
+    VmaPool VMA_NULLABLE pool;
+    /** \brief Maximum number of bytes that can be copied during a single pass, while moving allocations to different places.
+
+    `0` means no limit.
+    */
+    VkDeviceSize maxBytesPerPass;
+    /** \brief Maximum number of allocations that can be moved during a single pass to a different place.
+
+    `0` means no limit.
+    */
+    uint32_t maxAllocationsPerPass;
+} VmaDefragmentationInfo;
+
+/// Single move of an allocation to be done for defragmentation.
+typedef struct VmaDefragmentationMove
+{
+    /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
+    VmaDefragmentationMoveOperation operation;
+    /// Allocation that should be moved.
+    VmaAllocation VMA_NOT_NULL srcAllocation;
+    /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
+
+    \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
+    to be used for binding a new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
+    vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
+    */
+    VmaAllocation VMA_NOT_NULL dstTmpAllocation;
+} VmaDefragmentationMove;
+
+/** \brief Parameters for incremental defragmentation steps.
+
+To be used with function vmaBeginDefragmentationPass().
+*/
+typedef struct VmaDefragmentationPassMoveInfo
+{
+    /// Number of elements in the `pMoves` array.
+    uint32_t moveCount;
+    /** \brief Array of moves to be performed by the user in the current defragmentation pass.
+
+    Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
+
+    For each element, you should:
+
+    1. Create a new buffer/image and bind it to the place pointed to by VmaDefragmentationMove::dstTmpAllocation, e.g. using vmaBindBufferMemory().
+    2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
+    3. Make sure these commands finished executing on the GPU.
+    4. Destroy the old buffer/image.
+
+    Only then can you finish the defragmentation pass by calling vmaEndDefragmentationPass().
+    After this call, the allocation will point to the new place in memory.
+
+    Alternatively, if you cannot move a specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+
+    Alternatively, if you decide you want to completely remove the allocation:
+
+    1. Destroy its buffer/image.
+    2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+
+    Then, after vmaEndDefragmentationPass() the allocation will be freed.
+    */
+    VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
+} VmaDefragmentationPassMoveInfo;
+
+/// Statistics returned for a defragmentation process by function vmaEndDefragmentation().
+typedef struct VmaDefragmentationStats
+{
+    /// Total number of bytes that have been copied while moving allocations to different places.
+    VkDeviceSize bytesMoved;
+    /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
+    VkDeviceSize bytesFreed;
+    /// Number of allocations that have been moved to different places.
+    uint32_t allocationsMoved;
+    /// Number of empty `VkDeviceMemory` objects that have been released to the system.
+    uint32_t deviceMemoryBlocksFreed;
+} VmaDefragmentationStats;
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
+typedef struct VmaVirtualBlockCreateInfo
+{
+    /** \brief Total size of the virtual block.
+
+    Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
+    For example, if you allocate from some array of structures, 1 can mean a single instance of an entire structure.
+    */
+    VkDeviceSize size;
+
+    /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
+    */
+    VmaVirtualBlockCreateFlags flags;
+
+    /** \brief Custom CPU memory allocation callbacks. Optional.
+
+    Can be null. When specified, they will be used for all CPU-side memory allocations.
+    */
+    const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
+} VmaVirtualBlockCreateInfo;
+
+/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
+typedef struct VmaVirtualAllocationCreateInfo
+{
+    /** \brief Size of the allocation.
+
+    Cannot be zero.
+    */
+    VkDeviceSize size;
+    /** \brief Required alignment of the allocation. Optional.
+
+    Must be a power of two. The special value 0 has the same meaning as 1 - no special alignment is required, so the allocation can start at any offset.
+    */
+    VkDeviceSize alignment;
+    /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
+    */
+    VmaVirtualAllocationCreateFlags flags;
+    /** \brief Custom pointer to be associated with the allocation. Optional.
+
+    It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
+    */
+    void* VMA_NULLABLE pUserData;
+} VmaVirtualAllocationCreateInfo;
+
+/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
+typedef struct VmaVirtualAllocationInfo
+{
+    /** \brief Offset of the allocation.
+
+    Offset at which the allocation was made.
+    */
+    VkDeviceSize offset;
+    /** \brief Size of the allocation.
+
+    Same value as passed in VmaVirtualAllocationCreateInfo::size.
+    */
+    VkDeviceSize size;
+    /** \brief Custom pointer associated with the allocation.
+
+    Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
+    */
+    void* VMA_NULLABLE pUserData;
+} VmaVirtualAllocationInfo;
+
+/** @} */
+
+#endif // _VMA_DATA_TYPES_DECLARATIONS
+
+#ifndef _VMA_FUNCTION_HEADERS
+
+/**
+\addtogroup group_init
+@{
+*/
+
+/// Creates #VmaAllocator object.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
+    const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
+
+/// Destroys allocator object.
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
+    VmaAllocator VMA_NULLABLE allocator);
+
+/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
+
+It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
+`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
+
+/**
+PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
+You can access them here, without fetching them again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
+
+/**
+PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
+You can access them here, without fetching them again on your own.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
+
+/**
+\brief Given Memory Type Index, returns Property Flags of this memory type.
+
+This is just a convenience function. The same information can be obtained using
+vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t memoryTypeIndex,
+    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+
+/** \brief Sets index of the current frame.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t frameIndex);
+
+/** @} */
+
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Retrieves statistics from current state of the Allocator.
+
+This function is called "calculate" not "get" because it has to traverse all
+internal data structures, so it may be quite slow. Use it for debugging purposes.
+For faster but more brief statistics suitable to be called every frame or every allocation,
+use vmaGetHeapBudgets().
+
+Note that when using the allocator from multiple threads, the returned information may immediately
+become outdated.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaTotalStatistics* VMA_NOT_NULL pStats);
+
+/** \brief Retrieves information about current memory usage and budget for all memory heaps.
+
+\param allocator
+\param[out] pBudgets Must point to an array with at least as many elements as there are memory heaps in the physical device used.
+
+This function is called "get" not "calculate" because it is very fast, suitable to be called
+every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
+
+Note that when using the allocator from multiple threads, the returned information may immediately
+become outdated.
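+
+For example, a per-frame budget check can look like the following minimal sketch.
+The 80% threshold is an illustrative assumption, not a recommendation:
+
+\code
+const VkPhysicalDeviceMemoryProperties* memProps = nullptr;
+vmaGetMemoryProperties(allocator, &memProps);
+
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+for(uint32_t heapIndex = 0; heapIndex < memProps->memoryHeapCount; ++heapIndex)
+{
+    if(budgets[heapIndex].usage > budgets[heapIndex].budget * 8 / 10)
+    {
+        // Over ~80% of this heap's budget is used - consider releasing resources.
+    }
+}
+\endcode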
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/**
+\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
+
+This algorithm tries to find a memory type that:
+
+- Is allowed by memoryTypeBits.
+- Contains all the flags from pAllocationCreateInfo->requiredFlags.
+- Matches intended usage.
+- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
+
+\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such a result
+from this function or any other allocating function probably means that your
+device doesn't support any memory type with the requested features for the specific
+type of resource you want to use it for. Please check parameters of your
+resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t memoryTypeBits,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy buffer that never has memory bound.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+
+/**
+\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
+
+It can be useful e.g. to determine the value to be used as VmaPoolCreateInfo::memoryTypeIndex.
+It internally creates a temporary, dummy image that never has memory bound.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
+
+/** \brief Allocates Vulkan device memory and creates #VmaPool object.
+
+\param allocator Allocator object.
+\param pCreateInfo Parameters of pool to create.
+\param[out] pPool Handle to created pool.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
+
+/** \brief Destroys #VmaPool object and frees Vulkan device memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NULLABLE pool);
+
+/** @} */
+
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Retrieves statistics of existing #VmaPool object.
+
+\param allocator Allocator object.
+\param pool Pool object.
+\param[out] pPoolStats Statistics of specified pool.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool,
+    VmaStatistics* VMA_NOT_NULL pPoolStats);
+
+/** \brief Retrieves detailed statistics of existing #VmaPool object.
+
+\param allocator Allocator object.
+\param pool Pool object.
+\param[out] pPoolStats Statistics of specified pool.
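+
+A minimal sketch of querying both kinds of pool statistics, assuming `pool` was
+created earlier with vmaCreatePool():
+
+\code
+VmaStatistics briefStats;
+vmaGetPoolStatistics(allocator, pool, &briefStats); // Fast - fine to call every frame.
+
+VmaDetailedStatistics detailedStats;
+vmaCalculatePoolStatistics(allocator, pool, &detailedStats); // Slow - for debugging.
+\endcode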
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool,
+    VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
+
+/** @} */
+
+/**
+\addtogroup group_alloc
+@{
+*/
+
+/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
+  `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool);
+
+/** \brief Retrieves name of a custom pool.
+
+After the call `ppName` is either null or points to an internally-owned null-terminated string
+containing the name of the pool that was previously set. The pointer becomes invalid when the pool is
+destroyed or its name is changed using vmaSetPoolName().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool,
+    const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
+
+/** \brief Sets name of a custom pool.
+
+`pName` can be either null or a pointer to a null-terminated string with a new name for the pool.
+The function makes an internal copy of the string, so it can be changed or freed immediately after this call.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaPool VMA_NOT_NULL pool,
+    const char* VMA_NULLABLE pName);
+
+/** \brief General purpose memory allocation.
+
+\param allocator
+\param pVkMemoryRequirements
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
+vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief General purpose memory allocation for multiple allocation objects at once.
+
+\param allocator Allocator object.
+\param pVkMemoryRequirements Memory requirements for each allocation.
+\param pCreateInfo Creation parameters for each allocation.
+\param allocationCount Number of allocations to make.
+\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
+\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
+
+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
+
+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
+It is just a general purpose allocation function able to make multiple allocations at once.
+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
+
+All allocations are made using the same parameters. All of them are created out of the same memory pool and type.
+If any allocation fails, all allocations already made within this function call are also freed, so that when the
+returned result is not `VK_SUCCESS`, the `pAllocations` array is always entirely filled with `VK_NULL_HANDLE`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
+    size_t allocationCount,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
+    VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkBuffer`.
+
+\param allocator
+\param buffer
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateBuffer().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Allocates memory suitable for given `VkImage`.
+
+\param allocator
+\param image
+\param pCreateInfo
+\param[out] pAllocation Handle to allocated memory.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory().
+
+This is a special-purpose function. In most cases you should use vmaCreateImage().
+
+You must free the allocation using vmaFreeMemory() when no longer needed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
+
+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such a function call is simply skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaAllocation VMA_NULLABLE allocation);
+
+/** \brief Frees memory and destroys multiple allocations.
+
+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
+vmaAllocateMemoryPages() and other functions.
+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
+
+Allocations in the `pAllocations` array can come from any memory pools and types.
+Passing `VK_NULL_HANDLE` as elements of the `pAllocations` array is valid. Such entries are just skipped.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
+    VmaAllocator VMA_NOT_NULL allocator,
+    size_t allocationCount,
+    const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
+
+/** \brief Returns current information about specified allocation.
+
+Current parameters of the given allocation are returned in `pAllocationInfo`.
+
+This function doesn't lock any mutex, so it should be quite efficient; still, you
+should avoid calling it too often.
+You can retrieve the same VmaAllocationInfo structure while creating your resource, from function
+vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure the parameters don't change
+(e.g. due to defragmentation).
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
+
+/** \brief Sets pUserData in given allocation to new value.
+
+The value of pointer `pUserData` is copied to the allocation's `pUserData`.
+It is opaque, so you can use it however you want - e.g.
+as a pointer, ordinal number or some handle to your own data.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Sets pName in given allocation to new value.
+
+`pName` must be either null, or a pointer to a null-terminated string. The function
+makes a local copy of the string and sets it as the allocation's `pName`. The string
+passed as pName doesn't need to be valid for the whole lifetime of the allocation -
+you can free it after this call. The string previously pointed to by the allocation's
+`pName` is freed from memory.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const char* VMA_NULLABLE pName);
+
+/**
+\brief Given an allocation, returns Property Flags of its memory type.
+
+This is just a convenience function. The same information can be obtained using
+vmaGetAllocationInfo() + vmaGetMemoryProperties().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
+
+/** \brief Maps memory represented by given allocation and returns pointer to it.
+
+Maps memory represented by the given allocation to make it accessible to CPU code.
+On success, `*ppData` contains a pointer to the first byte of this memory.
+
+\warning
+If the allocation is part of a bigger `VkDeviceMemory` block, the returned pointer is
+correctly offset to the beginning of the region assigned to this particular allocation.
+Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
+You should not add VmaAllocationInfo::offset to it!
+
+Mapping is internally reference-counted and synchronized, so even though the raw Vulkan
+function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory`
+multiple times simultaneously, it is safe to call this function on allocations
+assigned to the same memory block. Actual Vulkan memory will be mapped on the first
+mapping and unmapped on the last unmapping.
+
+If the function succeeded, you must call vmaUnmapMemory() to unmap the
+allocation when mapping is no longer needed or before freeing the allocation, at
+the latest.
+
+It is also safe to call this function multiple times on the same allocation. You
+must call vmaUnmapMemory() the same number of times as you called vmaMapMemory().
+
+It is also safe to call this function on an allocation created with
+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
+You must still call vmaUnmapMemory() the same number of times as you called
+vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the
+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
+
+This function fails when used on an allocation made in a memory type that is not
+`HOST_VISIBLE`.
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    void* VMA_NULLABLE* VMA_NOT_NULL ppData);
+
+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
+
+For details, see description of vmaMapMemory().
+
+This function doesn't automatically flush or invalidate caches.
+If the allocation is made from a memory type that is not `HOST_COHERENT`,
+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation);
+
+/** \brief Flushes memory of given allocation.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
+Unmap operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Invalidates memory of given allocation.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with the given range of the given allocation.
+It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
+Map operation doesn't do that automatically.
+
+- `offset` must be relative to the beginning of the allocation.
+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of the given allocation.
+- `offset` and `size` don't have to be aligned.
+  They are internally rounded down/up to a multiple of `nonCoherentAtomSize`.
+- If `size` is 0, this call is ignored.
+- If the memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
+  this call is ignored.
+
+Warning! `offset` and `size` are relative to the contents of the given `allocation`.
+If you mean the whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
+Do not pass the allocation's offset as `offset`!
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
+it is called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize offset,
+    VkDeviceSize size);
+
+/** \brief Flushes memory of given set of allocations.
+
+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaFlushAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Invalidates memory of given set of allocations.
+
+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
+For more information, see documentation of vmaInvalidateAllocation().
+
+\param allocator
+\param allocationCount
+\param allocations
+\param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero.
+\param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
+
+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
+called, otherwise `VK_SUCCESS`.
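+
+For a single allocation, a typical map-write-flush sequence looks like the following
+minimal sketch. `alloc` is assumed to be `HOST_VISIBLE` but not `HOST_COHERENT`, and
+`data`/`dataSize` are assumed to be provided by the caller:
+
+\code
+void* mapped = nullptr;
+VkResult res = vmaMapMemory(allocator, alloc, &mapped);
+if(res == VK_SUCCESS)
+{
+    memcpy(mapped, data, (size_t)dataSize);
+    // Offset is relative to the allocation, not to the VkDeviceMemory block.
+    res = vmaFlushAllocation(allocator, alloc, 0, dataSize);
+    vmaUnmapMemory(allocator, alloc);
+}
+\endcode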
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t allocationCount,
+    const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
+    const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
+
+/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
+
+\param allocator
+\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+
+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
+`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
+
+Possible return values:
+
+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of the specified memory types.
+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
+- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
+  `VMA_ASSERT` is also fired in that case.
+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
+    VmaAllocator VMA_NOT_NULL allocator,
+    uint32_t memoryTypeBits);
+
+/** \brief Begins defragmentation process.
+
+\param allocator Allocator object.
+\param pInfo Structure filled with parameters of defragmentation.
+\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
+\returns
+- `VK_SUCCESS` if defragmentation can begin.
+- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
+
+For more information about defragmentation, see documentation chapter:
+[Defragmentation](@ref defragmentation).
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
+    VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
+
+/** \brief Ends defragmentation process.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param[out] pStats Optional stats for the defragmentation. Can be null.
+
+Use this function to finish defragmentation started by vmaBeginDefragmentation().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaDefragmentationContext VMA_NOT_NULL context,
+    VmaDefragmentationStats* VMA_NULLABLE pStats);
+
+/** \brief Starts single defragmentation pass.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param[out] pPassInfo Computed information for current pass.
+\returns
+- `VK_SUCCESS` if no more moves are possible. Then you can omit the call to vmaEndDefragmentationPass() and simply end the whole defragmentation.
+- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
+  and then preferably try another pass with vmaBeginDefragmentationPass().
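+
+A minimal sketch of the whole incremental loop. `RecreateAndCopy()` is a hypothetical
+user-side helper that creates a new buffer/image bound to
+VmaDefragmentationMove::dstTmpAllocation, copies the data on the GPU, waits for the
+copy to finish, and destroys the old resource:
+
+\code
+VmaDefragmentationInfo defragInfo = {};
+VmaDefragmentationContext defragCtx;
+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+if(res == VK_SUCCESS)
+{
+    for(;;)
+    {
+        VmaDefragmentationPassMoveInfo passInfo = {};
+        res = vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo);
+        if(res == VK_SUCCESS)
+            break; // No more moves are possible.
+        // res == VK_INCOMPLETE: perform the pending moves.
+        for(uint32_t i = 0; i < passInfo.moveCount; ++i)
+            RecreateAndCopy(passInfo.pMoves[i]); // Hypothetical helper.
+        res = vmaEndDefragmentationPass(allocator, defragCtx, &passInfo);
+        if(res == VK_SUCCESS)
+            break; // Nothing left to defragment.
+    }
+    vmaEndDefragmentation(allocator, defragCtx, nullptr);
+}
+\endcode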
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaDefragmentationContext VMA_NOT_NULL context,
+    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+
+/** \brief Ends single defragmentation pass.
+
+\param allocator Allocator object.
+\param context Context object that has been created by vmaBeginDefragmentation().
+\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
+
+Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if further defragmentation passes are possible.
+
+Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
+After this call:
+
+- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
+  (which is the default) will be pointing to the new destination place.
+- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
+  will be freed.
+
+If no more moves are possible, you can end the whole defragmentation.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaDefragmentationContext VMA_NOT_NULL context,
+    VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
+
+/** \brief Binds buffer to allocation.
+
+Binds the specified buffer to the region of memory represented by the specified allocation.
+It gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create a buffer, allocate memory for it and bind them together separately,
+you should use this function for binding instead of standard `vkBindBufferMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateBuffer() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
+
+/** \brief Binds buffer to allocation with additional parameters.
+
+\param allocator
+\param allocation
+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+\param buffer
+\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
+
+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
+    const void* VMA_NULLABLE pNext);
+
+/** \brief Binds image to allocation.
+
+Binds the specified image to the region of memory represented by the specified allocation.
+It gets the `VkDeviceMemory` handle and offset from the allocation.
+If you want to create an image, allocate memory for it and bind them together separately,
+you should use this function for binding instead of standard `vkBindImageMemory()`,
+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
+(which is illegal in Vulkan).
+
+It is recommended to use function vmaCreateImage() instead of this one.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
+
+/** \brief Binds image to allocation with additional parameters.
+
+\param allocator
+\param allocation
+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
+\param image
+\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
+
+This function is similar to vmaBindImageMemory(), but it provides additional parameters.
+
+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
+    const void* VMA_NULLABLE pNext);
+
+/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
+
+\param allocator
+\param pBufferCreateInfo
+\param pAllocationCreateInfo
+\param[out] pBuffer Buffer that was created.
+\param[out] pAllocation Allocation that was created.
+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
+
+This function automatically:
+
+-# Creates buffer.
+-# Allocates appropriate memory for it.
+-# Binds the buffer with the memory.
+
+If any of these operations fail, buffer and allocation are not created, the
+returned value is a negative error code, and `*pBuffer` and `*pAllocation` are null.
+
+If the function succeeded, you must destroy both buffer and allocation when you
+no longer need them using either convenience function vmaDestroyBuffer() or
+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
+
+If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
+VK_KHR_dedicated_allocation extension is used internally to query the driver whether
+it requires or prefers the new buffer to have a dedicated allocation. If yes,
+and if dedicated allocation is possible
+(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates a dedicated
+allocation for this buffer, just like when using
+#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+
+\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
+although recommended as a good practice, is out of scope of this library and could be implemented
+by the user as a higher-level logic on top of VMA.
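+
+A minimal usage sketch; the buffer size and usage flags are illustrative, and
+#VMA_MEMORY_USAGE_AUTO lets the library choose the memory type:
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VkResult res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+// ... use the buffer ...
+vmaDestroyBuffer(allocator, buf, alloc);
+\endcode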
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a buffer with additional minimum alignment.
+
+Similar to vmaCreateBuffer() but provides additional parameter `minAlignment`, which allows you to specify a custom
+minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
+for interop with OpenGL.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkDeviceSize minAlignment,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/** \brief Creates a new `VkBuffer` and binds already created memory to it.
+
+\param allocator
+\param allocation Allocation that provides memory to be used for binding new buffer to it.
+\param pBufferCreateInfo
+\param[out] pBuffer Buffer that was created.
+
+This function automatically:
+
+-# Creates buffer.
+-# Binds the buffer with the supplied memory.
+
+If any of these operations fail, the buffer is not created, the
+returned value is a negative error code and `*pBuffer` is null.
+
+If the function succeeded, you must destroy the buffer when you
+no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
+allocation you can use convenience function vmaDestroyBuffer().
+
+\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
+
+/** \brief Creates a new `VkBuffer` and binds already created memory to it.
+
+\param allocator
+\param allocation Allocation that provides memory to be used for binding new buffer to it.
+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
+\param pBufferCreateInfo
+\param[out] pBuffer Buffer that was created.
+
+This function automatically:
+
+-# Creates buffer.
+-# Binds the buffer with the supplied memory.
+
+If any of these operations fail, the buffer is not created, the
+returned value is a negative error code and `*pBuffer` is null.
+
+If the function succeeded, you must destroy the buffer when you
+no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
+allocation you can use convenience function vmaDestroyBuffer().
+
+\note This is a new version of the function augmented with parameter `allocationLocalOffset`.
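+
+A minimal sketch of placing two buffers in one allocation at different offsets.
+The sizes, offsets, and usage flags are illustrative; `alloc` and `device` are
+assumed to exist, and correctness of the placement (alignment, total size) is the
+caller's responsibility:
+
+\code
+VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufInfo.size = 4096;
+bufInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
+
+VkBuffer bufA, bufB;
+vmaCreateAliasingBuffer2(allocator, alloc, 0, &bufInfo, &bufA);    // At offset 0.
+vmaCreateAliasingBuffer2(allocator, alloc, 4096, &bufInfo, &bufB); // At offset 4096.
+// ... use the buffers, then: ...
+vkDestroyBuffer(device, bufA, nullptr);
+vkDestroyBuffer(device, bufB, nullptr);
+\endcode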
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
+
+/** \brief Destroys Vulkan buffer and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyBuffer(device, buffer, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as buffer and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/// Function similar to vmaCreateBuffer().
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
+    VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
+    VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
+
+/// Function similar to vmaCreateAliasingBuffer() but for images.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+
+/// Function similar to vmaCreateAliasingBuffer2() but for images.
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VmaAllocation VMA_NOT_NULL allocation,
+    VkDeviceSize allocationLocalOffset,
+    const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
+
+/** \brief Destroys Vulkan image and frees allocated memory.
+
+This is just a convenience function equivalent to:
+
+\code
+vkDestroyImage(device, image, allocationCallbacks);
+vmaFreeMemory(allocator, allocation);
+\endcode
+
+It is safe to pass null as image and/or allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
+    VmaAllocator VMA_NOT_NULL allocator,
+    VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
+    VmaAllocation VMA_NULLABLE allocation);
+
+/** @} */
+
+/**
+\addtogroup group_virtual
+@{
+*/
+
+/** \brief Creates new #VmaVirtualBlock object.
+
+\param pCreateInfo Parameters for creation.
+\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+    const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
+
+/** \brief Destroys #VmaVirtualBlock object.
+
+Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+if you are sure this is what you want. If you do neither, an assert is called.
+
+If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+don't forget to free them.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+    VmaVirtualBlock VMA_NULLABLE virtualBlock);
+
+/** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
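+
+For example, it can serve as a sanity check before destroying a block, as in this
+minimal sketch (`virtualBlock` is assumed to exist):
+
+\code
+if(!vmaIsVirtualBlockEmpty(virtualBlock))
+{
+    // Leftover virtual allocations - free them all before destruction.
+    vmaClearVirtualBlock(virtualBlock);
+}
+vmaDestroyVirtualBlock(virtualBlock);
+\endcode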
+*/
+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+
+/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+
+If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+(even though the function never allocates actual GPU memory).
+`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+
+\param virtualBlock Virtual block
+\param pCreateInfo Parameters for the allocation
+\param[out] pAllocation Returned handle of the new allocation
+\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
+*/
+VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
+    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
+    VkDeviceSize* VMA_NULLABLE pOffset);
+
+/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
+
+It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
+
+/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
+
+You must either call this function or free each virtual allocation individually with vmaVirtualFree()
+before destroying a virtual block. Otherwise, an assert is called.
+
+If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
+don't forget to free it as well.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+/** \brief Changes custom pointer associated with given virtual allocation.
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
+    void* VMA_NULLABLE pUserData);
+
+/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+
+This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaStatistics* VMA_NOT_NULL pStats);
+
+/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+
+This function is slow to call. Use for debugging purposes.
+For less detailed statistics, see vmaGetVirtualBlockStatistics().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    VmaDetailedStatistics* VMA_NOT_NULL pStats);
+
+/** @} */
+
+#if VMA_STATS_STRING_ENABLED
+/**
+\addtogroup group_stats
+@{
+*/
+
+/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
+\param virtualBlock Virtual block.
+\param[out] ppStatsString Returned string.
+\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
+
+Returned string must be freed using vmaFreeVirtualBlockStatsString().
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+    VkBool32 detailedMap);
+
+/// Frees a string returned by vmaBuildVirtualBlockStatsString().
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
+    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+    char* VMA_NULLABLE pStatsString);
+
+/** \brief Builds and returns statistics as a null-terminated string in JSON format.
+\param allocator
+\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
+\param detailedMap
+*/
+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+    VmaAllocator VMA_NOT_NULL allocator,
+    char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
+    VkBool32 detailedMap);
+
+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+    VmaAllocator VMA_NOT_NULL allocator,
+    char* VMA_NULLABLE pStatsString);
+
+/** @} */
+
+#endif // VMA_STATS_STRING_ENABLED
+
+#endif // _VMA_FUNCTION_HEADERS
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
+
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+//
+// IMPLEMENTATION
+//
+////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////
+
+// For Visual Studio IntelliSense.
+#if defined(__cplusplus) && defined(__INTELLISENSE__)
+#define VMA_IMPLEMENTATION
+#endif
+
+#ifdef VMA_IMPLEMENTATION
+#undef VMA_IMPLEMENTATION
+
+#include <cstdint>
+#include <cstdlib>
+#include <cstring>
+#include <utility>
+#include <type_traits>
+
+#ifdef _MSC_VER
+    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
+#endif
+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+    #include <bit> // For std::popcount
+#endif
+
+/*******************************************************************************
+CONFIGURATION SECTION
+
+Define some of these macros before each #include of this header or change them
+here if you need other than the default behavior depending on your environment.
+*/
+#ifndef _VMA_CONFIGURATION
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
+*/
+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
+    #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#endif
+
+/*
+Define this macro to 1 to make the library fetch pointers to Vulkan functions
+internally, like:
+
+    vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
+
+To use this feature in new versions of VMA you now have to pass
+VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
+VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
+*/
+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
+    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#endif
+
+#ifndef VMA_USE_STL_SHARED_MUTEX
+    // Compiler conforms to C++17.
+    #if __cplusplus >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    // Visual Studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
+    // Otherwise it is always 199711L, even though shared_mutex has worked since Visual Studio 2015 Update 2.
+    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+        #define VMA_USE_STL_SHARED_MUTEX 1
+    #else
+        #define VMA_USE_STL_SHARED_MUTEX 0
+    #endif
+#endif
+
+/*
+Define this macro to include custom header files without having to edit this file directly, e.g.:
+
+    // Inside of "my_vma_configuration_user_includes.h":
+
+    #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
+    #include "my_custom_min.h" // for my_custom_min
+    #include <algorithm>
+    #include <mutex>
+
+    // Inside a different file, which includes "vk_mem_alloc.h":
+
+    #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
+    #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
+    #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
+    #include "vk_mem_alloc.h"
+    ...
+
+The following headers are used in this CONFIGURATION section only, so feel free to
+remove them if not needed.
+*/
+#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
+    #include <cassert> // for assert
+    #include <algorithm> // for min, max
+    #include <mutex>
+#else
+    #include VMA_CONFIGURATION_USER_INCLUDES_H
+#endif
+
+#ifndef VMA_NULL
+    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+    #define VMA_NULL nullptr
+#endif
+
+// Normal assert to check for programmer's errors, especially in Debug configuration.
+#ifndef VMA_ASSERT
+    #ifdef NDEBUG
+        #define VMA_ASSERT(expr)
+    #else
+        #define VMA_ASSERT(expr) assert(expr)
+    #endif
+#endif
+
+// Assert that will be called very often, like inside data structures e.g. operator[].
+// Making it non-empty can make the program slow.
+#ifndef VMA_HEAVY_ASSERT
+    #ifdef NDEBUG
+        #define VMA_HEAVY_ASSERT(expr)
+    #else
+        #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
+    #endif
+#endif
+
+// If your compiler is not compatible with C++17 and definition of
+// aligned_alloc() function is missing, uncommenting the following line may help:
+
+//#include <malloc.h>
+
+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
+#include <malloc.h>
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // alignment must be >= sizeof(void*)
+    if(alignment < sizeof(void*))
+    {
+        alignment = sizeof(void*);
+    }
+
+    return memalign(alignment, size);
+}
+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
+#include <cstdlib>
+
+#if defined(__APPLE__)
+#include <AvailabilityMacros.h>
+#endif
+
+static void* vma_aligned_alloc(size_t alignment, size_t size)
+{
+    // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
+    // Therefore, for now disable this specific exception until a proper solution is found.
+    //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
+    //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
+    //    // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
+    //    // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
+    //    // MAC_OS_X_VERSION_10_16), even though the function is marked
+    //    // available for 10.15. That is why the preprocessor checks for 10.16 but
+    //    // the __builtin_available checks for 10.15.
+    //    // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
+ // if (__builtin_available(macOS 10.15, iOS 13, *)) + // return aligned_alloc(alignment, size); + //#endif + //#endif + + // alignment must be >= sizeof(void*) + if(alignment < sizeof(void*)) + { + alignment = sizeof(void*); + } + + void *pointer; + if(posix_memalign(&pointer, alignment, size) == 0) + return pointer; + return VMA_NULL; +} +#elif defined(_WIN32) +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return _aligned_malloc(size, alignment); +} +#elif __cplusplus >= 201703L // Compiler conforms to C++17. +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + return aligned_alloc(alignment, size); +} +#else +static void* vma_aligned_alloc(size_t alignment, size_t size) +{ + VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system."); + return VMA_NULL; +} +#endif + +#if defined(_WIN32) +static void vma_aligned_free(void* ptr) +{ + _aligned_free(ptr); +} +#else +static void vma_aligned_free(void* VMA_NULLABLE ptr) +{ + free(ptr); +} +#endif + +#ifndef VMA_ALIGN_OF + #define VMA_ALIGN_OF(type) (__alignof(type)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_MALLOC + #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) +#endif + +#ifndef VMA_SYSTEM_ALIGNED_FREE + // VMA_SYSTEM_FREE is the old name, but might have been defined by the user + #if defined(VMA_SYSTEM_FREE) + #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) + #else + #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) + #endif +#endif + +#ifndef VMA_COUNT_BITS_SET + // Returns number of bits set to 1 in (v) + #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +#endif + +#ifndef VMA_BITSCAN_LSB + // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +#endif + +#ifndef VMA_BITSCAN_MSB + // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX + #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +#endif + +#ifndef VMA_MIN + #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) +#endif + +#ifndef VMA_MAX + #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) +#endif + +#ifndef VMA_SWAP + #define VMA_SWAP(v1, v2) std::swap((v1), (v2)) +#endif + +#ifndef VMA_SORT + #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) +#endif + +#ifndef VMA_DEBUG_LOG_FORMAT + #define VMA_DEBUG_LOG_FORMAT(format, ...) + /* + #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ + printf((format), __VA_ARGS__); \ + printf("\n"); \ + } while(false) + */ +#endif + +#ifndef VMA_DEBUG_LOG + #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) +#endif + +// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. 
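+//
+// A minimal usage sketch for these two functions (illustrative only; assumes
+// `allocator` is a valid VmaAllocator created elsewhere):
+//
+//     char* statsString = VMA_NULL;
+//     vmaBuildStatsString(allocator, &statsString, VK_TRUE); // VK_TRUE = detailed map
+//     // statsString now holds a JSON dump of memory heaps, types and allocations.
+//     vmaFreeStatsString(allocator, statsString);
+//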
+#if VMA_STATS_STRING_ENABLED
+    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+    {
+        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+    }
+    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+    {
+        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+    }
+    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
+    {
+        snprintf(outStr, strLen, "%p", ptr);
+    }
+#endif
+
+#ifndef VMA_MUTEX
+    class VmaMutex
+    {
+    public:
+        void Lock() { m_Mutex.lock(); }
+        void Unlock() { m_Mutex.unlock(); }
+        bool TryLock() { return m_Mutex.try_lock(); }
+    private:
+        std::mutex m_Mutex;
+    };
+    #define VMA_MUTEX VmaMutex
+#endif
+
+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
+#ifndef VMA_RW_MUTEX
+    #if VMA_USE_STL_SHARED_MUTEX
+        // Use std::shared_mutex from C++17.
+        #include <shared_mutex>
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.lock_shared(); }
+            void UnlockRead() { m_Mutex.unlock_shared(); }
+            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+            void LockWrite() { m_Mutex.lock(); }
+            void UnlockWrite() { m_Mutex.unlock(); }
+            bool TryLockWrite() { return m_Mutex.try_lock(); }
+        private:
+            std::shared_mutex m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+        // Use SRWLOCK from WinAPI.
+        // Minimum supported client = Windows Vista, server = Windows Server 2008.
+        class VmaRWMutex
+        {
+        public:
+            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+            void LockRead() { AcquireSRWLockShared(&m_Lock); }
+            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+        private:
+            SRWLOCK m_Lock;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #else
+        // Less efficient fallback: Use normal mutex.
+        class VmaRWMutex
+        {
+        public:
+            void LockRead() { m_Mutex.Lock(); }
+            void UnlockRead() { m_Mutex.Unlock(); }
+            bool TryLockRead() { return m_Mutex.TryLock(); }
+            void LockWrite() { m_Mutex.Lock(); }
+            void UnlockWrite() { m_Mutex.Unlock(); }
+            bool TryLockWrite() { return m_Mutex.TryLock(); }
+        private:
+            VMA_MUTEX m_Mutex;
+        };
+        #define VMA_RW_MUTEX VmaRWMutex
+    #endif // #if VMA_USE_STL_SHARED_MUTEX
+#endif // #ifndef VMA_RW_MUTEX
+
+/*
+If providing your own implementation, you need to implement a subset of std::atomic.
+*/
+#ifndef VMA_ATOMIC_UINT32
+    #include <atomic>
+    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#endif
+
+#ifndef VMA_ATOMIC_UINT64
+    #include <atomic>
+    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+#endif
+
+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
+    /**
+    Every allocation will have its own memory block.
+    Define to 1 for debugging purposes only.
+    */
+    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+#endif
+
+#ifndef VMA_MIN_ALIGNMENT
+    /**
+    Minimum alignment of all allocations, in bytes.
+    Set to more than 1 for debugging purposes. Must be power of two.
+    */
+    #ifdef VMA_DEBUG_ALIGNMENT // Old name
+        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
+    #else
+        #define VMA_MIN_ALIGNMENT (1)
+    #endif
+#endif
+
+#ifndef VMA_DEBUG_MARGIN
+    /**
+    Minimum margin after every allocation, in bytes.
+    Set nonzero for debugging purposes only.
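+    For example, defining VMA_DEBUG_MARGIN as 16 keeps 16 bytes of free space
+    after every allocation (the value 16 here is only an illustration). Together
+    with VMA_DEBUG_DETECT_CORRUPTION below, that margin is filled with a magic
+    value which is later validated to catch out-of-bounds writes.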
+ */ + #define VMA_DEBUG_MARGIN (0) +#endif + +#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS + /** + Define this macro to 1 to automatically fill new allocations and destroyed + allocations with some bit pattern. + */ + #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) +#endif + +#ifndef VMA_DEBUG_DETECT_CORRUPTION + /** + Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to + enable writing magic value to the margin after every allocation and + validating it, so that memory corruptions (out-of-bounds writes) are detected. + */ + #define VMA_DEBUG_DETECT_CORRUPTION (0) +#endif + +#ifndef VMA_DEBUG_GLOBAL_MUTEX + /** + Set this to 1 for debugging purposes only, to enable single mutex protecting all + entry calls to the library. Can be useful for debugging multithreading issues. + */ + #define VMA_DEBUG_GLOBAL_MUTEX (0) +#endif + +#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY + /** + Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. + Set to more than 1 for debugging purposes only. Must be power of two. + */ + #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +#endif + +#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT + /* + Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount + and return error instead of leaving up to Vulkan implementation what to do in such cases. + */ + #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) +#endif + +#ifndef VMA_SMALL_HEAP_MAX_SIZE + /// Maximum size of a memory heap in Vulkan to consider it "small". + #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) +#endif + +#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE + /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. + #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) +#endif + +/* +Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called +or a persistently mapped allocation is created and destroyed several times in a row. +It keeps additional +1 mapping of a device memory block to prevent calling actual +vkMapMemory/vkUnmapMemory too many times, which may improve performance and help +tools like RenderDoc. +*/ +#ifndef VMA_MAPPING_HYSTERESIS_ENABLED + #define VMA_MAPPING_HYSTERESIS_ENABLED 1 +#endif + +#ifndef VMA_CLASS_NO_COPY + #define VMA_CLASS_NO_COPY(className) \ + private: \ + className(const className&) = delete; \ + className& operator=(const className&) = delete; +#endif + +#define VMA_VALIDATE(cond) do { if(!(cond)) { \ + VMA_ASSERT(0 && "Validation failed: " #cond); \ + return false; \ + } } while(false) + +/******************************************************************************* +END OF CONFIGURATION +*/ +#endif // _VMA_CONFIGURATION + + +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC; +static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF; +// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F. +static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666; + +// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants. 
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
+static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
+static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
+static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
+static const uint32_t VMA_VENDOR_ID_AMD = 4098;
+
+// This one is tricky. Vulkan specification defines this code as available since
+// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
+// See pull request #207.
+#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
+
+
+#if VMA_STATS_STRING_ENABLED
+// Correspond to values of enum VmaSuballocationType.
+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
+{
+    "FREE",
+    "UNKNOWN",
+    "BUFFER",
+    "IMAGE_UNKNOWN",
+    "IMAGE_LINEAR",
+    "IMAGE_OPTIMAL",
+};
+#endif
+
+static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
+    { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
+
+
+#ifndef _VMA_ENUM_DECLARATIONS
+
+enum VmaSuballocationType
+{
+    VMA_SUBALLOCATION_TYPE_FREE = 0,
+    VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
+    VMA_SUBALLOCATION_TYPE_BUFFER = 2,
+    VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
+    VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
+    VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
+    VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
+};
+
+enum VMA_CACHE_OPERATION
+{
+    VMA_CACHE_FLUSH,
+    VMA_CACHE_INVALIDATE
+};
+
+enum class VmaAllocationRequestType
+{
+    Normal,
+    TLSF,
+    // Used by "Linear" algorithm.
+    UpperAddress,
+    EndOf1st,
+    EndOf2nd,
+};
+
+#endif // _VMA_ENUM_DECLARATIONS
+
+#ifndef _VMA_FORWARD_DECLARATIONS
+// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle)
+
+struct VmaMutexLock;
+struct VmaMutexLockRead;
+struct VmaMutexLockWrite;
+
+template<typename T>
+struct AtomicTransactionalIncrement;
+
+template<typename T>
+struct VmaStlAllocator;
+
+template<typename T, typename AllocatorT>
+class VmaVector;
+
+template<typename T, typename AllocatorT, size_t N>
+class VmaSmallVector;
+
+template<typename T>
+class VmaPoolAllocator;
+
+template<typename T>
+struct VmaListItem;
+
+template<typename T>
+class VmaRawList;
+
+template<typename T, typename AllocatorT>
+class VmaList;
+
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList;
+
+// Unused in this version
+#if 0
+template<typename T1, typename T2>
+struct VmaPair;
+template<typename FirstT, typename SecondT>
+struct VmaPairFirstLess;
+
+template<typename KeyT, typename ValueT>
+class VmaMap;
+#endif
+
+#if VMA_STATS_STRING_ENABLED
+class VmaStringBuilder;
+class VmaJsonWriter;
+#endif
+
+class VmaDeviceMemoryBlock;
+
+struct VmaDedicatedAllocationListItemTraits;
+class VmaDedicatedAllocationList;
+
+struct VmaSuballocation;
+struct VmaSuballocationOffsetLess;
+struct VmaSuballocationOffsetGreater;
+struct VmaSuballocationItemSizeLess;
+
+typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
+
+struct VmaAllocationRequest;
+
+class VmaBlockMetadata;
+class VmaBlockMetadata_Linear;
+class VmaBlockMetadata_TLSF;
+
+class VmaBlockVector;
+
+struct VmaPoolListItemTraits;
+
+struct VmaCurrentBudgetData;
+
+class VmaAllocationObjectAllocator;
+
+#endif // _VMA_FORWARD_DECLARATIONS
+
+
+#ifndef _VMA_FUNCTIONS
+
+/*
+Returns number of bits set to 1 in (v).
+
+On specific platforms and compilers you can use intrinsics like:
+
+Visual Studio:
+    return __popcnt(v);
+GCC, Clang:
+    return static_cast<uint32_t>(__builtin_popcount(v));
+
+Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
+But you need to check in runtime whether user's CPU supports these, as some old processors don't.
+*/
+static inline uint32_t VmaCountBitsSet(uint32_t v)
+{
+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
+    return std::popcount(v);
+#else
+    uint32_t c = v - ((v >> 1) & 0x55555555);
+    c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
+    c = ((c >> 4) + c) & 0x0F0F0F0F;
+    c = ((c >> 8) + c) & 0x00FF00FF;
+    c = ((c >> 16) + c) & 0x0000FFFF;
+    return c;
+#endif
+}
+
+static inline uint8_t VmaBitScanLSB(uint64_t mask)
+{
+#if defined(_MSC_VER) && defined(_WIN64)
+    unsigned long pos;
+    if (_BitScanForward64(&pos, mask))
+        return static_cast<uint8_t>(pos);
+    return UINT8_MAX;
+#elif defined __GNUC__ || defined __clang__
+    return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
+#else
+    uint8_t pos = 0;
+    uint64_t bit = 1;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit <<= 1;
+    } while (pos++ < 63);
+    return UINT8_MAX;
+#endif
+}
+
+static inline uint8_t VmaBitScanLSB(uint32_t mask)
+{
+#ifdef _MSC_VER
+    unsigned long pos;
+    if (_BitScanForward(&pos, mask))
+        return static_cast<uint8_t>(pos);
+    return UINT8_MAX;
+#elif defined __GNUC__ || defined __clang__
+    return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
+#else
+    uint8_t pos = 0;
+    uint32_t bit = 1;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit <<= 1;
+    } while (pos++ < 31);
+    return UINT8_MAX;
+#endif
+}
+
+static inline uint8_t VmaBitScanMSB(uint64_t mask)
+{
+#if defined(_MSC_VER) && defined(_WIN64)
+    unsigned long pos;
+    if (_BitScanReverse64(&pos, mask))
+        return static_cast<uint8_t>(pos);
+#elif defined __GNUC__ || defined __clang__
+    if (mask)
+        return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
+#else
+    uint8_t pos = 63;
+    uint64_t bit = 1ULL << 63;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit >>= 1;
+    } while (pos-- > 0);
+#endif
+    return UINT8_MAX;
+}
+
+static inline uint8_t VmaBitScanMSB(uint32_t mask)
+{
+#ifdef _MSC_VER
+    unsigned long pos;
+    if (_BitScanReverse(&pos, mask))
+        return static_cast<uint8_t>(pos);
+#elif defined __GNUC__ || defined __clang__
+    if (mask)
+        return 31 - static_cast<uint8_t>(__builtin_clz(mask));
+#else
+    uint8_t pos = 31;
+    uint32_t bit = 1UL << 31;
+    do
+    {
+        if (mask & bit)
+            return pos;
+        bit >>= 1;
+    } while (pos-- > 0);
+#endif
+    return UINT8_MAX;
+}
+
+/*
+Returns true if given number is a power of two.
+T must be unsigned integer number or signed integer but always nonnegative.
+For 0 returns true.
+*/
+template <typename T>
+inline bool VmaIsPow2(T x)
+{
+    return (x & (x - 1)) == 0;
+}
+
+// Aligns given value up to nearest multiple of align value. For example: VmaAlignUp(11, 8) = 16.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignUp(T val, T alignment)
+{
+    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+    return (val + alignment - 1) & ~(alignment - 1);
+}
+
+// Aligns given value down to nearest multiple of align value. For example: VmaAlignDown(11, 8) = 8.
+// Use types like uint32_t, uint64_t as T.
+template <typename T>
+static inline T VmaAlignDown(T val, T alignment)
+{
+    VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
+    return val & ~(alignment - 1);
+}
+
+// Division with mathematical rounding to nearest number.
+template <typename T>
+static inline T VmaRoundDiv(T x, T y)
+{
+    return (x + (y / (T)2)) / y;
+}
+
+// Divide by 'y' and round up to nearest integer.
+template <typename T>
+static inline T VmaDivideRoundingUp(T x, T y)
+{
+    return (x + y - (T)1) / y;
+}
+
+// Returns smallest power of 2 greater or equal to v.
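+// For example: VmaNextPow2(5) = 8 and VmaNextPow2(8) = 8 (a power of 2 maps to itself).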
+static inline uint32_t VmaNextPow2(uint32_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v++; + return v; +} + +static inline uint64_t VmaNextPow2(uint64_t v) +{ + v--; + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v++; + return v; +} + +// Returns largest power of 2 less or equal to v. +static inline uint32_t VmaPrevPow2(uint32_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v = v ^ (v >> 1); + return v; +} + +static inline uint64_t VmaPrevPow2(uint64_t v) +{ + v |= v >> 1; + v |= v >> 2; + v |= v >> 4; + v |= v >> 8; + v |= v >> 16; + v |= v >> 32; + v = v ^ (v >> 1); + return v; +} + +static inline bool VmaStrIsEmpty(const char* pStr) +{ + return pStr == VMA_NULL || *pStr == '\0'; +} + +/* +Returns true if two memory blocks occupy overlapping pages. +ResourceA must be in less memory offset than ResourceB. + +Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)" +chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity". +*/ +static inline bool VmaBlocksOnSamePage( + VkDeviceSize resourceAOffset, + VkDeviceSize resourceASize, + VkDeviceSize resourceBOffset, + VkDeviceSize pageSize) +{ + VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0); + VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1; + VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1); + VkDeviceSize resourceBStart = resourceBOffset; + VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1); + return resourceAEndPage == resourceBStartPage; +} + +/* +Returns true if given suballocation types could conflict and must respect +VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer +or linear image and another one is optimal image. If type is unknown, behave +conservatively. 
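+For example, BUFFER vs IMAGE_OPTIMAL conflicts (the two must not share a "page"
+of bufferImageGranularity size), BUFFER vs BUFFER does not, and FREE conflicts
+with nothing.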
+*/ +static inline bool VmaIsBufferImageGranularityConflict( + VmaSuballocationType suballocType1, + VmaSuballocationType suballocType2) +{ + if (suballocType1 > suballocType2) + { + VMA_SWAP(suballocType1, suballocType2); + } + + switch (suballocType1) + { + case VMA_SUBALLOCATION_TYPE_FREE: + return false; + case VMA_SUBALLOCATION_TYPE_UNKNOWN: + return true; + case VMA_SUBALLOCATION_TYPE_BUFFER: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR || + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR: + return + suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL; + case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL: + return false; + default: + VMA_ASSERT(0); + return true; + } +} + +static void VmaWriteMagicValue(void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + uint32_t* pDst = (uint32_t*)((char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pDst) + { + *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE; + } +#else + // no-op +#endif +} + +static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset) +{ +#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION + const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset); + const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t); + for (size_t i = 0; i < numberCount; ++i, ++pSrc) + { + if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE) + { + return false; + } + } +#endif + return true; +} + +/* +Fills structure with parameters of an example buffer to be used for transfers +during GPU memory defragmentation. +*/ +static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo) +{ + memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo)); + outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO; + outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size. +} + + +/* +Performs binary search and returns iterator to first element that is greater or +equal to (key), according to comparison (cmp). + +Cmp should return true if first argument is less than second argument. + +Returned value is the found element, if present in the collection or place where +new element with value (key) should be inserted. +*/ +template +static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp) +{ + size_t down = 0, up = (end - beg); + while (down < up) + { + const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation + if (cmp(*(beg + mid), key)) + { + down = mid + 1; + } + else + { + up = mid; + } + } + return beg + down; +} + +template +IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp) +{ + IterT it = VmaBinaryFindFirstNotLess( + beg, end, value, cmp); + if (it == end || + (!cmp(*it, value) && !cmp(value, *it))) + { + return it; + } + return end; +} + +/* +Returns true if all pointers in the array are not-null and unique. +Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT. +T must be pointer type, e.g. VmaAllocation, VmaPool. 
+*/ +template +static bool VmaValidatePointerArray(uint32_t count, const T* arr) +{ + for (uint32_t i = 0; i < count; ++i) + { + const T iPtr = arr[i]; + if (iPtr == VMA_NULL) + { + return false; + } + for (uint32_t j = i + 1; j < count; ++j) + { + if (iPtr == arr[j]) + { + return false; + } + } + } + return true; +} + +template +static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) +{ + newStruct->pNext = mainStruct->pNext; + mainStruct->pNext = newStruct; +} + +// This is the main algorithm that guides the selection of a memory type best for an allocation - +// converts usage to required/preferred/not preferred flags. +static bool FindMemoryPreferences( + bool isIntegratedGPU, + const VmaAllocationCreateInfo& allocCreateInfo, + VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. + VkMemoryPropertyFlags& outRequiredFlags, + VkMemoryPropertyFlags& outPreferredFlags, + VkMemoryPropertyFlags& outNotPreferredFlags) +{ + outRequiredFlags = allocCreateInfo.requiredFlags; + outPreferredFlags = allocCreateInfo.preferredFlags; + outNotPreferredFlags = 0; + + switch(allocCreateInfo.usage) + { + case VMA_MEMORY_USAGE_UNKNOWN: + break; + case VMA_MEMORY_USAGE_GPU_ONLY: + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_CPU_ONLY: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + break; + case VMA_MEMORY_USAGE_CPU_TO_GPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + } + break; + case VMA_MEMORY_USAGE_GPU_TO_CPU: + outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; + break; + case VMA_MEMORY_USAGE_CPU_COPY: + outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; + break; + case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED: + outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT; + break; + case VMA_MEMORY_USAGE_AUTO: + case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: + case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: + { + if(bufImgUsage == UINT32_MAX) + { + VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known."); + return false; + } + // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*. + const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0; + const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0; + const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0; + const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0; + const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE; + const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; + + // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. 
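+        // (E.g. a readback buffer: the branch below requires HOST_VISIBLE | HOST_CACHED,
+        // unless VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT permits
+        // placing it in DEVICE_LOCAL memory and reading it back through a staging transfer.)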
+        if(hostAccessRandom)
+        {
+            if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+            {
+                // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
+                // Omitting HOST_VISIBLE here is intentional.
+                // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
+                // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
+                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+            }
+            else
+            {
+                // Always CPU memory, cached.
+                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+            }
+        }
+        // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
+        else if(hostAccessSequentialWrite)
+        {
+            // Want uncached and write-combined.
+            outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
+
+            if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
+            {
+                outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+            }
+            else
+            {
+                outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
+                // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
+                if(deviceAccess)
+                {
+                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
+                    if(preferHost)
+                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                    else
+                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                }
+                // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
+                else
+                {
+                    // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
+                    if(preferDevice)
+                        outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                    else
+                        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                }
+            }
+        }
+        // No CPU access
+        else
+        {
+            // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory
+            if(deviceAccess)
+            {
+                // ...unless there is a clear preference from the user not to do so.
+                if(preferHost)
+                    outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                else
+                    outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+            }
+            // No direct GPU access, no CPU access, just transfers.
+            // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
+            // a "swap file" copy to free some GPU memory (then better CPU memory).
+            // Up to the user to decide. If no preference, assume the former and choose GPU memory.
+            else
+            {
+                if(preferHost)
+                    outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+                else
+                    outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+            }
+        }
+        break;
+    }
+    default:
+        VMA_ASSERT(0);
+    }
+
+    // Avoid DEVICE_COHERENT unless explicitly requested.
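+    // I.e. when neither requiredFlags nor preferredFlags mention the AMD
+    // DEVICE_COHERENT / DEVICE_UNCACHED bits, mark DEVICE_UNCACHED as not
+    // preferred so default scoring steers away from those special memory types.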
+    if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
+        (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
+    {
+        outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
+    }
+
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Memory allocation
+
+static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
+{
+    void* result = VMA_NULL;
+    if ((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnAllocation != VMA_NULL))
+    {
+        result = (*pAllocationCallbacks->pfnAllocation)(
+            pAllocationCallbacks->pUserData,
+            size,
+            alignment,
+            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+    }
+    else
+    {
+        result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
+    }
+    VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
+    return result;
+}
+
+static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
+{
+    if ((pAllocationCallbacks != VMA_NULL) &&
+        (pAllocationCallbacks->pfnFree != VMA_NULL))
+    {
+        (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
+    }
+    else
+    {
+        VMA_SYSTEM_ALIGNED_FREE(ptr);
+    }
+}
+
+template<typename T>
+static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
+}
+
+template<typename T>
+static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
+{
+    return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
+}
+
+#define vma_new(allocator, type)   new(VmaAllocate<type>(allocator))(type)
+
+#define vma_new_array(allocator, type, count)   new(VmaAllocateArray<type>((allocator), (count)))(type)
+
+template<typename T>
+static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
+{
+    ptr->~T();
+    VmaFree(pAllocationCallbacks, ptr);
+}
+
+template<typename T>
+static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
+{
+    if (ptr != VMA_NULL)
+    {
+        for (size_t i = count; i--; )
+        {
+            ptr[i].~T();
+        }
+        VmaFree(pAllocationCallbacks, ptr);
+    }
+}
+
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
+{
+    if (srcStr != VMA_NULL)
+    {
+        const size_t len = strlen(srcStr);
+        char* const result = vma_new_array(allocs, char, len + 1);
+        memcpy(result, srcStr, len + 1);
+        return result;
+    }
+    return VMA_NULL;
+}
+
+#if VMA_STATS_STRING_ENABLED
+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
+{
+    if (srcStr != VMA_NULL)
+    {
+        char* const result = vma_new_array(allocs, char, strLen + 1);
+        memcpy(result, srcStr, strLen);
+        result[strLen] = '\0';
+        return result;
+    }
+    return VMA_NULL;
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
+{
+    if (str != VMA_NULL)
+    {
+        const size_t len = strlen(str);
+        vma_delete_array(allocs, str, len + 1);
+    }
+}
+
+template<typename CmpLess, typename VectorT>
+size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+        vector.data(),
+        vector.data() + vector.size(),
+        value,
+        CmpLess()) - vector.data();
+    VmaVectorInsert(vector, indexToInsert, value);
+    return indexToInsert;
+}
+
+template<typename CmpLess, typename VectorT>
+bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
+{
+    CmpLess comparator;
+    typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
vector.begin(), + vector.end(), + value, + comparator); + if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it)) + { + size_t indexToRemove = it - vector.begin(); + VmaVectorRemove(vector, indexToRemove); + return true; + } + return false; +} +#endif // _VMA_FUNCTIONS + +#ifndef _VMA_STATISTICS_FUNCTIONS + +static void VmaClearStatistics(VmaStatistics& outStats) +{ + outStats.blockCount = 0; + outStats.allocationCount = 0; + outStats.blockBytes = 0; + outStats.allocationBytes = 0; +} + +static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) +{ + inoutStats.blockCount += src.blockCount; + inoutStats.allocationCount += src.allocationCount; + inoutStats.blockBytes += src.blockBytes; + inoutStats.allocationBytes += src.allocationBytes; +} + +static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) +{ + VmaClearStatistics(outStats.statistics); + outStats.unusedRangeCount = 0; + outStats.allocationSizeMin = VK_WHOLE_SIZE; + outStats.allocationSizeMax = 0; + outStats.unusedRangeSizeMin = VK_WHOLE_SIZE; + outStats.unusedRangeSizeMax = 0; +} + +static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.statistics.allocationCount++; + inoutStats.statistics.allocationBytes += size; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); +} + +static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +{ + inoutStats.unusedRangeCount++; + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); +} + +static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) +{ + VmaAddStatistics(inoutStats.statistics, src.statistics); + inoutStats.unusedRangeCount += src.unusedRangeCount; + inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin); + inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax); + inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin); + inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax); +} + +#endif // _VMA_STATISTICS_FUNCTIONS + +#ifndef _VMA_MUTEX_LOCK +// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope). +struct VmaMutexLock +{ + VMA_CLASS_NO_COPY(VmaMutexLock) +public: + VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->Lock(); } + } + ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } + +private: + VMA_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. +struct VmaMutexLockRead +{ + VMA_CLASS_NO_COPY(VmaMutexLockRead) +public: + VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : + m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockRead(); } + } + ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. 
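+// Usage sketch (illustrative only; assumes some VMA_RW_MUTEX member m_Mutex and
+// a bool useMutex, as in the read-lock variant above):
+//
+//     {
+//         VmaMutexLockWrite lock(m_Mutex, useMutex);
+//         // ... modify shared state; the lock is released at the end of the scope.
+//     }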
+struct VmaMutexLockWrite +{ + VMA_CLASS_NO_COPY(VmaMutexLockWrite) +public: + VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) + : m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) { m_pMutex->LockWrite(); } + } + ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } + +private: + VMA_RW_MUTEX* m_pMutex; +}; + +#if VMA_DEBUG_GLOBAL_MUTEX + static VMA_MUTEX gDebugGlobalMutex; + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); +#else + #define VMA_DEBUG_GLOBAL_MUTEX_LOCK +#endif +#endif // _VMA_MUTEX_LOCK + +#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT +// An object that increments given atomic but decrements it back in the destructor unless Commit() is called. +template +struct AtomicTransactionalIncrement +{ +public: + typedef std::atomic AtomicT; + + ~AtomicTransactionalIncrement() + { + if(m_Atomic) + --(*m_Atomic); + } + + void Commit() { m_Atomic = nullptr; } + T Increment(AtomicT* atomic) + { + m_Atomic = atomic; + return m_Atomic->fetch_add(1); + } + +private: + AtomicT* m_Atomic = nullptr; +}; +#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT + +#ifndef _VMA_STL_ALLOCATOR +// STL-compatible allocator. +template +struct VmaStlAllocator +{ + const VkAllocationCallbacks* const m_pCallbacks; + typedef T value_type; + + VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} + template + VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} + VmaStlAllocator(const VmaStlAllocator&) = default; + VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; + + T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } + void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } + + template + bool operator==(const VmaStlAllocator& rhs) const + { + return m_pCallbacks == rhs.m_pCallbacks; + } + template + bool operator!=(const VmaStlAllocator& rhs) const + { + return m_pCallbacks != rhs.m_pCallbacks; + } +}; +#endif // _VMA_STL_ALLOCATOR + +#ifndef _VMA_VECTOR +/* Class with interface compatible with subset of std::vector. +T must be POD because constructors and destructors are not called and memcpy is +used for these objects. */ +template +class VmaVector +{ +public: + typedef T value_type; + typedef T* iterator; + typedef const T* const_iterator; + + VmaVector(const AllocatorT& allocator); + VmaVector(size_t count, const AllocatorT& allocator); + // This version of the constructor is here for compatibility with pre-C++14 std::vector. + // value is unused. 
+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} + VmaVector(const VmaVector& src); + VmaVector& operator=(const VmaVector& rhs); + ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_pArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + const T* data() const { return m_pArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + + iterator begin() { return m_pArray; } + iterator end() { return m_pArray + m_Count; } + const_iterator cbegin() const { return m_pArray; } + const_iterator cend() const { return m_pArray + m_Count; } + const_iterator begin() const { return cbegin(); } + const_iterator end() const { return cend(); } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void reserve(size_t newCapacity, bool freeMemory = false); + void resize(size_t newCount); + void clear() { resize(0); } + void shrink_to_fit(); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + +private: + AllocatorT m_Allocator; + T* m_pArray; + size_t m_Count; + size_t m_Capacity; +}; + +#ifndef _VMA_VECTOR_FUNCTIONS +template +VmaVector::VmaVector(const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) {} + +template +VmaVector::VmaVector(size_t count, const AllocatorT& allocator) + : m_Allocator(allocator), + m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) {} + +template +VmaVector::VmaVector(const VmaVector& src) + : m_Allocator(src.m_Allocator), + m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) +{ + if (m_Count != 0) + { + memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T)); + } +} + +template +VmaVector& VmaVector::operator=(const VmaVector& rhs) +{ + if (&rhs != this) + { + resize(rhs.m_Count); + if (m_Count != 0) + { + memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T)); + } + } + return *this; +} + +template +void VmaVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + m_pArray[newIndex] = src; +} + +template +void VmaVector::reserve(size_t newCapacity, bool freeMemory) +{ + newCapacity = VMA_MAX(newCapacity, m_Count); + + if ((newCapacity < m_Capacity) && !freeMemory) + { + newCapacity = m_Capacity; + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? 
VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; + if (m_Count != 0) + { + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } +} + +template +void VmaVector::resize(size_t newCount) +{ + size_t newCapacity = m_Capacity; + if (newCount > m_Capacity) + { + newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8)); + } + + if (newCapacity != m_Capacity) + { + T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; + const size_t elementsToCopy = VMA_MIN(m_Count, newCount); + if (elementsToCopy != 0) + { + memcpy(newArray, m_pArray, elementsToCopy * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = newCapacity; + m_pArray = newArray; + } + + m_Count = newCount; +} + +template +void VmaVector::shrink_to_fit() +{ + if (m_Capacity > m_Count) + { + T* newArray = VMA_NULL; + if (m_Count > 0) + { + newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); + memcpy(newArray, m_pArray, m_Count * sizeof(T)); + } + VmaFree(m_Allocator.m_pCallbacks, m_pArray); + m_Capacity = m_Count; + m_pArray = newArray; + } +} + +template +void VmaVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + if (index < oldCount) + { + memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T)); + } + m_pArray[index] = src; +} + +template +void VmaVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_VECTOR_FUNCTIONS + +template +static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) +{ + vec.insert(index, item); +} + +template +static void VmaVectorRemove(VmaVector& vec, size_t index) +{ + vec.remove(index); +} +#endif // _VMA_VECTOR + +#ifndef _VMA_SMALL_VECTOR +/* +This is a vector (a variable-sized array), optimized for the case when the array is small. + +It contains some number of elements in-place, which allows it to avoid heap allocation +when the actual number of elements is below that threshold. This allows normal "small" +cases to be fast without losing generality for large inputs. +*/ +template +class VmaSmallVector +{ +public: + typedef T value_type; + typedef T* iterator; + + VmaSmallVector(const AllocatorT& allocator); + VmaSmallVector(size_t count, const AllocatorT& allocator); + template + VmaSmallVector(const VmaSmallVector&) = delete; + template + VmaSmallVector& operator=(const VmaSmallVector&) = delete; + ~VmaSmallVector() = default; + + bool empty() const { return m_Count == 0; } + size_t size() const { return m_Count; } + T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + const T* data() const { return m_Count > N ? 
m_DynamicArray.data() : m_StaticArray; } + const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } + const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + + iterator begin() { return data(); } + iterator end() { return data() + m_Count; } + + void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } + void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } + void push_front(const T& src) { insert(0, src); } + + void push_back(const T& src); + void resize(size_t newCount, bool freeMemory = false); + void clear(bool freeMemory = false); + void insert(size_t index, const T& src); + void remove(size_t index); + + T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + +private: + size_t m_Count; + T m_StaticArray[N]; // Used when m_Size <= N + VmaVector m_DynamicArray; // Used when m_Size > N +}; + +#ifndef _VMA_SMALL_VECTOR_FUNCTIONS +template +VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) + : m_Count(0), + m_DynamicArray(allocator) {} + +template +VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) + : m_Count(count), + m_DynamicArray(count > N ? count : 0, allocator) {} + +template +void VmaSmallVector::push_back(const T& src) +{ + const size_t newIndex = size(); + resize(newIndex + 1); + data()[newIndex] = src; +} + +template +void VmaSmallVector::resize(size_t newCount, bool freeMemory) +{ + if (newCount > N && m_Count > N) + { + // Any direction, staying in m_DynamicArray + m_DynamicArray.resize(newCount); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else if (newCount > N && m_Count <= N) + { + // Growing, moving from m_StaticArray to m_DynamicArray + m_DynamicArray.resize(newCount); + if (m_Count > 0) + { + memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T)); + } + } + else if (newCount <= N && m_Count > N) + { + // Shrinking, moving from m_DynamicArray to m_StaticArray + if (newCount > 0) + { + memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T)); + } + m_DynamicArray.resize(0); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + } + else + { + // Any direction, staying in m_StaticArray - nothing to do here + } + m_Count = newCount; +} + +template +void VmaSmallVector::clear(bool freeMemory) +{ + m_DynamicArray.clear(); + if (freeMemory) + { + m_DynamicArray.shrink_to_fit(); + } + m_Count = 0; +} + +template +void VmaSmallVector::insert(size_t index, const T& src) +{ + VMA_HEAVY_ASSERT(index <= m_Count); + const size_t oldCount = size(); + resize(oldCount + 1); + T* const dataPtr = data(); + if (index < oldCount) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. + memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T)); + } + dataPtr[index] = src; +} + +template +void VmaSmallVector::remove(size_t index) +{ + VMA_HEAVY_ASSERT(index < m_Count); + const size_t oldCount = size(); + if (index < oldCount - 1) + { + // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. 
+ T* const dataPtr = data(); + memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); + } + resize(oldCount - 1); +} +#endif // _VMA_SMALL_VECTOR_FUNCTIONS +#endif // _VMA_SMALL_VECTOR + +#ifndef _VMA_POOL_ALLOCATOR +/* +Allocator for objects of type T using a list of arrays (pools) to speed up +allocation. Number of elements that can be allocated is not bounded because +allocator can create multiple blocks. +*/ +template +class VmaPoolAllocator +{ + VMA_CLASS_NO_COPY(VmaPoolAllocator) +public: + VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + ~VmaPoolAllocator(); + template T* Alloc(Types&&... args); + void Free(T* ptr); + +private: + union Item + { + uint32_t NextFreeIndex; + alignas(T) char Value[sizeof(T)]; + }; + struct ItemBlock + { + Item* pItems; + uint32_t Capacity; + uint32_t FirstFreeIndex; + }; + + const VkAllocationCallbacks* m_pAllocationCallbacks; + const uint32_t m_FirstBlockCapacity; + VmaVector> m_ItemBlocks; + + ItemBlock& CreateNewBlock(); +}; + +#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) +{ + VMA_ASSERT(m_FirstBlockCapacity > 1); +} + +template +VmaPoolAllocator::~VmaPoolAllocator() +{ + for (size_t i = m_ItemBlocks.size(); i--;) + vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity); + m_ItemBlocks.clear(); +} + +template +template T* VmaPoolAllocator::Alloc(Types&&... args) +{ + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + // This block has some free items: Use first one. + if (block.FirstFreeIndex != UINT32_MAX) + { + Item* const pItem = &block.pItems[block.FirstFreeIndex]; + block.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result)T(std::forward(args)...); // Explicit constructor call. + return result; + } + } + + // No block has free item: Create new one and use it. + ItemBlock& newBlock = CreateNewBlock(); + Item* const pItem = &newBlock.pItems[0]; + newBlock.FirstFreeIndex = pItem->NextFreeIndex; + T* result = (T*)&pItem->Value; + new(result) T(std::forward(args)...); // Explicit constructor call. + return result; +} + +template +void VmaPoolAllocator::Free(T* ptr) +{ + // Search all memory blocks to find ptr. + for (size_t i = m_ItemBlocks.size(); i--; ) + { + ItemBlock& block = m_ItemBlocks[i]; + + // Casting to union. + Item* pItemPtr; + memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); + + // Check if pItemPtr is in address range of this block. + if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity)) + { + ptr->~T(); // Explicit destructor call. + const uint32_t index = static_cast(pItemPtr - block.pItems); + pItemPtr->NextFreeIndex = block.FirstFreeIndex; + block.FirstFreeIndex = index; + return; + } + } + VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); +} + +template +typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +{ + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
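+        // First block gets the configured capacity; each subsequent block grows by 3/2.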
+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + + const ItemBlock newBlock = + { + vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity), + newBlockCapacity, + 0 + }; + + m_ItemBlocks.push_back(newBlock); + + // Setup singly-linked list of all free items in this block. + for (uint32_t i = 0; i < newBlockCapacity - 1; ++i) + newBlock.pItems[i].NextFreeIndex = i + 1; + newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX; + return m_ItemBlocks.back(); +} +#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS +#endif // _VMA_POOL_ALLOCATOR + +#ifndef _VMA_RAW_LIST +template +struct VmaListItem +{ + VmaListItem* pPrev; + VmaListItem* pNext; + T Value; +}; + +// Doubly linked list. +template +class VmaRawList +{ + VMA_CLASS_NO_COPY(VmaRawList) +public: + typedef VmaListItem ItemType; + + VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks); + // Intentionally not calling Clear, because that would be unnecessary + // computations to return all items to m_ItemAllocator as free. + ~VmaRawList() = default; + + size_t GetCount() const { return m_Count; } + bool IsEmpty() const { return m_Count == 0; } + + ItemType* Front() { return m_pFront; } + ItemType* Back() { return m_pBack; } + const ItemType* Front() const { return m_pFront; } + const ItemType* Back() const { return m_pBack; } + + ItemType* PushFront(); + ItemType* PushBack(); + ItemType* PushFront(const T& value); + ItemType* PushBack(const T& value); + void PopFront(); + void PopBack(); + + // Item can be null - it means PushBack. + ItemType* InsertBefore(ItemType* pItem); + // Item can be null - it means PushFront. + ItemType* InsertAfter(ItemType* pItem); + ItemType* InsertBefore(ItemType* pItem, const T& value); + ItemType* InsertAfter(ItemType* pItem, const T& value); + + void Clear(); + void Remove(ItemType* pItem); + +private: + const VkAllocationCallbacks* const m_pAllocationCallbacks; + VmaPoolAllocator m_ItemAllocator; + ItemType* m_pFront; + ItemType* m_pBack; + size_t m_Count; +}; + +#ifndef _VMA_RAW_LIST_FUNCTIONS +template +VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) + : m_pAllocationCallbacks(pAllocationCallbacks), + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) {} + +template +VmaListItem* VmaRawList::PushFront() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pPrev = VMA_NULL; + if (IsEmpty()) + { + pNewItem->pNext = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pNext = m_pFront; + m_pFront->pPrev = pNewItem; + m_pFront = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack() +{ + ItemType* const pNewItem = m_ItemAllocator.Alloc(); + pNewItem->pNext = VMA_NULL; + if(IsEmpty()) + { + pNewItem->pPrev = VMA_NULL; + m_pFront = pNewItem; + m_pBack = pNewItem; + m_Count = 1; + } + else + { + pNewItem->pPrev = m_pBack; + m_pBack->pNext = pNewItem; + m_pBack = pNewItem; + ++m_Count; + } + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushFront(const T& value) +{ + ItemType* const pNewItem = PushFront(); + pNewItem->Value = value; + return pNewItem; +} + +template +VmaListItem* VmaRawList::PushBack(const T& value) +{ + ItemType* const pNewItem = PushBack(); + pNewItem->Value = value; + return pNewItem; +} + +template +void VmaRawList::PopFront() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pFrontItem = m_pFront; + ItemType* const pNextItem = pFrontItem->pNext; + if (pNextItem != 
VMA_NULL) + { + pNextItem->pPrev = VMA_NULL; + } + m_pFront = pNextItem; + m_ItemAllocator.Free(pFrontItem); + --m_Count; +} + +template +void VmaRawList::PopBack() +{ + VMA_HEAVY_ASSERT(m_Count > 0); + ItemType* const pBackItem = m_pBack; + ItemType* const pPrevItem = pBackItem->pPrev; + if(pPrevItem != VMA_NULL) + { + pPrevItem->pNext = VMA_NULL; + } + m_pBack = pPrevItem; + m_ItemAllocator.Free(pBackItem); + --m_Count; +} + +template +void VmaRawList::Clear() +{ + if (IsEmpty() == false) + { + ItemType* pItem = m_pBack; + while (pItem != VMA_NULL) + { + ItemType* const pPrevItem = pItem->pPrev; + m_ItemAllocator.Free(pItem); + pItem = pPrevItem; + } + m_pFront = VMA_NULL; + m_pBack = VMA_NULL; + m_Count = 0; + } +} + +template +void VmaRawList::Remove(ItemType* pItem) +{ + VMA_HEAVY_ASSERT(pItem != VMA_NULL); + VMA_HEAVY_ASSERT(m_Count > 0); + + if(pItem->pPrev != VMA_NULL) + { + pItem->pPrev->pNext = pItem->pNext; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = pItem->pNext; + } + + if(pItem->pNext != VMA_NULL) + { + pItem->pNext->pPrev = pItem->pPrev; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = pItem->pPrev; + } + + m_ItemAllocator.Free(pItem); + --m_Count; +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const prevItem = pItem->pPrev; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pPrev = prevItem; + newItem->pNext = pItem; + pItem->pPrev = newItem; + if(prevItem != VMA_NULL) + { + prevItem->pNext = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pFront == pItem); + m_pFront = newItem; + } + ++m_Count; + return newItem; + } + else + return PushBack(); +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +{ + if(pItem != VMA_NULL) + { + ItemType* const nextItem = pItem->pNext; + ItemType* const newItem = m_ItemAllocator.Alloc(); + newItem->pNext = nextItem; + newItem->pPrev = pItem; + pItem->pNext = newItem; + if(nextItem != VMA_NULL) + { + nextItem->pPrev = newItem; + } + else + { + VMA_HEAVY_ASSERT(m_pBack == pItem); + m_pBack = newItem; + } + ++m_Count; + return newItem; + } + else + return PushFront(); +} + +template +VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertBefore(pItem); + newItem->Value = value; + return newItem; +} + +template +VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +{ + ItemType* const newItem = InsertAfter(pItem); + newItem->Value = value; + return newItem; +} +#endif // _VMA_RAW_LIST_FUNCTIONS +#endif // _VMA_RAW_LIST + +#ifndef _VMA_LIST +template +class VmaList +{ + VMA_CLASS_NO_COPY(VmaList) +public: + class reverse_iterator; + class const_iterator; + class const_reverse_iterator; + + class iterator + { + friend class const_iterator; + friend class VmaList; + public: + iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + + T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } + T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + + bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } + bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + + iterator operator++(int) { iterator result = *this; ++*this; return result; } + iterator operator--(int) { 
+
+#ifndef _VMA_LIST
+template<typename T, typename AllocatorT>
+class VmaList
+{
+    VMA_CLASS_NO_COPY(VmaList)
+public:
+    class reverse_iterator;
+    class const_iterator;
+    class const_reverse_iterator;
+
+    class iterator
+    {
+        friend class const_iterator;
+        friend class VmaList<T, AllocatorT>;
+    public:
+        iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+        iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+        bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+        bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+        iterator operator++(int) { iterator result = *this; ++*this; return result; }
+        iterator operator--(int) { iterator result = *this; --*this; return result; }
+
+        iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
+        iterator& operator--();
+
+    private:
+        VmaRawList<T>* m_pList;
+        VmaListItem<T>* m_pItem;
+
+        iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+    };
+    class reverse_iterator
+    {
+        friend class const_reverse_iterator;
+        friend class VmaList<T, AllocatorT>;
+    public:
+        reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+        reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+        T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+        T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+        bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+        bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+        reverse_iterator operator++(int) { reverse_iterator result = *this; ++*this; return result; }
+        reverse_iterator operator--(int) { reverse_iterator result = *this; --*this; return result; }
+
+        reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
+        reverse_iterator& operator--();
+
+    private:
+        VmaRawList<T>* m_pList;
+        VmaListItem<T>* m_pItem;
+
+        reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+    };
+    class const_iterator
+    {
+        friend class VmaList<T, AllocatorT>;
+    public:
+        const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+        const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+        const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+        iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
+
+        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+        bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+        bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+        const_iterator operator++(int) { const_iterator result = *this; ++*this; return result; }
+        const_iterator operator--(int) { const_iterator result = *this; --*this; return result; }
+
+        const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
+        const_iterator& operator--();
+
+    private:
+        const VmaRawList<T>* m_pList;
+        const VmaListItem<T>* m_pItem;
+
+        const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+    };
+    class const_reverse_iterator
+    {
+        friend class VmaList<T, AllocatorT>;
+    public:
+        const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
+        const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+        const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
+
+        reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
+
+        const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
+        const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
+
+        bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
+        bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
+
+        const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++*this; return result; }
+        const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --*this; return result; }
+
+        const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
+        const_reverse_iterator& operator--();
+
+    private:
+        const VmaRawList<T>* m_pList;
+        const VmaListItem<T>* m_pItem;
+
+        const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
+    };
+
+    VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
+
+    bool empty() const { return m_RawList.IsEmpty(); }
+    size_t size() const { return m_RawList.GetCount(); }
+
+    iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
+    iterator end() { return iterator(&m_RawList, VMA_NULL); }
+
+    const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
+    const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
+
+    const_iterator begin() const { return cbegin(); }
+    const_iterator end() const { return cend(); }
+
+    reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
+    reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
+
+    const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
+    const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
+
+    const_reverse_iterator rbegin() const { return crbegin(); }
+    const_reverse_iterator rend() const { return crend(); }
+
+    void push_back(const T& value) { m_RawList.PushBack(value); }
+    iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
+
+    void clear() { m_RawList.Clear(); }
+    void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
+
+private:
+    VmaRawList<T> m_RawList;
+};
+
+#ifndef _VMA_LIST_FUNCTIONS
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
+{
+    if (m_pItem != VMA_NULL)
+    {
+        m_pItem = m_pItem->pPrev;
+    }
+    else
+    {
+        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+        m_pItem = m_pList->Back();
+    }
+    return *this;
+}
+
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
+{
+    if (m_pItem != VMA_NULL)
+    {
+        m_pItem = m_pItem->pNext;
+    }
+    else
+    {
+        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+        m_pItem = m_pList->Front();
+    }
+    return *this;
+}
+
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
+{
+    if (m_pItem != VMA_NULL)
+    {
+        m_pItem = m_pItem->pPrev;
+    }
+    else
+    {
+        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+        m_pItem = m_pList->Back();
+    }
+    return *this;
+}
+
+template<typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
+{
+    if (m_pItem != VMA_NULL)
+    {
+        m_pItem = m_pItem->pNext;
+    }
+    else
+    {
+        VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
+        // Decrementing rend() must yield the front item (the last element of
+        // reverse iteration), mirroring reverse_iterator::operator--() above.
+        m_pItem = m_pList->Front();
+    }
+    return *this;
+}
+#endif // _VMA_LIST_FUNCTIONS
+#endif // _VMA_LIST
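+
+// Example usage (illustrative sketch; `allocCallbacks` stands for any valid
+// const VkAllocationCallbacks* and may be null):
+//
+//     VmaStlAllocator<int> alloc(allocCallbacks);
+//     VmaList<int, VmaStlAllocator<int>> list(alloc);
+//     list.push_back(10);
+//     list.insert(list.begin(), 5); // list: 5, 10
+//     for (auto it = list.rbegin(); it != list.rend(); ++it)
+//         ; // visits 10, then 5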
+
+#ifndef _VMA_INTRUSIVE_LINKED_LIST
+/*
+Expected interface of ItemTypeTraits:
+struct MyItemTypeTraits
+{
+    typedef MyItem ItemType;
+    static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
+    static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
+    static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
+    static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
+};
+*/
+template<typename ItemTypeTraits>
+class VmaIntrusiveLinkedList
+{
+public:
+    typedef typename ItemTypeTraits::ItemType ItemType;
+    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
+    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
+
+    // Movable, not copyable.
+    VmaIntrusiveLinkedList() = default;
+    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src);
+    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
+    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
+    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
+    ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
+
+    size_t GetCount() const { return m_Count; }
+    bool IsEmpty() const { return m_Count == 0; }
+    ItemType* Front() { return m_Front; }
+    ItemType* Back() { return m_Back; }
+    const ItemType* Front() const { return m_Front; }
+    const ItemType* Back() const { return m_Back; }
+
+    void PushBack(ItemType* item);
+    void PushFront(ItemType* item);
+    ItemType* PopBack();
+    ItemType* PopFront();
+
+    // MyItem can be null - it means PushBack.
+    void InsertBefore(ItemType* existingItem, ItemType* newItem);
+    // MyItem can be null - it means PushFront.
+    void InsertAfter(ItemType* existingItem, ItemType* newItem);
+    void Remove(ItemType* item);
+    void RemoveAll();
+
+private:
+    ItemType* m_Front = VMA_NULL;
+    ItemType* m_Back = VMA_NULL;
+    size_t m_Count = 0;
+};
+
+#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
+template<typename ItemTypeTraits>
+VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
+    : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
+{
+    src.m_Front = src.m_Back = VMA_NULL;
+    src.m_Count = 0;
+}
+
+template<typename ItemTypeTraits>
+VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
+{
+    if (&src != this)
+    {
+        VMA_HEAVY_ASSERT(IsEmpty());
+        m_Front = src.m_Front;
+        m_Back = src.m_Back;
+        m_Count = src.m_Count;
+        src.m_Front = src.m_Back = VMA_NULL;
+        src.m_Count = 0;
+    }
+    return *this;
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
+{
+    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+    if (IsEmpty())
+    {
+        m_Front = item;
+        m_Back = item;
+        m_Count = 1;
+    }
+    else
+    {
+        ItemTypeTraits::AccessPrev(item) = m_Back;
+        ItemTypeTraits::AccessNext(m_Back) = item;
+        m_Back = item;
+        ++m_Count;
+    }
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
+{
+    VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
+    if (IsEmpty())
+    {
+        m_Front = item;
+        m_Back = item;
+        m_Count = 1;
+    }
+    else
+    {
+        ItemTypeTraits::AccessNext(item) = m_Front;
+        ItemTypeTraits::AccessPrev(m_Front) = item;
+        m_Front = item;
+        ++m_Count;
+    }
+}
+
+template<typename ItemTypeTraits>
+typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
+{
+    VMA_HEAVY_ASSERT(m_Count > 0);
+    ItemType* const backItem = m_Back;
+    ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
+    if (prevItem != VMA_NULL)
+    {
+        ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
+    }
+    m_Back = prevItem;
+    --m_Count;
+    ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
+    ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
+    return backItem;
+}
+
+template<typename ItemTypeTraits>
+typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
+{
+    VMA_HEAVY_ASSERT(m_Count > 0);
+    ItemType* const frontItem = m_Front;
+    ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
+    if (nextItem != VMA_NULL)
+    {
+        ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
+    }
+    m_Front = nextItem;
+    --m_Count;
+    ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
+    ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
+    return frontItem;
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
+{
+    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+    if (existingItem != VMA_NULL)
+    {
+        ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
+        ItemTypeTraits::AccessPrev(newItem) = prevItem;
+        ItemTypeTraits::AccessNext(newItem) = existingItem;
+        ItemTypeTraits::AccessPrev(existingItem) = newItem;
+        if (prevItem != VMA_NULL)
+        {
+            ItemTypeTraits::AccessNext(prevItem) = newItem;
+        }
+        else
+        {
+            VMA_HEAVY_ASSERT(m_Front == existingItem);
+            m_Front = newItem;
+        }
+        ++m_Count;
+    }
+    else
+        PushBack(newItem);
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
+{
+    VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
+    if (existingItem != VMA_NULL)
+    {
+        ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
+        ItemTypeTraits::AccessNext(newItem) = nextItem;
+        ItemTypeTraits::AccessPrev(newItem) = existingItem;
+        ItemTypeTraits::AccessNext(existingItem) = newItem;
+        if (nextItem != VMA_NULL)
+        {
+            ItemTypeTraits::AccessPrev(nextItem) = newItem;
+        }
+        else
+        {
+            VMA_HEAVY_ASSERT(m_Back == existingItem);
+            m_Back = newItem;
+        }
+        ++m_Count;
+    }
+    else
+        PushFront(newItem);
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
+{
+    VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
+    if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
+    {
+        ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
+    }
+    else
+    {
+        VMA_HEAVY_ASSERT(m_Front == item);
+        m_Front = ItemTypeTraits::GetNext(item);
+    }
+
+    if (ItemTypeTraits::GetNext(item) != VMA_NULL)
+    {
+        ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
+    }
+    else
+    {
+        VMA_HEAVY_ASSERT(m_Back == item);
+        m_Back = ItemTypeTraits::GetPrev(item);
+    }
+    ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+    ItemTypeTraits::AccessNext(item) = VMA_NULL;
+    --m_Count;
+}
+
+template<typename ItemTypeTraits>
+void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
+{
+    if (!IsEmpty())
+    {
+        ItemType* item = m_Back;
+        while (item != VMA_NULL)
+        {
+            ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
+            ItemTypeTraits::AccessPrev(item) = VMA_NULL;
+            ItemTypeTraits::AccessNext(item) = VMA_NULL;
+            item = prevItem;
+        }
+        m_Front = VMA_NULL;
+        m_Back = VMA_NULL;
+        m_Count = 0;
+    }
+}
+#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
+#endif // _VMA_INTRUSIVE_LINKED_LIST
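+
+// Illustrative usage, assuming the MyItem/MyItemTypeTraits shape documented
+// above the class: nodes are owned by the caller, and the list must be
+// emptied before destruction (the destructor asserts IsEmpty()).
+//
+//     MyItem a, b; // with myPrevPtr/myNextPtr initialized to VMA_NULL
+//     VmaIntrusiveLinkedList<MyItemTypeTraits> list;
+//     list.PushBack(&a);
+//     list.InsertAfter(&a, &b); // list: a, b
+//     list.Remove(&a);          // unlinks a; a's pointers reset to VMA_NULL
+//     list.RemoveAll();         // leaves remaining nodes' pointers null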
+
+// Unused in this version.
+#if 0
+
+#ifndef _VMA_PAIR
+template<typename T1, typename T2>
+struct VmaPair
+{
+    T1 first;
+    T2 second;
+
+    VmaPair() : first(), second() {}
+    VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
+};
+
+template<typename FirstT, typename SecondT>
+struct VmaPairFirstLess
+{
+    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
+    {
+        return lhs.first < rhs.first;
+    }
+    bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
+    {
+        return lhs.first < rhsFirst;
+    }
+};
+#endif // _VMA_PAIR
+
+#ifndef _VMA_MAP
+/* Class compatible with subset of interface of std::unordered_map.
+KeyT, ValueT must be POD because they will be stored in VmaVector.
+*/
+template<typename KeyT, typename ValueT>
+class VmaMap
+{
+public:
+    typedef VmaPair<KeyT, ValueT> PairType;
+    typedef PairType* iterator;
+
+    VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}
+
+    iterator begin() { return m_Vector.begin(); }
+    iterator end() { return m_Vector.end(); }
+    size_t size() { return m_Vector.size(); }
+
+    void insert(const PairType& pair);
+    iterator find(const KeyT& key);
+    void erase(iterator it);
+
+private:
+    VmaVector<PairType, VmaStlAllocator<PairType>> m_Vector;
+};
+
+#ifndef _VMA_MAP_FUNCTIONS
+template<typename KeyT, typename ValueT>
+void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
+{
+    const size_t indexToInsert = VmaBinaryFindFirstNotLess(
+        m_Vector.data(),
+        m_Vector.data() + m_Vector.size(),
+        pair,
+        VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
+    VmaVectorInsert(m_Vector, indexToInsert, pair);
+}
+
+template<typename KeyT, typename ValueT>
+VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
+{
+    PairType* it = VmaBinaryFindFirstNotLess(
+        m_Vector.data(),
+        m_Vector.data() + m_Vector.size(),
+        key,
+        VmaPairFirstLess<KeyT, ValueT>());
+    if ((it != m_Vector.end()) && (it->first == key))
+    {
+        return it;
+    }
+    else
+    {
+        return m_Vector.end();
+    }
+}
+
+template<typename KeyT, typename ValueT>
+void VmaMap<KeyT, ValueT>::erase(iterator it)
+{
+    VmaVectorRemove(m_Vector, it - m_Vector.begin());
+}
+#endif // _VMA_MAP_FUNCTIONS
+#endif // _VMA_MAP
+
+#endif // #if 0
+
+#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
+class VmaStringBuilder
+{
+public:
+    VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
+    ~VmaStringBuilder() = default;
+
+    size_t GetLength() const { return m_Data.size(); }
+    const char* GetData() const { return m_Data.data(); }
+    void AddNewLine() { Add('\n'); }
+    void Add(char ch) { m_Data.push_back(ch); }
+
+    void Add(const char* pStr);
+    void AddNumber(uint32_t num);
+    void AddNumber(uint64_t num);
+    void AddPointer(const void* ptr);
+
+private:
+    VmaVector<char, VmaStlAllocator<char>> m_Data;
+};
+
+#ifndef _VMA_STRING_BUILDER_FUNCTIONS
+void VmaStringBuilder::Add(const char* pStr)
+{
+    const size_t strLen = strlen(pStr);
+    if (strLen > 0)
+    {
+        const size_t oldCount = m_Data.size();
+        m_Data.resize(oldCount + strLen);
+        memcpy(m_Data.data() + oldCount, pStr, strLen);
+    }
+}
+
+void VmaStringBuilder::AddNumber(uint32_t num)
+{
+    char buf[11];
+    buf[10] = '\0';
+    char* p = &buf[10];
+    do
+    {
+        *--p = '0' + (num % 10);
+        num /= 10;
+    } while (num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddNumber(uint64_t num)
+{
+    char buf[21];
+    buf[20] = '\0';
+    char* p = &buf[20];
+    do
+    {
+        *--p = '0' + (num % 10);
+        num /= 10;
+    } while (num);
+    Add(p);
+}
+
+void VmaStringBuilder::AddPointer(const void* ptr)
+{
+    char buf[21];
+    VmaPtrToStr(buf, sizeof(buf), ptr);
+    Add(buf);
+}
+#endif //_VMA_STRING_BUILDER_FUNCTIONS
+#endif // _VMA_STRING_BUILDER
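+
+// Example usage (illustrative sketch; `allocCallbacks` stands for any valid
+// const VkAllocationCallbacks* and may be null). Note that GetData() is not
+// null-terminated - always pair it with GetLength():
+//
+//     VmaStringBuilder sb(allocCallbacks);
+//     sb.Add("blockBytes = ");
+//     sb.AddNumber((uint64_t)256 * 1024 * 1024);
+//     sb.AddNewLine();
+//     fwrite(sb.GetData(), 1, sb.GetLength(), stdout);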
+
+#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
+/*
+Allows to conveniently build a correct JSON document to be written to the
+VmaStringBuilder passed to the constructor.
+*/
+class VmaJsonWriter
+{
+    VMA_CLASS_NO_COPY(VmaJsonWriter)
+public:
+    // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
+    VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
+    ~VmaJsonWriter();
+
+    // Begins object by writing "{".
+    // Inside an object, you must call pairs of WriteString and a value, e.g.:
+    // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
+    // Will write: { "A": 1, "B": 2 }
+    void BeginObject(bool singleLine = false);
+    // Ends object by writing "}".
+    void EndObject();
+
+    // Begins array by writing "[".
+    // Inside an array, you can write a sequence of any values.
+    void BeginArray(bool singleLine = false);
+    // Ends array by writing "]".
+    void EndArray();
+
+    // Writes a string value inside "".
+    // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
+    void WriteString(const char* pStr);
+
+    // Begins writing a string value.
+    // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
+    // WriteString to conveniently build the string content incrementally, made of
+    // parts including numbers.
+    void BeginString(const char* pStr = VMA_NULL);
+    // Posts next part of an open string.
+    void ContinueString(const char* pStr);
+    // Posts next part of an open string. The number is converted to decimal characters.
+    void ContinueString(uint32_t n);
+    void ContinueString(uint64_t n);
+    // Posts next part of an open string. Pointer value is converted to characters
+    // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
+    void ContinueString_Pointer(const void* ptr);
+    // Ends writing a string value by writing '"'.
+    void EndString(const char* pStr = VMA_NULL);
+
+    // Writes a number value.
+    void WriteNumber(uint32_t n);
+    void WriteNumber(uint64_t n);
+    // Writes a boolean value - false or true.
+    void WriteBool(bool b);
+    // Writes a null value.
+    void WriteNull();
+
+private:
+    enum COLLECTION_TYPE
+    {
+        COLLECTION_TYPE_OBJECT,
+        COLLECTION_TYPE_ARRAY,
+    };
+    struct StackItem
+    {
+        COLLECTION_TYPE type;
+        uint32_t valueCount;
+        bool singleLineMode;
+    };
+
+    static const char* const INDENT;
+
+    VmaStringBuilder& m_SB;
+    VmaVector<StackItem, VmaStlAllocator<StackItem>> m_Stack;
+    bool m_InsideString;
+
+    void BeginValue(bool isString);
+    void WriteIndent(bool oneLess = false);
+};
+const char* const VmaJsonWriter::INDENT = "  ";
+
+#ifndef _VMA_JSON_WRITER_FUNCTIONS
+VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
+    : m_SB(sb),
+    m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
+    m_InsideString(false) {}
+
+VmaJsonWriter::~VmaJsonWriter()
+{
+    VMA_ASSERT(!m_InsideString);
+    VMA_ASSERT(m_Stack.empty());
+}
+
+void VmaJsonWriter::BeginObject(bool singleLine)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(false);
+    m_SB.Add('{');
+
+    StackItem item;
+    item.type = COLLECTION_TYPE_OBJECT;
+    item.valueCount = 0;
+    item.singleLineMode = singleLine;
+    m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndObject()
+{
+    VMA_ASSERT(!m_InsideString);
+
+    WriteIndent(true);
+    m_SB.Add('}');
+
+    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
+    m_Stack.pop_back();
+}
+
+void VmaJsonWriter::BeginArray(bool singleLine)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(false);
+    m_SB.Add('[');
+
+    StackItem item;
+    item.type = COLLECTION_TYPE_ARRAY;
+    item.valueCount = 0;
+    item.singleLineMode = singleLine;
+    m_Stack.push_back(item);
+}
+
+void VmaJsonWriter::EndArray()
+{
+    VMA_ASSERT(!m_InsideString);
+
+    WriteIndent(true);
+    m_SB.Add(']');
+
+    VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
+    m_Stack.pop_back();
+}
+
+void VmaJsonWriter::WriteString(const char* pStr)
+{
+    BeginString(pStr);
+    EndString();
+}
+
+void VmaJsonWriter::BeginString(const char* pStr)
+{
+    VMA_ASSERT(!m_InsideString);
+
+    BeginValue(true);
+    m_SB.Add('"');
+    m_InsideString = true;
+    if (pStr != VMA_NULL && pStr[0] != '\0')
+    {
+        ContinueString(pStr);
+    }
+}
+
+void VmaJsonWriter::ContinueString(const char* pStr)
+{
+    VMA_ASSERT(m_InsideString);
+
+    const size_t strLen = strlen(pStr);
+    for (size_t i = 0; i < strLen; ++i)
+    {
+        char ch = pStr[i];
+        if (ch == '\\')
+        {
+            m_SB.Add("\\\\");
+        }
+        else if (ch == '"')
+        {
+            m_SB.Add("\\\"");
+        }
+        else if (ch >= 32)
+        {
+            m_SB.Add(ch);
+        }
+        else switch (ch)
+        {
+        case '\b':
+            m_SB.Add("\\b");
+            break;
+        case '\f':
+            m_SB.Add("\\f");
+            break;
+        case '\n':
+            m_SB.Add("\\n");
+            break;
+        case '\r':
+            m_SB.Add("\\r");
+            break;
+        case '\t':
+            m_SB.Add("\\t");
+            break;
+        default:
+            VMA_ASSERT(0 && "Character not currently supported.");
+            break;
+        }
+    }
+}
+
+void VmaJsonWriter::ContinueString(uint32_t n)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString(uint64_t n)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
+{
+    VMA_ASSERT(m_InsideString);
+    m_SB.AddPointer(ptr);
+}
+
+void VmaJsonWriter::EndString(const char* pStr)
+{
+    VMA_ASSERT(m_InsideString);
+    if (pStr != VMA_NULL && pStr[0] != '\0')
+    {
+        ContinueString(pStr);
+    }
+    m_SB.Add('"');
+    m_InsideString = false;
+}
+
+void VmaJsonWriter::WriteNumber(uint32_t n)
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
+    m_SB.AddNumber(n);
+}
+
+void VmaJsonWriter::WriteNumber(uint64_t n)
+{
+    VMA_ASSERT(!m_InsideString);
+    BeginValue(false);
m_SB.AddNumber(n); +} + +void VmaJsonWriter::WriteBool(bool b) +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add(b ? "true" : "false"); +} + +void VmaJsonWriter::WriteNull() +{ + VMA_ASSERT(!m_InsideString); + BeginValue(false); + m_SB.Add("null"); +} + +void VmaJsonWriter::BeginValue(bool isString) +{ + if (!m_Stack.empty()) + { + StackItem& currItem = m_Stack.back(); + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 == 0) + { + VMA_ASSERT(isString); + } + + if (currItem.type == COLLECTION_TYPE_OBJECT && + currItem.valueCount % 2 != 0) + { + m_SB.Add(": "); + } + else if (currItem.valueCount > 0) + { + m_SB.Add(", "); + WriteIndent(); + } + else + { + WriteIndent(); + } + ++currItem.valueCount; + } +} + +void VmaJsonWriter::WriteIndent(bool oneLess) +{ + if (!m_Stack.empty() && !m_Stack.back().singleLineMode) + { + m_SB.AddNewLine(); + + size_t count = m_Stack.size(); + if (count > 0 && oneLess) + { + --count; + } + for (size_t i = 0; i < count; ++i) + { + m_SB.Add(INDENT); + } + } +} +#endif // _VMA_JSON_WRITER_FUNCTIONS + +static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) +{ + json.BeginObject(); + + json.WriteString("BlockCount"); + json.WriteNumber(stat.statistics.blockCount); + json.WriteString("BlockBytes"); + json.WriteNumber(stat.statistics.blockBytes); + json.WriteString("AllocationCount"); + json.WriteNumber(stat.statistics.allocationCount); + json.WriteString("AllocationBytes"); + json.WriteNumber(stat.statistics.allocationBytes); + json.WriteString("UnusedRangeCount"); + json.WriteNumber(stat.unusedRangeCount); + + if (stat.statistics.allocationCount > 1) + { + json.WriteString("AllocationSizeMin"); + json.WriteNumber(stat.allocationSizeMin); + json.WriteString("AllocationSizeMax"); + json.WriteNumber(stat.allocationSizeMax); + } + if (stat.unusedRangeCount > 1) + { + json.WriteString("UnusedRangeSizeMin"); + json.WriteNumber(stat.unusedRangeSizeMin); + json.WriteString("UnusedRangeSizeMax"); + json.WriteNumber(stat.unusedRangeSizeMax); + } + json.EndObject(); +} +#endif // _VMA_JSON_WRITER + +#ifndef _VMA_MAPPING_HYSTERESIS + +class VmaMappingHysteresis +{ + VMA_CLASS_NO_COPY(VmaMappingHysteresis) +public: + VmaMappingHysteresis() = default; + + uint32_t GetExtraMapping() const { return m_ExtraMapping; } + + // Call when Map was called. + // Returns true if switched to extra +1 mapping reference count. + bool PostMap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + { + m_ExtraMapping = 1; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + + // Call when Unmap was called. + void PostUnmap() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 0) + ++m_MajorCounter; + else // m_ExtraMapping == 1 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was made from the memory block. + void PostAlloc() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + ++m_MajorCounter; + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + } + + // Call when allocation was freed from the memory block. + // Returns true if switched to extra -1 mapping reference count. 
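+    // Illustrative behavior (when VMA_MAPPING_HYSTERESIS_ENABLED is nonzero):
+    // with COUNTER_MIN_EXTRA_MAPPING == 7, roughly seven Map() calls without
+    // competing allocations are needed before the block keeps an extra
+    // persistent mapping, and the extra mapping is dropped again only once
+    // frees clearly dominate (m_MajorCounter > m_MinorCounter + 1).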
+ bool PostFree() + { +#if VMA_MAPPING_HYSTERESIS_ENABLED + if(m_ExtraMapping == 1) + { + ++m_MajorCounter; + if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + m_MajorCounter > m_MinorCounter + 1) + { + m_ExtraMapping = 0; + m_MajorCounter = 0; + m_MinorCounter = 0; + return true; + } + } + else // m_ExtraMapping == 0 + PostMinorCounter(); +#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED + return false; + } + +private: + static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7; + + uint32_t m_MinorCounter = 0; + uint32_t m_MajorCounter = 0; + uint32_t m_ExtraMapping = 0; // 0 or 1. + + void PostMinorCounter() + { + if(m_MinorCounter < m_MajorCounter) + { + ++m_MinorCounter; + } + else if(m_MajorCounter > 0) + { + --m_MajorCounter; + --m_MinorCounter; + } + } +}; + +#endif // _VMA_MAPPING_HYSTERESIS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK +/* +Represents a single block of device memory (`VkDeviceMemory`) with all the +data about its regions (aka suballocations, #VmaAllocation), assigned and free. + +Thread-safety: +- Access to m_pMetadata must be externally synchronized. +- Map, Unmap, Bind* are synchronized internally. +*/ +class VmaDeviceMemoryBlock +{ + VMA_CLASS_NO_COPY(VmaDeviceMemoryBlock) +public: + VmaBlockMetadata* m_pMetadata; + + VmaDeviceMemoryBlock(VmaAllocator hAllocator); + ~VmaDeviceMemoryBlock(); + + // Always call after construction. + void Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity); + // Always call before destruction. + void Destroy(VmaAllocator allocator); + + VmaPool GetParentPool() const { return m_hParentPool; } + VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + uint32_t GetId() const { return m_Id; } + void* GetMappedData() const { return m_pMappedData; } + uint32_t GetMapRefCount() const { return m_MapCount; } + + // Call when allocation/free was made from m_pMetadata. + // Used for m_MappingHysteresis. + void PostAlloc(VmaAllocator hAllocator); + void PostFree(VmaAllocator hAllocator); + + // Validates all data structures inside this object. If not valid, returns false. + bool Validate() const; + VkResult CheckCorruption(VmaAllocator hAllocator); + + // ppData can be null. + VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + void Unmap(VmaAllocator hAllocator, uint32_t count); + + VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); + + VkResult BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + +private: + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + uint32_t m_MemoryTypeIndex; + uint32_t m_Id; + VkDeviceMemory m_hMemory; + + /* + Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory. + Also protects m_MapCount, m_pMappedData. + Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex. 
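+    m_MapCount is a reference count: the VkDeviceMemory is mapped at most
+    once, and nested Map() calls only increment the counter.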
+ */ + VMA_MUTEX m_MapAndBindMutex; + VmaMappingHysteresis m_MappingHysteresis; + uint32_t m_MapCount; + void* m_pMappedData; +}; +#endif // _VMA_DEVICE_MEMORY_BLOCK + +#ifndef _VMA_ALLOCATION_T +struct VmaAllocation_T +{ + friend struct VmaDedicatedAllocationListItemTraits; + + enum FLAGS + { + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, + }; + +public: + enum ALLOCATION_TYPE + { + ALLOCATION_TYPE_NONE, + ALLOCATION_TYPE_BLOCK, + ALLOCATION_TYPE_DEDICATED, + }; + + // This struct is allocated using VmaPoolAllocator. + VmaAllocation_T(bool mappingAllowed); + ~VmaAllocation_T(); + + void InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped); + // pMappedData not null means allocation is created with MAPPED flag. + void InitDedicatedAllocation( + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size); + + ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } + VkDeviceSize GetAlignment() const { return m_Alignment; } + VkDeviceSize GetSize() const { return m_Size; } + void* GetUserData() const { return m_pUserData; } + const char* GetName() const { return m_pName; } + VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } + + VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } + bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } + + void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char* pName); + void FreeName(VmaAllocator hAllocator); + uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); + VmaAllocHandle GetAllocHandle() const; + VkDeviceSize GetOffset() const; + VmaPool GetParentPool() const; + VkDeviceMemory GetMemory() const; + void* GetMappedData() const; + + void BlockAllocMap(); + void BlockAllocUnmap(); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + void DedicatedAllocUnmap(VmaAllocator hAllocator); + +#if VMA_STATS_STRING_ENABLED + uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } + + void InitBufferImageUsage(uint32_t bufferImageUsage); + void PrintParameters(class VmaJsonWriter& json) const; +#endif + +private: + // Allocation out of VmaDeviceMemoryBlock. + struct BlockAllocation + { + VmaDeviceMemoryBlock* m_Block; + VmaAllocHandle m_AllocHandle; + }; + // Allocation for an object that has its own private VkDeviceMemory. + struct DedicatedAllocation + { + VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. + VkDeviceMemory m_hMemory; + void* m_pMappedData; // Not null means memory is mapped. + VmaAllocation_T* m_Prev; + VmaAllocation_T* m_Next; + }; + union + { + // Allocation out of VmaDeviceMemoryBlock. + BlockAllocation m_BlockAllocation; + // Allocation for an object that has its own private VkDeviceMemory. 
+ DedicatedAllocation m_DedicatedAllocation; + }; + + VkDeviceSize m_Alignment; + VkDeviceSize m_Size; + void* m_pUserData; + char* m_pName; + uint32_t m_MemoryTypeIndex; + uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_SuballocationType; // VmaSuballocationType + // Reference counter for vmaMapMemory()/vmaUnmapMemory(). + uint8_t m_MapCount; + uint8_t m_Flags; // enum FLAGS +#if VMA_STATS_STRING_ENABLED + uint32_t m_BufferImageUsage; // 0 if unknown. +#endif +}; +#endif // _VMA_ALLOCATION_T + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS +struct VmaDedicatedAllocationListItemTraits +{ + typedef VmaAllocation_T ItemType; + + static ItemType* GetPrev(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType* GetNext(const ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } + static ItemType*& AccessPrev(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Prev; + } + static ItemType*& AccessNext(ItemType* item) + { + VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); + return item->m_DedicatedAllocation.m_Next; + } +}; +#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS + +#ifndef _VMA_DEDICATED_ALLOCATION_LIST +/* +Stores linked list of VmaAllocation_T objects. +Thread-safe, synchronized internally. +*/ +class VmaDedicatedAllocationList +{ +public: + VmaDedicatedAllocationList() {} + ~VmaDedicatedAllocationList(); + + void Init(bool useMutex) { m_UseMutex = useMutex; } + bool Validate(); + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + void AddStatistics(VmaStatistics& inoutStats); +#if VMA_STATS_STRING_ENABLED + // Writes JSON array with the list of allocations. 
+    void BuildStatsString(VmaJsonWriter& json);
+#endif
+
+    bool IsEmpty();
+    void Register(VmaAllocation alloc);
+    void Unregister(VmaAllocation alloc);
+
+private:
+    typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
+
+    bool m_UseMutex = true;
+    VMA_RW_MUTEX m_Mutex;
+    DedicatedAllocationLinkedList m_AllocationList;
+};
+
+#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
+
+VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
+{
+    VMA_HEAVY_ASSERT(Validate());
+
+    if (!m_AllocationList.IsEmpty())
+    {
+        VMA_ASSERT(false && "Unfreed dedicated allocations found!");
+    }
+}
+
+bool VmaDedicatedAllocationList::Validate()
+{
+    const size_t declaredCount = m_AllocationList.GetCount();
+    size_t actualCount = 0;
+    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+    for (VmaAllocation alloc = m_AllocationList.Front();
+        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
+    {
+        ++actualCount;
+    }
+    VMA_VALIDATE(actualCount == declaredCount);
+
+    return true;
+}
+
+void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
+{
+    // Class is documented as synchronized internally, so take the same read
+    // lock as AddStatistics below.
+    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
+    {
+        const VkDeviceSize size = item->GetSize();
+        inoutStats.statistics.blockCount++;
+        inoutStats.statistics.blockBytes += size;
+        VmaAddDetailedStatisticsAllocation(inoutStats, size);
+    }
+}
+
+void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
+{
+    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+
+    const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
+    inoutStats.blockCount += allocCount;
+    inoutStats.allocationCount += allocCount;
+
+    for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
+    {
+        const VkDeviceSize size = item->GetSize();
+        inoutStats.blockBytes += size;
+        inoutStats.allocationBytes += size;
+    }
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
+{
+    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+    json.BeginArray();
+    for (VmaAllocation alloc = m_AllocationList.Front();
+        alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
+    {
+        json.BeginObject(true);
+        alloc->PrintParameters(json);
+        json.EndObject();
+    }
+    json.EndArray();
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+bool VmaDedicatedAllocationList::IsEmpty()
+{
+    VmaMutexLockRead lock(m_Mutex, m_UseMutex);
+    return m_AllocationList.IsEmpty();
+}
+
+void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
+{
+    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
+    m_AllocationList.PushBack(alloc);
+}
+
+void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
+{
+    VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
+    m_AllocationList.Remove(alloc);
+}
+#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
+#endif // _VMA_DEDICATED_ALLOCATION_LIST
+
+#ifndef _VMA_SUBALLOCATION
+/*
+Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
+allocated memory block or free.
+*/
+struct VmaSuballocation
+{
+    VkDeviceSize offset;
+    VkDeviceSize size;
+    void* userData;
+    VmaSuballocationType type;
+};
+
+// Comparator for offsets.
+struct VmaSuballocationOffsetLess +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset < rhs.offset; + } +}; + +struct VmaSuballocationOffsetGreater +{ + bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + { + return lhs.offset > rhs.offset; + } +}; + +struct VmaSuballocationItemSizeLess +{ + bool operator()(const VmaSuballocationList::iterator lhs, + const VmaSuballocationList::iterator rhs) const + { + return lhs->size < rhs->size; + } + + bool operator()(const VmaSuballocationList::iterator lhs, + VkDeviceSize rhsSize) const + { + return lhs->size < rhsSize; + } +}; +#endif // _VMA_SUBALLOCATION + +#ifndef _VMA_ALLOCATION_REQUEST +/* +Parameters of planned allocation inside a VmaDeviceMemoryBlock. +item points to a FREE suballocation. +*/ +struct VmaAllocationRequest +{ + VmaAllocHandle allocHandle; + VkDeviceSize size; + VmaSuballocationList::iterator item; + void* customData; + uint64_t algorithmData; + VmaAllocationRequestType type; +}; +#endif // _VMA_ALLOCATION_REQUEST + +#ifndef _VMA_BLOCK_METADATA +/* +Data structure used for bookkeeping of allocations and unused ranges of memory +in a single VkDeviceMemory block. +*/ +class VmaBlockMetadata +{ +public: + // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. + VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata() = default; + + virtual void Init(VkDeviceSize size) { m_Size = size; } + bool IsVirtual() const { return m_IsVirtual; } + VkDeviceSize GetSize() const { return m_Size; } + + // Validates all data structures inside this object. If not valid, returns false. + virtual bool Validate() const = 0; + virtual size_t GetAllocationCount() const = 0; + virtual size_t GetFreeRegionsCount() const = 0; + virtual VkDeviceSize GetSumFreeSize() const = 0; + // Returns true if this block is empty - contains only single free suballocation. + virtual bool IsEmpty() const = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; + virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + + virtual VmaAllocHandle GetAllocationListBegin() const = 0; + virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; + virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; + + // Shouldn't modify blockCount. + virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; + +#if VMA_STATS_STRING_ENABLED + virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; +#endif + + // Tries to find a place for suballocation with given parameters inside this block. + // If succeeded, fills pAllocationRequest and returns true. + // If failed, returns false. + virtual bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) = 0; + + virtual VkResult CheckCorruption(const void* pBlockData) = 0; + + // Makes actual allocation based on request. Request must already be checked and valid. 
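+    // Allocation is a two-phase protocol (illustrative sketch; `pMetadata`,
+    // `size`, `alignment`, and `pUserData` are hypothetical caller variables):
+    //     VmaAllocationRequest req;
+    //     if (pMetadata->CreateAllocationRequest(size, alignment, false,
+    //             VMA_SUBALLOCATION_TYPE_BUFFER, 0 /* strategy */, &req))
+    //         pMetadata->Alloc(req, VMA_SUBALLOCATION_TYPE_BUFFER, pUserData);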
+    virtual void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        void* userData) = 0;
+
+    // Frees suballocation assigned to given memory region.
+    virtual void Free(VmaAllocHandle allocHandle) = 0;
+
+    // Frees all allocations.
+    // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
+    virtual void Clear() = 0;
+
+    virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
+    virtual void DebugLogAllAllocations() const = 0;
+
+protected:
+    const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
+    VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
+    VkDeviceSize GetDebugMargin() const { return IsVirtual() ? 0 : VMA_DEBUG_MARGIN; }
+
+    void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
+#if VMA_STATS_STRING_ENABLED
+    // mapRefCount == UINT32_MAX means unspecified.
+    void PrintDetailedMap_Begin(class VmaJsonWriter& json,
+        VkDeviceSize unusedBytes,
+        size_t allocationCount,
+        size_t unusedRangeCount) const;
+    void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
+        VkDeviceSize offset, VkDeviceSize size, void* userData) const;
+    void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
+        VkDeviceSize offset,
+        VkDeviceSize size) const;
+    void PrintDetailedMap_End(class VmaJsonWriter& json) const;
+#endif
+
+private:
+    VkDeviceSize m_Size;
+    const VkAllocationCallbacks* m_pAllocationCallbacks;
+    const VkDeviceSize m_BufferImageGranularity;
+    const bool m_IsVirtual;
+};
+
+#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
+VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
+    VkDeviceSize bufferImageGranularity, bool isVirtual)
+    : m_Size(0),
+    m_pAllocationCallbacks(pAllocationCallbacks),
+    m_BufferImageGranularity(bufferImageGranularity),
+    m_IsVirtual(isVirtual) {}
+
+void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
+{
+    if (IsVirtual())
+    {
+        VMA_DEBUG_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
+    }
+    else
+    {
+        VMA_ASSERT(userData != VMA_NULL);
+        VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
+
+        userData = allocation->GetUserData();
+        const char* name = allocation->GetName();
+
+#if VMA_STATS_STRING_ENABLED
+        VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
+            offset, size, userData, name ? name : "vma_empty",
+            VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
+            allocation->GetBufferImageUsage());
+#else
+        VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
+            offset, size, userData, name ?
name : "vma_empty", + (uint32_t)allocation->GetSuballocationType()); +#endif // VMA_STATS_STRING_ENABLED + } + +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const +{ + json.WriteString("TotalBytes"); + json.WriteNumber(GetSize()); + + json.WriteString("UnusedBytes"); + json.WriteNumber(unusedBytes); + + json.WriteString("Allocations"); + json.WriteNumber((uint64_t)allocationCount); + + json.WriteString("UnusedRanges"); + json.WriteNumber((uint64_t)unusedRangeCount); + + json.WriteString("Suballocations"); + json.BeginArray(); +} + +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size, void* userData) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + if (IsVirtual()) + { + json.WriteString("Size"); + json.WriteNumber(size); + if (userData) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(userData); + json.EndString(); + } + } + else + { + ((VmaAllocation)userData)->PrintParameters(json); + } + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, + VkDeviceSize offset, VkDeviceSize size) const +{ + json.BeginObject(true); + + json.WriteString("Offset"); + json.WriteNumber(offset); + + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]); + + json.WriteString("Size"); + json.WriteNumber(size); + + json.EndObject(); +} + +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const +{ + json.EndArray(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_BLOCK_METADATA_FUNCTIONS +#endif // _VMA_BLOCK_METADATA + +#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY +// Before deleting object of this class remember to call 'Destroy()' +class VmaBlockBufferImageGranularity final +{ +public: + struct ValidationContext + { + const VkAllocationCallbacks* allocCallbacks; + uint16_t* pageAllocs; + }; + + VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); + ~VmaBlockBufferImageGranularity(); + + bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } + + void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); + // Before destroying object you must call free it's memory + void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); + + void RoundupAllocRequest(VmaSuballocationType allocType, + VkDeviceSize& inOutAllocSize, + VkDeviceSize& inOutAllocAlignment) const; + + bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const; + + void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); + void FreePages(VkDeviceSize offset, VkDeviceSize size); + void Clear(); + + ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, + bool isVirutal) const; + bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; + bool FinishValidation(ValidationContext& ctx) const; + +private: + static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; + + struct RegionInfo + { + uint8_t allocType; + uint16_t allocCount; + }; + + VkDeviceSize m_BufferImageGranularity; + uint32_t m_RegionCount; + RegionInfo* m_RegionInfo; + + uint32_t 
GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
+    uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
+
+    uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
+    void AllocPage(RegionInfo& page, uint8_t allocType);
+};
+
+#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
+VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
+    : m_BufferImageGranularity(bufferImageGranularity),
+    m_RegionCount(0),
+    m_RegionInfo(VMA_NULL) {}
+
+VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
+{
+    VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
+}
+
+void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
+{
+    if (IsEnabled())
+    {
+        m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
+        m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
+        memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
+    }
+}
+
+void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
+{
+    if (m_RegionInfo)
+    {
+        vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
+        m_RegionInfo = VMA_NULL;
+    }
+}
+
+void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
+    VkDeviceSize& inOutAllocSize,
+    VkDeviceSize& inOutAllocAlignment) const
+{
+    if (m_BufferImageGranularity > 1 &&
+        m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
+    {
+        if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+            allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+            allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+        {
+            inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
+            inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
+        }
+    }
+}
+
+bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
+    VkDeviceSize allocSize,
+    VkDeviceSize blockOffset,
+    VkDeviceSize blockSize,
+    VmaSuballocationType allocType) const
+{
+    if (IsEnabled())
+    {
+        uint32_t startPage = GetStartPage(inOutAllocOffset);
+        if (m_RegionInfo[startPage].allocCount > 0 &&
+            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
+        {
+            inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
+            if (blockSize < allocSize + inOutAllocOffset - blockOffset)
+                return true;
+            ++startPage;
+        }
+        uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
+        if (endPage != startPage &&
+            m_RegionInfo[endPage].allocCount > 0 &&
+            VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
+{
+    if (IsEnabled())
+    {
+        uint32_t startPage = GetStartPage(offset);
+        AllocPage(m_RegionInfo[startPage], allocType);
+
+        uint32_t endPage = GetEndPage(offset, size);
+        if (startPage != endPage)
+            AllocPage(m_RegionInfo[endPage], allocType);
+    }
+}
+
+void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
+{
+    if (IsEnabled())
+    {
+        uint32_t startPage = GetStartPage(offset);
+        --m_RegionInfo[startPage].allocCount;
+        if (m_RegionInfo[startPage].allocCount == 0)
m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + uint32_t endPage = GetEndPage(offset, size); + if (startPage != endPage) + { + --m_RegionInfo[endPage].allocCount; + if (m_RegionInfo[endPage].allocCount == 0) + m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE; + } + } +} + +void VmaBlockBufferImageGranularity::Clear() +{ + if (m_RegionInfo) + memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo)); +} + +VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( + const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const +{ + ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; + if (!isVirutal && IsEnabled()) + { + ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); + memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t)); + } + return ctx; +} + +bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, + VkDeviceSize offset, VkDeviceSize size) const +{ + if (IsEnabled()) + { + uint32_t start = GetStartPage(offset); + ++ctx.pageAllocs[start]; + VMA_VALIDATE(m_RegionInfo[start].allocCount > 0); + + uint32_t end = GetEndPage(offset, size); + if (start != end) + { + ++ctx.pageAllocs[end]; + VMA_VALIDATE(m_RegionInfo[end].allocCount > 0); + } + } + return true; +} + +bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const +{ + // Check proper page structure + if (IsEnabled()) + { + VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!"); + + for (uint32_t page = 0; page < m_RegionCount; ++page) + { + VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount); + } + vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount); + ctx.pageAllocs = VMA_NULL; + } + return true; +} + +uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const +{ + return static_cast(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); +} + +void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) +{ + // When current alloc type is free then it can be overridden by new type + if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) + page.allocType = allocType; + + ++page.allocCount; +} +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS +#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY + +#if 0 +#ifndef _VMA_BLOCK_METADATA_GENERIC +class VmaBlockMetadata_Generic : public VmaBlockMetadata +{ + friend class VmaDefragmentationAlgorithm_Generic; + friend class VmaDefragmentationAlgorithm_Fast; + VMA_CLASS_NO_COPY(VmaBlockMetadata_Generic) +public: + VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Generic() = default; + + size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); } + void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& 
inoutStats) const override;
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
+#endif
+
+    bool CreateAllocationRequest(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        bool upperAddress,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest) override;
+
+    VkResult CheckCorruption(const void* pBlockData) override;
+
+    void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        void* userData) override;
+
+    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+    VmaAllocHandle GetAllocationListBegin() const override;
+    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+    void Clear() override;
+    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+    void DebugLogAllAllocations() const override;
+
+private:
+    uint32_t m_FreeCount;
+    VkDeviceSize m_SumFreeSize;
+    VmaSuballocationList m_Suballocations;
+    // Suballocations that are free. Sorted by size, ascending.
+    VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
+
+    VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
+
+    VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
+    bool ValidateFreeSuballocationList() const;
+
+    // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
+    // If yes, fills pOffset and returns true. If no, returns false.
+    bool CheckAllocation(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        VmaSuballocationType allocType,
+        VmaSuballocationList::const_iterator suballocItem,
+        VmaAllocHandle* pAllocHandle) const;
+
+    // Given free suballocation, it merges it with following one, which must also be free.
+    void MergeFreeWithNext(VmaSuballocationList::iterator item);
+    // Releases given suballocation, making it free.
+    // Merges it with adjacent free suballocations if applicable.
+    // Returns iterator to new free suballocation at this place.
+    VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
+    // Given free suballocation, it inserts it into sorted list of
+    // m_FreeSuballocationsBySize if it is suitable.
+    void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
+    // Given free suballocation, it removes it from sorted list of
+    // m_FreeSuballocationsBySize if it is suitable.
+ void UnregisterFreeSuballocation(VmaSuballocationList::iterator item); +}; + +#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS +VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_FreeCount(0), + m_SumFreeSize(0), + m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), + m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {} + +void VmaBlockMetadata_Generic::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + m_FreeCount = 1; + m_SumFreeSize = size; + + VmaSuballocation suballoc = {}; + suballoc.offset = 0; + suballoc.size = size; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + + m_Suballocations.push_back(suballoc); + m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); +} + +bool VmaBlockMetadata_Generic::Validate() const +{ + VMA_VALIDATE(!m_Suballocations.empty()); + + // Expected offset of new suballocation as calculated from previous ones. + VkDeviceSize calculatedOffset = 0; + // Expected number of free suballocations as calculated from traversing their list. + uint32_t calculatedFreeCount = 0; + // Expected sum size of free suballocations as calculated from traversing their list. + VkDeviceSize calculatedSumFreeSize = 0; + // Expected number of free suballocations that should be registered in + // m_FreeSuballocationsBySize calculated from traversing their list. + size_t freeSuballocationsToRegister = 0; + // True if previous visited suballocation was free. + bool prevFree = false; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + for (const auto& subAlloc : m_Suballocations) + { + // Actual offset of this suballocation doesn't match expected one. + VMA_VALIDATE(subAlloc.offset == calculatedOffset); + + const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE); + // Two adjacent free suballocations are invalid. They should be merged. + VMA_VALIDATE(!prevFree || !currFree); + + VmaAllocation alloc = (VmaAllocation)subAlloc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + + if (currFree) + { + calculatedSumFreeSize += subAlloc.size; + ++calculatedFreeCount; + ++freeSuballocationsToRegister; + + // Margin required between allocations - every free space must be at least that large. + VMA_VALIDATE(subAlloc.size >= debugMargin); + } + else + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == subAlloc.size); + } + + // Margin required between allocations - previous allocation must be free. + VMA_VALIDATE(debugMargin == 0 || prevFree); + } + + calculatedOffset += subAlloc.size; + prevFree = currFree; + } + + // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't + // match expected one. + VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister); + + VkDeviceSize lastSize = 0; + for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i) + { + VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i]; + + // Only free suballocations can be registered in m_FreeSuballocationsBySize. + VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE); + // They must be sorted by size ascending. + VMA_VALIDATE(suballocItem->size >= lastSize); + + lastSize = suballocItem->size; + } + + // Check if totals match calculated values.
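A note on what Validate() above is guarding: the generic metadata keeps three redundant views of the block (the suballocation list, the cached free count/size totals, and the size-sorted free vector), and validation recomputes the totals in a single walk and compares them against the cache. A minimal sketch of the same idea over a simplified record type (names here are illustrative, not VMA's):

```cpp
#include <cstdint>
#include <vector>

// Illustrative stand-in for a suballocation record.
struct Sub { uint64_t offset, size; bool free; };

// Recompute totals from scratch and compare against the cached ones,
// mirroring the walk in VmaBlockMetadata_Generic::Validate().
bool ValidateBlock(const std::vector<Sub>& subs, uint64_t blockSize,
                   uint64_t cachedSumFree, uint32_t cachedFreeCount)
{
    uint64_t expectedOffset = 0, sumFree = 0;
    uint32_t freeCount = 0;
    bool prevFree = false;
    for (const Sub& s : subs)
    {
        if (s.offset != expectedOffset) return false; // list must be contiguous
        if (prevFree && s.free) return false;         // adjacent frees must be merged
        if (s.free) { sumFree += s.size; ++freeCount; }
        expectedOffset += s.size;
        prevFree = s.free;
    }
    return expectedOffset == blockSize && sumFree == cachedSumFree && freeCount == cachedFreeCount;
}
```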
+ VMA_VALIDATE(ValidateFreeSuballocationList()); + VMA_VALIDATE(calculatedOffset == GetSize()); + VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize); + VMA_VALIDATE(calculatedFreeCount == m_FreeCount); + + return true; +} + +void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const uint32_t rangeCount = (uint32_t)m_Suballocations.size(); + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + else + VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size); + } +} + +void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount; + inoutStats.blockBytes += GetSize(); + inoutStats.allocationBytes += GetSize() - m_SumFreeSize; +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const +{ + PrintDetailedMap_Begin(json, + m_SumFreeSize, // unusedBytes + m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount + m_FreeCount, // unusedRangeCount + mapRefCount); + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE) + { + PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size); + } + else + { + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Generic::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(!upperAddress); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + + allocSize = AlignAllocationSize(allocSize); + + pAllocationRequest->type = VmaAllocationRequestType::Normal; + pAllocationRequest->size = allocSize; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // There is not enough total free space in this block to fulfill the request: Early return. + if (m_SumFreeSize < allocSize + debugMargin) + { + return false; + } + + // New algorithm, efficiently searching freeSuballocationsBySize. + const size_t freeSuballocCount = m_FreeSuballocationsBySize.size(); + if (freeSuballocCount > 0) + { + if (strategy == 0 || + strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT) + { + // Find first free suballocation with size not less than allocSize + debugMargin. 
+ VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + freeSuballocCount, + allocSize + debugMargin, + VmaSuballocationItemSizeLess()); + size_t index = it - m_FreeSuballocationsBySize.data(); + for (; index < freeSuballocCount; ++index) + { + if (CheckAllocation( + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + &pAllocationRequest->allocHandle)) + { + pAllocationRequest->item = m_FreeSuballocationsBySize[index]; + return true; + } + } + } + else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET) + { + for (VmaSuballocationList::iterator it = m_Suballocations.begin(); + it != m_Suballocations.end(); + ++it) + { + if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation( + allocSize, + allocAlignment, + allocType, + it, + &pAllocationRequest->allocHandle)) + { + pAllocationRequest->item = it; + return true; + } + } + } + else + { + VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT)); + // Search starting from biggest suballocations. + for (size_t index = freeSuballocCount; index--; ) + { + if (CheckAllocation( + allocSize, + allocAlignment, + allocType, + m_FreeSuballocationsBySize[index], + &pAllocationRequest->allocHandle)) + { + pAllocationRequest->item = m_FreeSuballocationsBySize[index]; + return true; + } + } + } + } + + return false; +} + +VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData) +{ + for (auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Generic::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + VMA_ASSERT(request.type == VmaAllocationRequestType::Normal); + VMA_ASSERT(request.item != m_Suballocations.end()); + VmaSuballocation& suballoc = *request.item; + // Given suballocation is a free block. + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + // Given offset is inside this suballocation. + VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset); + const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1; + VMA_ASSERT(suballoc.size >= paddingBegin + request.size); + const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size; + + // Unregister this free suballocation from m_FreeSuballocationsBySize and update + // it to become used. + UnregisterFreeSuballocation(request.item); + + suballoc.offset = (VkDeviceSize)request.allocHandle - 1; + suballoc.size = request.size; + suballoc.type = type; + suballoc.userData = userData; + + // If there are any free bytes remaining at the end, insert new free suballocation after current one.
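The MIN_MEMORY branch above relies on m_FreeSuballocationsBySize being sorted by size ascending, so a binary search lands on the smallest candidate that can possibly fit and the loop only walks larger ones. A rough standard-library equivalent (types simplified; VmaBinaryFindFirstNotLess behaves like std::lower_bound here, which is an assumption about the helper, not a quote of it):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

struct FreeBlock { uint64_t offset, size; };

// Best-fit: first free block whose size is >= needed, in a vector
// sorted by size ascending. Larger candidates follow it and would be
// tried next if this one fails secondary checks (alignment, granularity).
const FreeBlock* FindBestFit(const std::vector<FreeBlock>& bySize, uint64_t needed)
{
    auto it = std::lower_bound(bySize.begin(), bySize.end(), needed,
        [](const FreeBlock& b, uint64_t sz) { return b.size < sz; });
    return it != bySize.end() ? &*it : nullptr;
}
```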
+ if (paddingEnd) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = suballoc.offset + suballoc.size; + paddingSuballoc.size = paddingEnd; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + VmaSuballocationList::iterator next = request.item; + ++next; + const VmaSuballocationList::iterator paddingEndItem = + m_Suballocations.insert(next, paddingSuballoc); + RegisterFreeSuballocation(paddingEndItem); + } + + // If there are any free bytes remaining at the beginning, insert new free suballocation before current one. + if (paddingBegin) + { + VmaSuballocation paddingSuballoc = {}; + paddingSuballoc.offset = suballoc.offset - paddingBegin; + paddingSuballoc.size = paddingBegin; + paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + const VmaSuballocationList::iterator paddingBeginItem = + m_Suballocations.insert(request.item, paddingSuballoc); + RegisterFreeSuballocation(paddingBeginItem); + } + + // Update totals. + m_FreeCount = m_FreeCount - 1; + if (paddingBegin > 0) + { + ++m_FreeCount; + } + if (paddingEnd > 0) + { + ++m_FreeCount; + } + m_SumFreeSize -= request.size; +} + +void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData; +} + +VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const +{ + if (IsEmpty()) + return VK_NULL_HANDLE; + + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + return (VmaAllocHandle)(suballoc.offset + 1); + } + VMA_ASSERT(false && "Should contain at least 1 allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1); + + for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it) + { + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + return (VmaAllocHandle)(it->offset + 1); + } + return VK_NULL_HANDLE; +} + +void VmaBlockMetadata_Generic::Clear() +{ + const VkDeviceSize size = GetSize(); + + VMA_ASSERT(IsVirtual()); + m_FreeCount = 1; + m_SumFreeSize = size; + m_Suballocations.clear(); + m_FreeSuballocationsBySize.clear(); + + VmaSuballocation suballoc = {}; + suballoc.offset = 0; + suballoc.size = size; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + m_Suballocations.push_back(suballoc); + + m_FreeSuballocationsBySize.push_back(m_Suballocations.begin()); +} + +void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Generic::DebugLogAllAllocations() const +{ + for (const auto& suballoc : m_Suballocations) + { + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData); + } +} + +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const +{ + VMA_HEAVY_ASSERT(!m_Suballocations.empty()); + const VkDeviceSize last = m_Suballocations.rbegin()->offset; + if (last == offset) + return 
m_Suballocations.rbegin().drop_const(); + const VkDeviceSize first = m_Suballocations.begin()->offset; + if (first == offset) + return m_Suballocations.begin().drop_const(); + + const size_t suballocCount = m_Suballocations.size(); + const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount; + auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator + { + for (auto suballocItem = begin; + suballocItem != end; + ++suballocItem) + { + if (suballocItem->offset == offset) + return suballocItem.drop_const(); + } + VMA_ASSERT(false && "Not found!"); + return m_Suballocations.end().drop_const(); + }; + // If requested offset is closer to the end of range, search from the end + if (offset - first > suballocCount * step / 2) + { + return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend()); + } + return findSuballocation(m_Suballocations.begin(), m_Suballocations.end()); +} + +bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const +{ + VkDeviceSize lastSize = 0; + for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i) + { + const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i]; + + VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_VALIDATE(it->size >= lastSize); + lastSize = it->size; + } + return true; +} + +bool VmaBlockMetadata_Generic::CheckAllocation( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaSuballocationList::const_iterator suballocItem, + VmaAllocHandle* pAllocHandle) const +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(suballocItem != m_Suballocations.cend()); + VMA_ASSERT(pAllocHandle != VMA_NULL); + + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + + const VmaSuballocation& suballoc = *suballocItem; + VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + // Size of this suballocation is too small for this request: Early return. + if (suballoc.size < allocSize) + { + return false; + } + + // Start from offset equal to beginning of this suballocation. + VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin()); + + // Apply debugMargin from the end of previous alloc. + if (debugMargin > 0) + { + offset += debugMargin; + } + + // Apply alignment. + offset = VmaAlignUp(offset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment) + { + bool bufferImageGranularityConflict = false; + VmaSuballocationList::const_iterator prevSuballocItem = suballocItem; + while (prevSuballocItem != m_Suballocations.cbegin()) + { + --prevSuballocItem; + const VmaSuballocation& prevSuballoc = *prevSuballocItem; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + offset = VmaAlignUp(offset, bufferImageGranularity); + } + } + + // Calculate padding at the beginning based on current offset. 
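FindAtOffset above avoids always scanning from the front of the list: it estimates the average stride between suballocations and, when the requested offset lies in the upper half of the occupied range, walks backwards from the end instead. The same idea on a plain container (illustrative types, not VMA's intrusive list; entries assumed sorted by offset):

```cpp
#include <cstdint>
#include <vector>

struct Sub { uint64_t offset, size; };

// Linear search from whichever end is estimated to be closer.
// Assumes subs is sorted by offset and offset >= subs.front().offset.
const Sub* FindAtOffset(const std::vector<Sub>& subs, uint64_t offset)
{
    if (subs.empty()) return nullptr;
    const uint64_t first = subs.front().offset;
    const uint64_t last = subs.back().offset;
    if (offset - first > (last - first) / 2)
    {   // Closer to the end: scan backwards.
        for (size_t i = subs.size(); i--; )
            if (subs[i].offset == offset) return &subs[i];
    }
    else
    {   // Closer to the beginning: scan forwards.
        for (size_t i = 0; i < subs.size(); ++i)
            if (subs[i].offset == offset) return &subs[i];
    }
    return nullptr;
}
```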
+ const VkDeviceSize paddingBegin = offset - suballoc.offset; + + // Fail if requested size plus margin after is bigger than size of this suballocation. + if (paddingBegin + allocSize + debugMargin > suballoc.size) + { + return false; + } + + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (allocSize % bufferImageGranularity || offset % bufferImageGranularity) + { + VmaSuballocationList::const_iterator nextSuballocItem = suballocItem; + ++nextSuballocItem; + while (nextSuballocItem != m_Suballocations.cend()) + { + const VmaSuballocation& nextSuballoc = *nextSuballocItem; + if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + ++nextSuballocItem; + } + } + + *pAllocHandle = (VmaAllocHandle)(offset + 1); + // All tests passed: Success. pAllocHandle is already filled. + return true; +} + +void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item != m_Suballocations.end()); + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaSuballocationList::iterator nextItem = item; + ++nextItem; + VMA_ASSERT(nextItem != m_Suballocations.end()); + VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE); + + item->size += nextItem->size; + --m_FreeCount; + m_Suballocations.erase(nextItem); +} + +VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem) +{ + // Change this suballocation to be marked as free. + VmaSuballocation& suballoc = *suballocItem; + suballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + suballoc.userData = VMA_NULL; + + // Update totals. + ++m_FreeCount; + m_SumFreeSize += suballoc.size; + + // Merge with previous and/or next suballocation if it's also free. + bool mergeWithNext = false; + bool mergeWithPrev = false; + + VmaSuballocationList::iterator nextItem = suballocItem; + ++nextItem; + if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE)) + { + mergeWithNext = true; + } + + VmaSuballocationList::iterator prevItem = suballocItem; + if (suballocItem != m_Suballocations.begin()) + { + --prevItem; + if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE) + { + mergeWithPrev = true; + } + } + + if (mergeWithNext) + { + UnregisterFreeSuballocation(nextItem); + MergeFreeWithNext(suballocItem); + } + + if (mergeWithPrev) + { + UnregisterFreeSuballocation(prevItem); + MergeFreeWithNext(prevItem); + RegisterFreeSuballocation(prevItem); + return prevItem; + } + else + { + RegisterFreeSuballocation(suballocItem); + return suballocItem; + } +} + +void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(item->size > 0); + + // You may want to enable this validation at the beginning or at the end of + // this function, depending on what you want to check.
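The bufferImageGranularity checks in CheckAllocation hinge on a "same page" test: two resources conflict only if the end of one and the start of the other land on the same granularity-sized page and their tiling types differ (linear vs. optimal). A sketch of that page test, assuming the granularity is a power of two as Vulkan guarantees:

```cpp
#include <cstdint>

// True if resource A (ending at offsetA + sizeA - 1) and resource B
// (starting at offsetB) touch the same bufferImageGranularity page.
bool BlocksOnSamePage(uint64_t offsetA, uint64_t sizeA,
                      uint64_t offsetB, uint64_t pageSize)
{
    const uint64_t endA = offsetA + sizeA - 1;         // last byte of A
    const uint64_t endPageA = endA & ~(pageSize - 1);  // page containing it
    const uint64_t startPageB = offsetB & ~(pageSize - 1);
    return endPageA == startPageB;
}
```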
+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + if (m_FreeSuballocationsBySize.empty()) + { + m_FreeSuballocationsBySize.push_back(item); + } + else + { + VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item); + } + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} + +void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item) +{ + VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(item->size > 0); + + // You may want to enable this validation at the beginning or at the end of + // this function, depending on what you want to check. + VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); + + VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess( + m_FreeSuballocationsBySize.data(), + m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(), + item, + VmaSuballocationItemSizeLess()); + for (size_t index = it - m_FreeSuballocationsBySize.data(); + index < m_FreeSuballocationsBySize.size(); + ++index) + { + if (m_FreeSuballocationsBySize[index] == item) + { + VmaVectorRemove(m_FreeSuballocationsBySize, index); + return; + } + VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found."); + } + VMA_ASSERT(0 && "Not found."); + + //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList()); +} +#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_GENERIC +#endif // #if 0 + +#ifndef _VMA_BLOCK_METADATA_LINEAR
+/*
+Allocations and their references in internal data structure look like this:
+
+if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
+
+        0 +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount]
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  1st[1st.size() - 1]
+          +-------+
+          |       |
+          |       |
+          |       |
+GetSize() +-------+
+
+if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
+
+        0 +-------+
+          | Alloc |  2nd[0]
+          +-------+
+          | Alloc |  2nd[1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  2nd[2nd.size() - 1]
+          +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount]
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  1st[1st.size() - 1]
+          +-------+
+          |       |
+GetSize() +-------+
+
+if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
+
+        0 +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount]
+          +-------+
+          | Alloc |  1st[m_1stNullItemsBeginCount + 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  1st[1st.size() - 1]
+          +-------+
+          |       |
+          |       |
+          |       |
+          +-------+
+          | Alloc |  2nd[2nd.size() - 1]
+          +-------+
+          |  ...  |
+          +-------+
+          | Alloc |  2nd[1]
+          +-------+
+          | Alloc |  2nd[0]
+GetSize() +-------+
+
+*/ +class VmaBlockMetadata_Linear : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Linear) +public: + VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Linear() = default; + + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } + bool IsEmpty() const override { return GetAllocationCount() == 0; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + size_t GetAllocationCount() const override; + size_t GetFreeRegionsCount() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + VkResult CheckCorruption(const void* pBlockData) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void DebugLogAllAllocations() const override; + +private: + /* + There are two suballocation vectors, used in a ping-pong way. + The one with index m_1stVectorIndex is called 1st. + The one with index (m_1stVectorIndex ^ 1) is called 2nd. + 2nd can be non-empty only when 1st is not empty. + When 2nd is not empty, m_2ndVectorMode indicates its mode of operation. + */ + typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType; + + enum SECOND_VECTOR_MODE + { + SECOND_VECTOR_EMPTY, + /* + Suballocations in 2nd vector are created later than the ones in 1st, but they + all have smaller offset. + */ + SECOND_VECTOR_RING_BUFFER, + /* + Suballocations in 2nd vector are upper side of double stack. + They all have offsets higher than those in 1st vector. + Top of this stack means smaller offsets, but higher indices in this vector. + */ + SECOND_VECTOR_DOUBLE_STACK, + }; + + VkDeviceSize m_SumFreeSize; + SuballocationVectorType m_Suballocations0, m_Suballocations1; + uint32_t m_1stVectorIndex; + SECOND_VECTOR_MODE m_2ndVectorMode; + // Number of items in 1st vector with hAllocation = null at the beginning. + size_t m_1stNullItemsBeginCount; + // Number of other items in 1st vector with hAllocation = null somewhere in the middle. + size_t m_1stNullItemsMiddleCount; + // Number of items in 2nd vector with hAllocation = null. + size_t m_2ndNullItemsCount; + + SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ?
m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + + VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; + bool ShouldCompact1st() const; + void CleanupAfterFree(); + + bool CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); + bool CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), + m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) {} + +void VmaBlockMetadata_Linear::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + m_SumFreeSize = size; +} + +bool VmaBlockMetadata_Linear::Validate() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); + VMA_VALIDATE(!suballocations1st.empty() || + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + + if (!suballocations1st.empty()) + { + // Null item at the beginning should be accounted into m_1stNullItemsBeginCount. + VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE); + // Null item at the end should be just pop_back(). + VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + if (!suballocations2nd.empty()) + { + // Null item at the end should be just pop_back().
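The 1st/2nd vector indirection above is a ping-pong scheme: both vectors are owned by the object and a single index selects which one currently plays the "1st" role, so swapping roles after the 1st vector empties is a one-bit XOR with no element copies. Reduced to its core (an illustrative sketch, not VMA's types):

```cpp
#include <cstdint>
#include <vector>

struct PingPong
{
    std::vector<int> v0, v1;
    uint32_t firstIndex = 0; // selects which vector is "1st"

    std::vector<int>& first()  { return firstIndex ? v1 : v0; }
    std::vector<int>& second() { return firstIndex ? v0 : v1; }

    // O(1) role swap: the old 2nd becomes the new 1st.
    void swapRoles() { firstIndex ^= 1; }
};
```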
+ VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE); + } + + VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size()); + VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size()); + + VkDeviceSize sumUsedSize = 0; + const size_t suballoc1stCount = suballocations1st.size(); + const VkDeviceSize debugMargin = GetDebugMargin(); + VkDeviceSize offset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = 0; i < suballoc2ndCount; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && + suballoc.userData == VMA_NULL); + } + + size_t nullItem1stCount = m_1stNullItemsBeginCount; + + for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem1stCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount); + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + const size_t suballoc2ndCount = suballocations2nd.size(); + size_t nullItem2ndCount = 0; + for (size_t i = suballoc2ndCount; i--; ) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); + + VmaAllocation const alloc = (VmaAllocation)suballoc.userData; + if (!IsVirtual()) + { + VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE)); + } + VMA_VALIDATE(suballoc.offset >= offset); + + if (!currFree) + { + if (!IsVirtual()) + { + VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1); + VMA_VALIDATE(alloc->GetSize() == suballoc.size); + } + sumUsedSize += suballoc.size; + } + else + { + ++nullItem2ndCount; + } + + offset = suballoc.offset + suballoc.size + debugMargin; + } + + VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount); + } + + VMA_VALIDATE(offset <= GetSize()); + VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize); + + return true; +} + +size_t VmaBlockMetadata_Linear::GetAllocationCount() const +{ + 
return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + + AccessSuballocations2nd().size() - m_2ndNullItemsCount; +} + +size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return SIZE_MAX; +} + +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += size; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + if (lastOffset < freeSpace2ndTo1stEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. 
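AddDetailedStatistics above never stores free ranges explicitly for the linear algorithm; it reconstructs them on the fly by carrying lastOffset through the walk and treating any gap before the next non-null allocation as unused space. The traversal skeleton, stripped of the three-region bookkeeping (simplified types, not VMA's):

```cpp
#include <cstdint>
#include <vector>

struct Sub { uint64_t offset, size; void* userData; };

// Accumulate used/unused bytes from a vector that may contain
// null (freed) entries, deriving free ranges from the gaps.
void AddStats(const std::vector<Sub>& subs, uint64_t regionEnd,
              uint64_t& usedBytes, uint64_t& unusedBytes)
{
    uint64_t lastOffset = 0;
    for (const Sub& s : subs)
    {
        if (s.userData == nullptr) continue;      // skip freed slots
        if (lastOffset < s.offset)
            unusedBytes += s.offset - lastOffset; // gap = free range
        usedBytes += s.size;
        lastOffset = s.offset + s.size;
    }
    if (lastOffset < regionEnd)
        unusedBytes += regionEnd - lastOffset;    // trailing free space
}
```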
+ lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + if (lastOffset < freeSpace1stTo2ndEnd) + { + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // There is free space from lastOffset to size. + if (lastOffset < size) + { + const VkDeviceSize unusedRangeSize = size - lastOffset; + VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } +} + +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const VkDeviceSize size = GetSize(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + inoutStats.blockCount++; + inoutStats.blockBytes += size; + inoutStats.allocationBytes += size - m_SumFreeSize; + + VkDeviceSize lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++inoutStats.allocationCount; + + // Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + // End of loop. + lastOffset = size; + } + } + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +{ + const VkDeviceSize size = GetSize(); + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const size_t suballoc1stCount = suballocations1st.size(); + const size_t suballoc2ndCount = suballocations2nd.size(); + + // FIRST PASS + + size_t unusedRangeCount = 0; + VkDeviceSize usedBytes = 0; + + VkDeviceSize lastOffset = 0; + + size_t alloc2ndCount = 0; + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + ++unusedRangeCount; + } + + // End of loop. 
+ lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + size_t nextAlloc1stIndex = m_1stNullItemsBeginCount; + size_t alloc1stCount = 0; + const VkDeviceSize freeSpace1stTo2ndEnd = + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc1stCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + ++unusedRangeCount; + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + ++alloc2ndCount; + usedBytes += suballoc.size; + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. + ++unusedRangeCount; + } + + // End of loop. + lastOffset = size; + } + } + } + + const VkDeviceSize unusedBytes = size - usedBytes; + PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount); + + // SECOND PASS + lastOffset = 0; + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset; + size_t nextAlloc2ndIndex = 0; + while (lastOffset < freeSpace2ndTo1stEnd) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex < suballoc2ndCount && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + ++nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex < suballoc2ndCount) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. 
+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace2ndTo1stEnd) + { + // There is free space from lastOffset to freeSpace2ndTo1stEnd. + const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace2ndTo1stEnd; + } + } + } + + nextAlloc1stIndex = m_1stNullItemsBeginCount; + while (lastOffset < freeSpace1stTo2ndEnd) + { + // Find next non-null allocation or move nextAllocIndex to the end. + while (nextAlloc1stIndex < suballoc1stCount && + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + { + ++nextAlloc1stIndex; + } + + // Found non-null allocation. + if (nextAlloc1stIndex < suballoc1stCount) + { + const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + ++nextAlloc1stIndex; + } + // We are at the end. + else + { + if (lastOffset < freeSpace1stTo2ndEnd) + { + // There is free space from lastOffset to freeSpace1stTo2ndEnd. + const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = freeSpace1stTo2ndEnd; + } + } + + if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + size_t nextAlloc2ndIndex = suballocations2nd.size() - 1; + while (lastOffset < size) + { + // Find next non-null allocation or move nextAlloc2ndIndex to the end. + while (nextAlloc2ndIndex != SIZE_MAX && + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + { + --nextAlloc2ndIndex; + } + + // Found non-null allocation. + if (nextAlloc2ndIndex != SIZE_MAX) + { + const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + + // 1. Process free space before this allocation. + if (lastOffset < suballoc.offset) + { + // There is free space from lastOffset to suballoc.offset. + const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // 2. Process this allocation. + // There is allocation with suballoc.offset, suballoc.size. + PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData); + + // 3. Prepare for next iteration. + lastOffset = suballoc.offset + suballoc.size; + --nextAlloc2ndIndex; + } + // We are at the end. + else + { + if (lastOffset < size) + { + // There is free space from lastOffset to size. 
+ const VkDeviceSize unusedRangeSize = size - lastOffset; + PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize); + } + + // End of loop. + lastOffset = size; + } + } + } + + PrintDetailedMap_End(json); +} +#endif // VMA_STATS_STRING_ENABLED + +bool VmaBlockMetadata_Linear::CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(allocSize > 0); + VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); + VMA_ASSERT(pAllocationRequest != VMA_NULL); + VMA_HEAVY_ASSERT(Validate()); + pAllocationRequest->size = allocSize; + return upperAddress ? + CreateAllocationRequest_UpperAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : + CreateAllocationRequest_LowerAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest); +} + +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +{ + VMA_ASSERT(!IsVirtual()); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations1st[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) + { + const VmaSuballocation& suballoc = suballocations2nd[i]; + if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) + { + if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!"); + return VK_ERROR_UNKNOWN_COPY; + } + } + } + + return VK_SUCCESS; +} + +void VmaBlockMetadata_Linear::Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) +{ + const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; + const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; + + switch (request.type) + { + case VmaAllocationRequestType::UpperAddress: + { + VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + suballocations2nd.push_back(newSuballoc); + m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; + } + break; + case VmaAllocationRequestType::EndOf1st: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + + VMA_ASSERT(suballocations1st.empty() || + offset >= suballocations1st.back().offset + suballocations1st.back().size); + // Check if it fits before the end of the block. + VMA_ASSERT(offset + request.size <= GetSize()); + + suballocations1st.push_back(newSuballoc); + } + break; + case VmaAllocationRequestType::EndOf2nd: + { + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. 
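Alloc above dispatches on where the request was placed: the upper stack (double-stack mode), the end of the 1st vector, or the wrap-around region before the 1st vector's head (ring-buffer mode). The mode transitions it enforces can be summarized as a tiny state machine (a sketch of the invariants, not VMA's API):

```cpp
enum class SecondVectorMode { Empty, RingBuffer, DoubleStack };

// Mode transitions of the linear allocator's 2nd vector: an
// upper-address allocation commits the block to double-stack use, a
// wrap-around allocation commits it to ring-buffer use, and the two
// uses are mutually exclusive until the block empties again.
SecondVectorMode OnAlloc(SecondVectorMode mode, bool upperAddress, bool wrapped)
{
    if (upperAddress)
        return SecondVectorMode::DoubleStack; // invalid if already RingBuffer
    if (wrapped)
        return SecondVectorMode::RingBuffer;  // invalid if already DoubleStack
    return mode;                              // plain end-of-1st append
}
```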
+ VMA_ASSERT(!suballocations1st.empty() && + offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + switch (m_2ndVectorMode) + { + case SECOND_VECTOR_EMPTY: + // First allocation from second part ring buffer. + VMA_ASSERT(suballocations2nd.empty()); + m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER; + break; + case SECOND_VECTOR_RING_BUFFER: + // 2-part ring buffer is already started. + VMA_ASSERT(!suballocations2nd.empty()); + break; + case SECOND_VECTOR_DOUBLE_STACK: + VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack."); + break; + default: + VMA_ASSERT(0); + } + + suballocations2nd.push_back(newSuballoc); + } + break; + default: + VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR."); + } + + m_SumFreeSize -= newSuballoc.size; +} + +void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) +{ + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; + + if (!suballocations1st.empty()) + { + // First allocation: Mark it as next empty at the beginning. + VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + if (firstSuballoc.offset == offset) + { + firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; + firstSuballoc.userData = VMA_NULL; + m_SumFreeSize += firstSuballoc.size; + ++m_1stNullItemsBeginCount; + CleanupAfterFree(); + return; + } + } + + // Last allocation in 2-part ring buffer or top of upper stack (same logic). + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || + m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + VmaSuballocation& lastSuballoc = suballocations2nd.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations2nd.pop_back(); + CleanupAfterFree(); + return; + } + } + // Last allocation in 1st vector. + else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) + { + VmaSuballocation& lastSuballoc = suballocations1st.back(); + if (lastSuballoc.offset == offset) + { + m_SumFreeSize += lastSuballoc.size; + suballocations1st.pop_back(); + CleanupAfterFree(); + return; + } + } + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the middle of 1st vector. + { + const SuballocationVectorType::iterator it = VmaBinaryFindSorted( + suballocations1st.begin() + m_1stNullItemsBeginCount, + suballocations1st.end(), + refSuballoc, + VmaSuballocationOffsetLess()); + if (it != suballocations1st.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_1stNullItemsMiddleCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) + { + // Item from the middle of 2nd vector. + const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : + VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + if (it != suballocations2nd.end()) + { + it->type = VMA_SUBALLOCATION_TYPE_FREE; + it->userData = VMA_NULL; + ++m_2ndNullItemsCount; + m_SumFreeSize += it->size; + CleanupAfterFree(); + return; + } + } + + VMA_ASSERT(0 && "Allocation to free not found in linear allocator!"); +} + +void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + outInfo.offset = (VkDeviceSize)allocHandle - 1; + VmaSuballocation& suballoc = FindSuballocation(outInfo.offset); + outInfo.size = suballoc.size; + outInfo.pUserData = suballoc.userData; +} + +void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + return FindSuballocation((VkDeviceSize)allocHandle - 1).userData; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + VMA_ASSERT(0); + return 0; +} + +void VmaBlockMetadata_Linear::Clear() +{ + m_SumFreeSize = GetSize(); + m_Suballocations0.clear(); + m_Suballocations1.clear(); + // Leaving m_1stVectorIndex unchanged - it doesn't matter. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsBeginCount = 0; + m_1stNullItemsMiddleCount = 0; + m_2ndNullItemsCount = 0; +} + +void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1); + suballoc.userData = userData; +} + +void VmaBlockMetadata_Linear::DebugLogAllAllocations() const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); + + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it) + if (it->type != VMA_SUBALLOCATION_TYPE_FREE) + DebugLogAllocation(it->offset, it->size, it->userData); +} + +VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const +{ + const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + VmaSuballocation refSuballoc; + refSuballoc.offset = offset; + // Rest of members stays uninitialized intentionally for better performance. + + // Item from the 1st vector. 
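FindSuballocation above binary-searches the 2nd vector with a different comparator per mode, because ring-buffer entries are stored sorted by offset ascending while the upper stack stores them descending. In standard-library terms (simplified types; VmaBinaryFindSorted is assumed to behave like a lower_bound followed by an equality check):

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

struct Sub { uint64_t offset, size; };

// Binary search by offset; 'ascending' selects the sort order the
// vector is known to have (ring buffer vs. upper/double stack).
const Sub* FindByOffset(const std::vector<Sub>& v, uint64_t offset, bool ascending)
{
    auto pred = [ascending](const Sub& a, uint64_t off)
    { return ascending ? a.offset < off : a.offset > off; };
    auto it = std::lower_bound(v.begin(), v.end(), offset, pred);
    return (it != v.end() && it->offset == offset) ? &*it : nullptr;
}
```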
+    {
+        SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
+            suballocations1st.begin() + m_1stNullItemsBeginCount,
+            suballocations1st.end(),
+            refSuballoc,
+            VmaSuballocationOffsetLess());
+        if (it != suballocations1st.end())
+        {
+            return const_cast<VmaSuballocation&>(*it);
+        }
+    }
+
+    if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
+    {
+        // Item from the 2nd vector.
+        SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
+            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
+            VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+        if (it != suballocations2nd.end())
+        {
+            return const_cast<VmaSuballocation&>(*it);
+        }
+    }
+
+    VMA_ASSERT(0 && "Allocation not found in linear allocator!");
+    return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
+}
+
+bool VmaBlockMetadata_Linear::ShouldCompact1st() const
+{
+    const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+    const size_t suballocCount = AccessSuballocations1st().size();
+    return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
+}
+
+void VmaBlockMetadata_Linear::CleanupAfterFree()
+{
+    SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+    SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+
+    if (IsEmpty())
+    {
+        suballocations1st.clear();
+        suballocations2nd.clear();
+        m_1stNullItemsBeginCount = 0;
+        m_1stNullItemsMiddleCount = 0;
+        m_2ndNullItemsCount = 0;
+        m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+    }
+    else
+    {
+        const size_t suballoc1stCount = suballocations1st.size();
+        const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
+        VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
+
+        // Find more null items at the beginning of 1st vector.
+        while (m_1stNullItemsBeginCount < suballoc1stCount &&
+            suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            ++m_1stNullItemsBeginCount;
+            --m_1stNullItemsMiddleCount;
+        }
+
+        // Find more null items at the end of 1st vector.
+        while (m_1stNullItemsMiddleCount > 0 &&
+            suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            --m_1stNullItemsMiddleCount;
+            suballocations1st.pop_back();
+        }
+
+        // Find more null items at the end of 2nd vector.
+        while (m_2ndNullItemsCount > 0 &&
+            suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            --m_2ndNullItemsCount;
+            suballocations2nd.pop_back();
+        }
+
+        // Find more null items at the beginning of 2nd vector.
+        while (m_2ndNullItemsCount > 0 &&
+            suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
+        {
+            --m_2ndNullItemsCount;
+            VmaVectorRemove(suballocations2nd, 0);
+        }
+
+        if (ShouldCompact1st())
+        {
+            const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
+            size_t srcIndex = m_1stNullItemsBeginCount;
+            for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
+            {
+                while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
+                {
+                    ++srcIndex;
+                }
+                if (dstIndex != srcIndex)
+                {
+                    suballocations1st[dstIndex] = suballocations1st[srcIndex];
+                }
+                ++srcIndex;
+            }
+            suballocations1st.resize(nonNullItemCount);
+            m_1stNullItemsBeginCount = 0;
+            m_1stNullItemsMiddleCount = 0;
+        }
+
+        // 2nd vector became empty.
+        if (suballocations2nd.empty())
+        {
+            m_2ndVectorMode = SECOND_VECTOR_EMPTY;
+        }
+
+        // 1st vector became empty.
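+        // (When the 1st vector has fully drained while the 2nd vector is an
+        // active ring buffer, the two vectors swap roles below via
+        // m_1stVectorIndex ^= 1. A hedged sketch of the double-buffering
+        // accessors this relies on, mirroring AccessSuballocations1st/2nd:
+        //     SuballocationVectorType& first  = m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0;
+        //     SuballocationVectorType& second = m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1;
+        // so the role swap is O(1) and moves no elements.)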
+ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0) + { + suballocations1st.clear(); + m_1stNullItemsBeginCount = 0; + + if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + // Swap 1st with 2nd. Now 2nd is empty. + m_2ndVectorMode = SECOND_VECTOR_EMPTY; + m_1stNullItemsMiddleCount = m_2ndNullItemsCount; + while (m_1stNullItemsBeginCount < suballocations2nd.size() && + suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + { + ++m_1stNullItemsBeginCount; + --m_1stNullItemsMiddleCount; + } + m_2ndNullItemsCount = 0; + m_1stVectorIndex ^= 1; + } + } + } + + VMA_HEAVY_ASSERT(Validate()); +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize debugMargin = GetDebugMargin(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + // Try to allocate at the end of 1st vector. + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations1st.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations1st.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? + suballocations2nd.back().offset : blockSize; + + // There is enough free space at the end after alignment. + if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. 
+ if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) + { + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on previous page. + break; + } + } + } + + // All tests passed: Success. + pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item, customData unused. + pAllocationRequest->type = VmaAllocationRequestType::EndOf1st; + return true; + } + } + + // Wrap-around to end of 2nd vector. Try to allocate there, watching for the + // beginning of 1st vector as the end of free space. + if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(!suballocations1st.empty()); + + VkDeviceSize resultBaseOffset = 0; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; + } + + // Start from offset equal to beginning of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + // Apply alignment. + resultOffset = VmaAlignUp(resultOffset, allocAlignment); + + // Check previous suballocations for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity); + } + } + + size_t index1st = m_1stNullItemsBeginCount; + + // There is enough free space at the end after alignment. + if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) || + (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset)) + { + // Check next suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) + { + for (size_t nextSuballocIndex = index1st; + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) + { + const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd; + // pAllocationRequest->item, customData unused. + return true; + } + } + + return false; +} + +bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) +{ + const VkDeviceSize blockSize = GetSize(); + const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); + SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + + if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) + { + VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer."); + return false; + } + + // Try to allocate before 2nd.back(), or end of block if 2nd.empty(). + if (allocSize > blockSize) + { + return false; + } + VkDeviceSize resultBaseOffset = blockSize - allocSize; + if (!suballocations2nd.empty()) + { + const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + resultBaseOffset = lastSuballoc.offset - allocSize; + if (allocSize > lastSuballoc.offset) + { + return false; + } + } + + // Start from offset equal to end of free space. + VkDeviceSize resultOffset = resultBaseOffset; + + const VkDeviceSize debugMargin = GetDebugMargin(); + + // Apply debugMargin at the end. + if (debugMargin > 0) + { + if (resultOffset < debugMargin) + { + return false; + } + resultOffset -= debugMargin; + } + + // Apply alignment. + resultOffset = VmaAlignDown(resultOffset, allocAlignment); + + // Check next suballocations from 2nd for BufferImageGranularity conflicts. + // Make bigger alignment if necessary. + if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) + { + bool bufferImageGranularityConflict = false; + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + { + const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) + { + bufferImageGranularityConflict = true; + break; + } + } + else + // Already on previous page. + break; + } + if (bufferImageGranularityConflict) + { + resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity); + } + } + + // There is enough free space. + const VkDeviceSize endOf1st = !suballocations1st.empty() ? + suballocations1st.back().offset + suballocations1st.back().size : + 0; + if (endOf1st + debugMargin <= resultOffset) + { + // Check previous suballocations for BufferImageGranularity conflicts. + // If conflict exists, allocation cannot be made here. + if (bufferImageGranularity > 1) + { + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + { + const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) + { + if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) + { + return false; + } + } + else + { + // Already on next page. + break; + } + } + } + + // All tests passed: Success. 
+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1); + // pAllocationRequest->item unused. + pAllocationRequest->type = VmaAllocationRequestType::UpperAddress; + return true; + } + + return false; +} +#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_LINEAR + +#if 0 +#ifndef _VMA_BLOCK_METADATA_BUDDY +/* +- GetSize() is the original size of allocated memory block. +- m_UsableSize is this size aligned down to a power of two. + All allocations and calculations happen relative to m_UsableSize. +- GetUnusableSize() is the difference between them. + It is reported as separate, unused range, not available for allocations. + +Node at level 0 has size = m_UsableSize. +Each next level contains nodes with size 2 times smaller than current level. +m_LevelCount is the maximum number of levels to use in the current object. +*/ +class VmaBlockMetadata_Buddy : public VmaBlockMetadata +{ + VMA_CLASS_NO_COPY(VmaBlockMetadata_Buddy) +public: + VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); + virtual ~VmaBlockMetadata_Buddy(); + + size_t GetAllocationCount() const override { return m_AllocationCount; } + VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); } + bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; } + VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; } + void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); } + + void Init(VkDeviceSize size) override; + bool Validate() const override; + + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; + void AddStatistics(VmaStatistics& inoutStats) const override; + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override; +#endif + + bool CreateAllocationRequest( + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + bool upperAddress, + VmaSuballocationType allocType, + uint32_t strategy, + VmaAllocationRequest* pAllocationRequest) override; + + void Alloc( + const VmaAllocationRequest& request, + VmaSuballocationType type, + void* userData) override; + + void Free(VmaAllocHandle allocHandle) override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; + void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + VmaAllocHandle GetAllocationListBegin() const override; + VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; + void Clear() override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + +private: + static const size_t MAX_LEVELS = 48; + + struct ValidationContext + { + size_t calculatedAllocationCount = 0; + size_t calculatedFreeCount = 0; + VkDeviceSize calculatedSumFreeSize = 0; + }; + struct Node + { + VkDeviceSize offset; + enum TYPE + { + TYPE_FREE, + TYPE_ALLOCATION, + TYPE_SPLIT, + TYPE_COUNT + } type; + Node* parent; + Node* buddy; + + union + { + struct + { + Node* prev; + Node* next; + } free; + struct + { + void* userData; + } allocation; + struct + { + Node* leftChild; + } split; + }; + }; + + // Size of the memory block aligned down to a power of two. 
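+    // (Worked example under the definitions below: for a block of 1000 bytes,
+    // m_UsableSize = VmaPrevPow2(1000) = 512 and GetUnusableSize() = 488, while
+    // LevelToNodeSize(0) = 512, LevelToNodeSize(1) = 256, LevelToNodeSize(2) = 128, and so on.)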
+    VkDeviceSize m_UsableSize;
+    uint32_t m_LevelCount;
+    VmaPoolAllocator<Node> m_NodeAllocator;
+    Node* m_Root;
+    struct
+    {
+        Node* front;
+        Node* back;
+    } m_FreeList[MAX_LEVELS];
+
+    // Number of nodes in the tree with type == TYPE_ALLOCATION.
+    size_t m_AllocationCount;
+    // Number of nodes in the tree with type == TYPE_FREE.
+    size_t m_FreeCount;
+    // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
+    // Doesn't include unusable size.
+    VkDeviceSize m_SumFreeSize;
+
+    VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
+    VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
+
+    VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
+    {
+        if (!IsVirtual())
+        {
+            size = VmaAlignUp(size, (VkDeviceSize)16);
+        }
+        return VmaNextPow2(size);
+    }
+    Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
+    void DeleteNodeChildren(Node* node);
+    bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
+    uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
+    void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
+    // Adds node to the front of FreeList at given level.
+    // node->type must be FREE.
+    // node->free.prev, next can be undefined.
+    void AddToFreeListFront(uint32_t level, Node* node);
+    // Removes node from FreeList at given level.
+    // node->type must be FREE.
+    // node->free.prev, next stay untouched.
+    void RemoveFromFreeList(uint32_t level, Node* node);
+    void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
+#endif
+};
+
+#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
+VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
+    VkDeviceSize bufferImageGranularity, bool isVirtual)
+    : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
+    m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
+    m_Root(VMA_NULL),
+    m_AllocationCount(0),
+    m_FreeCount(1),
+    m_SumFreeSize(0)
+{
+    memset(m_FreeList, 0, sizeof(m_FreeList));
+}
+
+VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
+{
+    DeleteNodeChildren(m_Root);
+    m_NodeAllocator.Free(m_Root);
+}
+
+void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
+{
+    VmaBlockMetadata::Init(size);
+
+    m_UsableSize = VmaPrevPow2(size);
+    m_SumFreeSize = m_UsableSize;
+
+    // Calculate m_LevelCount.
+    const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
+    m_LevelCount = 1;
+    while (m_LevelCount < MAX_LEVELS &&
+        LevelToNodeSize(m_LevelCount) >= minNodeSize)
+    {
+        ++m_LevelCount;
+    }
+
+    Node* rootNode = m_NodeAllocator.Alloc();
+    rootNode->offset = 0;
+    rootNode->type = Node::TYPE_FREE;
+    rootNode->parent = VMA_NULL;
+    rootNode->buddy = VMA_NULL;
+
+    m_Root = rootNode;
+    AddToFreeListFront(0, rootNode);
+}
+
+bool VmaBlockMetadata_Buddy::Validate() const
+{
+    // Validate tree.
+    ValidationContext ctx;
+    if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
+    {
+        VMA_VALIDATE(false && "ValidateNode failed.");
+    }
+    VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
+    VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
+
+    // Validate free node lists.
+    for (uint32_t level = 0; level < m_LevelCount; ++level)
+    {
+        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
+            m_FreeList[level].front->free.prev == VMA_NULL);
+
+        for (Node* node = m_FreeList[level].front;
+            node != VMA_NULL;
+            node = node->free.next)
+        {
+            VMA_VALIDATE(node->type == Node::TYPE_FREE);
+
+            if (node->free.next == VMA_NULL)
+            {
+                VMA_VALIDATE(m_FreeList[level].back == node);
+            }
+            else
+            {
+                VMA_VALIDATE(node->free.next->free.prev == node);
+            }
+        }
+    }
+
+    // Validate that free lists at higher levels are empty.
+    for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
+    {
+        VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
+    }
+
+    return true;
+}
+
+void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
+{
+    inoutStats.statistics.blockCount++;
+    inoutStats.statistics.blockBytes += GetSize();
+
+    AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
+
+    const VkDeviceSize unusableSize = GetUnusableSize();
+    if (unusableSize > 0)
+        VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
+}
+
+void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
+{
+    inoutStats.blockCount++;
+    inoutStats.allocationCount += (uint32_t)m_AllocationCount;
+    inoutStats.blockBytes += GetSize();
+    inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
+{
+    VmaDetailedStatistics stats;
+    VmaClearDetailedStatistics(stats);
+    AddDetailedStatistics(stats);
+
+    PrintDetailedMap_Begin(
+        json,
+        stats.statistics.blockBytes - stats.statistics.allocationBytes,
+        stats.statistics.allocationCount,
+        stats.unusedRangeCount,
+        mapRefCount);
+
+    PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
+
+    const VkDeviceSize unusableSize = GetUnusableSize();
+    if (unusableSize > 0)
+    {
+        PrintDetailedMap_UnusedRange(json,
+            m_UsableSize, // offset
+            unusableSize); // size
+    }
+
+    PrintDetailedMap_End(json);
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    bool upperAddress,
+    VmaSuballocationType allocType,
+    uint32_t strategy,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
+    allocSize = AlignAllocationSize(allocSize);
+
+    // Simple way to respect bufferImageGranularity. May be optimized some day.
+    // Whenever it might be an OPTIMAL image...
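+    // (Example, using only values defined in this class: with
+    // bufferImageGranularity = 1024 and a 1500-byte request of unknown or image
+    // type, the branch below raises allocAlignment to at least 1024 and pads
+    // allocSize to VmaAlignUp(1500, 1024) = 2048 before the free-list search.)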
+    if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
+        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+        allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
+    {
+        allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
+        allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
+    }
+
+    if (allocSize > m_UsableSize)
+    {
+        return false;
+    }
+
+    const uint32_t targetLevel = AllocSizeToLevel(allocSize);
+    for (uint32_t level = targetLevel + 1; level--; )
+    {
+        for (Node* freeNode = m_FreeList[level].front;
+            freeNode != VMA_NULL;
+            freeNode = freeNode->free.next)
+        {
+            if (freeNode->offset % allocAlignment == 0)
+            {
+                pAllocationRequest->type = VmaAllocationRequestType::Normal;
+                pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
+                pAllocationRequest->size = allocSize;
+                pAllocationRequest->customData = (void*)(uintptr_t)level;
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+void VmaBlockMetadata_Buddy::Alloc(
+    const VmaAllocationRequest& request,
+    VmaSuballocationType type,
+    void* userData)
+{
+    VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
+
+    const uint32_t targetLevel = AllocSizeToLevel(request.size);
+    uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
+
+    Node* currNode = m_FreeList[currLevel].front;
+    VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+    const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
+    while (currNode->offset != offset)
+    {
+        currNode = currNode->free.next;
+        VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
+    }
+
+    // Go down, splitting free nodes.
+    while (currLevel < targetLevel)
+    {
+        // currNode is already first free node at currLevel.
+        // Remove it from list of free nodes at this currLevel.
+        RemoveFromFreeList(currLevel, currNode);
+
+        const uint32_t childrenLevel = currLevel + 1;
+
+        // Create two free sub-nodes.
+        Node* leftChild = m_NodeAllocator.Alloc();
+        Node* rightChild = m_NodeAllocator.Alloc();
+
+        leftChild->offset = currNode->offset;
+        leftChild->type = Node::TYPE_FREE;
+        leftChild->parent = currNode;
+        leftChild->buddy = rightChild;
+
+        rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
+        rightChild->type = Node::TYPE_FREE;
+        rightChild->parent = currNode;
+        rightChild->buddy = leftChild;
+
+        // Convert current currNode to split type.
+        currNode->type = Node::TYPE_SPLIT;
+        currNode->split.leftChild = leftChild;
+
+        // Add child nodes to free list. Order is important!
+        AddToFreeListFront(childrenLevel, rightChild);
+        AddToFreeListFront(childrenLevel, leftChild);
+
+        ++m_FreeCount;
+        ++currLevel;
+        currNode = m_FreeList[currLevel].front;
+
+        /*
+        We can be sure that currNode, as left child of node previously split,
+        also fulfills the alignment requirement.
+        */
+    }
+
+    // Remove from free list.
+    VMA_ASSERT(currLevel == targetLevel &&
+        currNode != VMA_NULL &&
+        currNode->type == Node::TYPE_FREE);
+    RemoveFromFreeList(currLevel, currNode);
+
+    // Convert to allocation node.
+ currNode->type = Node::TYPE_ALLOCATION; + currNode->allocation.userData = userData; + + ++m_AllocationCount; + --m_FreeCount; + m_SumFreeSize -= request.size; +} + +void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + uint32_t level = 0; + outInfo.offset = (VkDeviceSize)allocHandle - 1; + const Node* const node = FindAllocationNode(outInfo.offset, level); + outInfo.size = LevelToNodeSize(level); + outInfo.pUserData = node->allocation.userData; +} + +void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + uint32_t level = 0; + const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + return node->allocation.userData; +} + +VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const +{ + // Function only used for defragmentation, which is disabled for this algorithm + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + // Function only used for defragmentation, which is disabled for this algorithm + return VK_NULL_HANDLE; +} + +void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node) +{ + if (node->type == Node::TYPE_SPLIT) + { + DeleteNodeChildren(node->split.leftChild->buddy); + DeleteNodeChildren(node->split.leftChild); + const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks(); + m_NodeAllocator.Free(node->split.leftChild->buddy); + m_NodeAllocator.Free(node->split.leftChild); + } +} + +void VmaBlockMetadata_Buddy::Clear() +{ + DeleteNodeChildren(m_Root); + m_Root->type = Node::TYPE_FREE; + m_AllocationCount = 0; + m_FreeCount = 1; + m_SumFreeSize = m_UsableSize; +} + +void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + uint32_t level = 0; + Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + node->allocation.userData = userData; +} + +VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const +{ + Node* node = m_Root; + VkDeviceSize nodeOffset = 0; + outLevel = 0; + VkDeviceSize levelNodeSize = LevelToNodeSize(0); + while (node->type == Node::TYPE_SPLIT) + { + const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1; + if (offset < nodeOffset + nextLevelNodeSize) + { + node = node->split.leftChild; + } + else + { + node = node->split.leftChild->buddy; + nodeOffset += nextLevelNodeSize; + } + ++outLevel; + levelNodeSize = nextLevelNodeSize; + } + + VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION); + return node; +} + +bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const +{ + VMA_VALIDATE(level < m_LevelCount); + VMA_VALIDATE(curr->parent == parent); + VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL)); + VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr); + switch (curr->type) + { + case Node::TYPE_FREE: + // curr->free.prev, next are validated separately. 
+ ctx.calculatedSumFreeSize += levelNodeSize; + ++ctx.calculatedFreeCount; + break; + case Node::TYPE_ALLOCATION: + ++ctx.calculatedAllocationCount; + if (!IsVirtual()) + { + VMA_VALIDATE(curr->allocation.userData != VMA_NULL); + } + break; + case Node::TYPE_SPLIT: + { + const uint32_t childrenLevel = level + 1; + const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1; + const Node* const leftChild = curr->split.leftChild; + VMA_VALIDATE(leftChild != VMA_NULL); + VMA_VALIDATE(leftChild->offset == curr->offset); + if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize)) + { + VMA_VALIDATE(false && "ValidateNode for left child failed."); + } + const Node* const rightChild = leftChild->buddy; + VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize); + if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize)) + { + VMA_VALIDATE(false && "ValidateNode for right child failed."); + } + } + break; + default: + return false; + } + + return true; +} + +uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const +{ + // I know this could be optimized somehow e.g. by using std::log2p1 from C++20. + uint32_t level = 0; + VkDeviceSize currLevelNodeSize = m_UsableSize; + VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1; + while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount) + { + ++level; + currLevelNodeSize >>= 1; + nextLevelNodeSize >>= 1; + } + return level; +} + +void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle) +{ + uint32_t level = 0; + Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level); + + ++m_FreeCount; + --m_AllocationCount; + m_SumFreeSize += LevelToNodeSize(level); + + node->type = Node::TYPE_FREE; + + // Join free nodes if possible. + while (level > 0 && node->buddy->type == Node::TYPE_FREE) + { + RemoveFromFreeList(level, node->buddy); + Node* const parent = node->parent; + + m_NodeAllocator.Free(node->buddy); + m_NodeAllocator.Free(node); + parent->type = Node::TYPE_FREE; + + node = parent; + --level; + --m_FreeCount; + } + + AddToFreeListFront(level, node); +} + +void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const +{ + switch (node->type) + { + case Node::TYPE_FREE: + VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize); + break; + case Node::TYPE_ALLOCATION: + VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize); + break; + case Node::TYPE_SPLIT: + { + const VkDeviceSize childrenNodeSize = levelNodeSize / 2; + const Node* const leftChild = node->split.leftChild; + AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize); + const Node* const rightChild = leftChild->buddy; + AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize); + } + break; + default: + VMA_ASSERT(0); + } +} + +void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node) +{ + VMA_ASSERT(node->type == Node::TYPE_FREE); + + // List is empty. 
+    Node* const frontNode = m_FreeList[level].front;
+    if (frontNode == VMA_NULL)
+    {
+        VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
+        node->free.prev = node->free.next = VMA_NULL;
+        m_FreeList[level].front = m_FreeList[level].back = node;
+    }
+    else
+    {
+        VMA_ASSERT(frontNode->free.prev == VMA_NULL);
+        node->free.prev = VMA_NULL;
+        node->free.next = frontNode;
+        frontNode->free.prev = node;
+        m_FreeList[level].front = node;
+    }
+}
+
+void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
+{
+    VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
+
+    // It is at the front.
+    if (node->free.prev == VMA_NULL)
+    {
+        VMA_ASSERT(m_FreeList[level].front == node);
+        m_FreeList[level].front = node->free.next;
+    }
+    else
+    {
+        Node* const prevFreeNode = node->free.prev;
+        VMA_ASSERT(prevFreeNode->free.next == node);
+        prevFreeNode->free.next = node->free.next;
+    }
+
+    // It is at the back.
+    if (node->free.next == VMA_NULL)
+    {
+        VMA_ASSERT(m_FreeList[level].back == node);
+        m_FreeList[level].back = node->free.prev;
+    }
+    else
+    {
+        Node* const nextFreeNode = node->free.next;
+        VMA_ASSERT(nextFreeNode->free.prev == node);
+        nextFreeNode->free.prev = node->free.prev;
+    }
+}
+
+void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
+{
+    switch (node->type)
+    {
+    case Node::TYPE_FREE:
+        break;
+    case Node::TYPE_ALLOCATION:
+        DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
+        break;
+    case Node::TYPE_SPLIT:
+    {
+        ++level;
+        DebugLogAllAllocationNode(node->split.leftChild, level);
+        DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
+    }
+    break;
+    default:
+        VMA_ASSERT(0);
+    }
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
+{
+    switch (node->type)
+    {
+    case Node::TYPE_FREE:
+        PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
+        break;
+    case Node::TYPE_ALLOCATION:
+        PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
+        break;
+    case Node::TYPE_SPLIT:
+    {
+        const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
+        const Node* const leftChild = node->split.leftChild;
+        PrintDetailedMapNode(json, leftChild, childrenNodeSize);
+        const Node* const rightChild = leftChild->buddy;
+        PrintDetailedMapNode(json, rightChild, childrenNodeSize);
+    }
+    break;
+    default:
+        VMA_ASSERT(0);
+    }
+}
+#endif // VMA_STATS_STRING_ENABLED
+#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
+#endif // _VMA_BLOCK_METADATA_BUDDY
+#endif // #if 0
+
+#ifndef _VMA_BLOCK_METADATA_TLSF
+// To skip searching the current, larger region when the first allocation attempt
+// fails and fall through to a smaller range, pass VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT
+// as the strategy to CreateAllocationRequest(). When fragmentation and reuse of
+// previous blocks don't matter, pass VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
+// for the fastest possible allocation time.
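+//
+// A minimal usage sketch (illustrative only, not part of this patch): these
+// strategy bits normally reach CreateAllocationRequest() through the public
+// API via VmaAllocationCreateInfo::flags, assuming a valid VmaAllocator:
+//
+//     VkBufferCreateInfo bufInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+//     bufInfo.size  = 64 * 1024;
+//     bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+//     VmaAllocationCreateInfo allocCreateInfo = {};
+//     allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+//     allocCreateInfo.flags = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT;
+//     VkBuffer buffer; VmaAllocation allocation;
+//     VkResult res = vmaCreateBuffer(allocator, &bufInfo, &allocCreateInfo,
+//                                    &buffer, &allocation, VMA_NULL);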
+class VmaBlockMetadata_TLSF : public VmaBlockMetadata
+{
+    VMA_CLASS_NO_COPY(VmaBlockMetadata_TLSF)
+public:
+    VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
+        VkDeviceSize bufferImageGranularity, bool isVirtual);
+    virtual ~VmaBlockMetadata_TLSF();
+
+    size_t GetAllocationCount() const override { return m_AllocCount; }
+    size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
+    VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
+    bool IsEmpty() const override { return m_NullBlock->offset == 0; }
+    VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
+
+    void Init(VkDeviceSize size) override;
+    bool Validate() const override;
+
+    void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
+    void AddStatistics(VmaStatistics& inoutStats) const override;
+
+#if VMA_STATS_STRING_ENABLED
+    void PrintDetailedMap(class VmaJsonWriter& json) const override;
+#endif
+
+    bool CreateAllocationRequest(
+        VkDeviceSize allocSize,
+        VkDeviceSize allocAlignment,
+        bool upperAddress,
+        VmaSuballocationType allocType,
+        uint32_t strategy,
+        VmaAllocationRequest* pAllocationRequest) override;
+
+    VkResult CheckCorruption(const void* pBlockData) override;
+    void Alloc(
+        const VmaAllocationRequest& request,
+        VmaSuballocationType type,
+        void* userData) override;
+
+    void Free(VmaAllocHandle allocHandle) override;
+    void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
+    void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+    VmaAllocHandle GetAllocationListBegin() const override;
+    VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
+    VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
+    void Clear() override;
+    void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+    void DebugLogAllAllocations() const override;
+
+private:
+    // According to the original paper, 4 or 5 is preferable:
+    // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems" + // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf + static const uint8_t SECOND_LEVEL_INDEX = 5; + static const uint16_t SMALL_BUFFER_SIZE = 256; + static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16; + static const uint8_t MEMORY_CLASS_SHIFT = 7; + static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT; + + class Block + { + public: + VkDeviceSize offset; + VkDeviceSize size; + Block* prevPhysical; + Block* nextPhysical; + + void MarkFree() { prevFree = VMA_NULL; } + void MarkTaken() { prevFree = this; } + bool IsFree() const { return prevFree != this; } + void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; } + Block*& PrevFree() { return prevFree; } + Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; } + + private: + Block* prevFree; // Address of the same block here indicates that block is taken + union + { + Block* nextFree; + void* userData; + }; + }; + + size_t m_AllocCount; + // Total number of free blocks besides null block + size_t m_BlocksFreeCount; + // Total size of free blocks excluding null block + VkDeviceSize m_BlocksFreeSize; + uint32_t m_IsFreeBitmap; + uint8_t m_MemoryClasses; + uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES]; + uint32_t m_ListsCount; + /* + * 0: 0-3 lists for small buffers + * 1+: 0-(2^SLI-1) lists for normal buffers + */ + Block** m_FreeList; + VmaPoolAllocator m_BlockAllocator; + Block* m_NullBlock; + VmaBlockBufferImageGranularity m_GranularityHandler; + + uint8_t SizeToMemoryClass(VkDeviceSize size) const; + uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const; + uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const; + uint32_t GetListIndex(VkDeviceSize size) const; + + void RemoveFreeBlock(Block* block); + void InsertFreeBlock(Block* block); + void MergeBlock(Block* block, Block* prev); + + Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const; + bool CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest); +}; + +#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) + : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), + m_AllocCount(0), + m_BlocksFreeCount(0), + m_BlocksFreeSize(0), + m_IsFreeBitmap(0), + m_MemoryClasses(0), + m_ListsCount(0), + m_FreeList(VMA_NULL), + m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), + m_NullBlock(VMA_NULL), + m_GranularityHandler(bufferImageGranularity) {} + +VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() +{ + if (m_FreeList) + vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount); + m_GranularityHandler.Destroy(GetAllocationCallbacks()); +} + +void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) +{ + VmaBlockMetadata::Init(size); + + if (!IsVirtual()) + m_GranularityHandler.Init(GetAllocationCallbacks(), size); + + m_NullBlock = m_BlockAllocator.Alloc(); + m_NullBlock->size = size; + m_NullBlock->offset = 0; + m_NullBlock->prevPhysical = VMA_NULL; + m_NullBlock->nextPhysical = VMA_NULL; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + uint8_t memoryClass = SizeToMemoryClass(size); + uint16_t sli = SizeToSecondIndex(size, memoryClass); + m_ListsCount = (memoryClass == 0 ? 
0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1; + if (IsVirtual()) + m_ListsCount += 1UL << SECOND_LEVEL_INDEX; + else + m_ListsCount += 4; + + m_MemoryClasses = memoryClass + 2; + memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); + + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); +} + +bool VmaBlockMetadata_TLSF::Validate() const +{ + VMA_VALIDATE(GetSumFreeSize() <= GetSize()); + + VkDeviceSize calculatedSize = m_NullBlock->size; + VkDeviceSize calculatedFreeSize = m_NullBlock->size; + size_t allocCount = 0; + size_t freeCount = 0; + + // Check integrity of free lists + for (uint32_t list = 0; list < m_ListsCount; ++list) + { + Block* block = m_FreeList[list]; + if (block != VMA_NULL) + { + VMA_VALIDATE(block->IsFree()); + VMA_VALIDATE(block->PrevFree() == VMA_NULL); + while (block->NextFree()) + { + VMA_VALIDATE(block->NextFree()->IsFree()); + VMA_VALIDATE(block->NextFree()->PrevFree() == block); + block = block->NextFree(); + } + } + } + + VkDeviceSize nextOffset = m_NullBlock->offset; + auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual()); + + VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL); + if (m_NullBlock->prevPhysical) + { + VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); + } + // Check all blocks + for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + { + VMA_VALIDATE(prev->offset + prev->size == nextOffset); + nextOffset = prev->offset; + calculatedSize += prev->size; + + uint32_t listIndex = GetListIndex(prev->size); + if (prev->IsFree()) + { + ++freeCount; + // Check if free block belongs to free list + Block* freeBlock = m_FreeList[listIndex]; + VMA_VALIDATE(freeBlock != VMA_NULL); + + bool found = false; + do + { + if (freeBlock == prev) + found = true; + + freeBlock = freeBlock->NextFree(); + } while (!found && freeBlock != VMA_NULL); + + VMA_VALIDATE(found); + calculatedFreeSize += prev->size; + } + else + { + ++allocCount; + // Check if taken block is not on a free list + Block* freeBlock = m_FreeList[listIndex]; + while (freeBlock) + { + VMA_VALIDATE(freeBlock != prev); + freeBlock = freeBlock->NextFree(); + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size)); + } + } + + if (prev->prevPhysical) + { + VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev); + } + } + + if (!IsVirtual()) + { + VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx)); + } + + VMA_VALIDATE(nextOffset == 0); + VMA_VALIDATE(calculatedSize == GetSize()); + VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize()); + VMA_VALIDATE(allocCount == m_AllocCount); + VMA_VALIDATE(freeCount == m_BlocksFreeCount); + + return true; +} + +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +{ + inoutStats.statistics.blockCount++; + inoutStats.statistics.blockBytes += GetSize(); + if (m_NullBlock->size > 0) + VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); + + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + { + if (block->IsFree()) + VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size); + else + VmaAddDetailedStatisticsAllocation(inoutStats, block->size); + } +} + +void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const +{ + inoutStats.blockCount++; + inoutStats.allocationCount += 
(uint32_t)m_AllocCount;
+    inoutStats.blockBytes += GetSize();
+    inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
+{
+    size_t blockCount = m_AllocCount + m_BlocksFreeCount;
+    VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+    VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
+
+    size_t i = blockCount;
+    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+    {
+        blockList[--i] = block;
+    }
+    VMA_ASSERT(i == 0);
+
+    VmaDetailedStatistics stats;
+    VmaClearDetailedStatistics(stats);
+    AddDetailedStatistics(stats);
+
+    PrintDetailedMap_Begin(json,
+        stats.statistics.blockBytes - stats.statistics.allocationBytes,
+        stats.statistics.allocationCount,
+        stats.unusedRangeCount);
+
+    for (; i < blockCount; ++i)
+    {
+        Block* block = blockList[i];
+        if (block->IsFree())
+            PrintDetailedMap_UnusedRange(json, block->offset, block->size);
+        else
+            PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
+    }
+    if (m_NullBlock->size > 0)
+        PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
+
+    PrintDetailedMap_End(json);
+}
+#endif
+
+bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
+    VkDeviceSize allocSize,
+    VkDeviceSize allocAlignment,
+    bool upperAddress,
+    VmaSuballocationType allocType,
+    uint32_t strategy,
+    VmaAllocationRequest* pAllocationRequest)
+{
+    VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
+    VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
+
+    // For small granularity round up
+    if (!IsVirtual())
+        m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
+
+    allocSize += GetDebugMargin();
+    // Quick check for too small pool
+    if (allocSize > GetSumFreeSize())
+        return false;
+
+    // If no free blocks in pool then check only null block
+    if (m_BlocksFreeCount == 0)
+        return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
+
+    // Round up to the next block
+    VkDeviceSize sizeForNextList = allocSize;
+    VkDeviceSize smallSizeStep = SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4);
+    if (allocSize > SMALL_BUFFER_SIZE)
+    {
+        sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
+    }
+    else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
+        sizeForNextList = SMALL_BUFFER_SIZE + 1;
+    else
+        sizeForNextList += smallSizeStep;
+
+    uint32_t nextListIndex = 0;
+    uint32_t prevListIndex = 0;
+    Block* nextListBlock = VMA_NULL;
+    Block* prevListBlock = VMA_NULL;
+
+    // Check blocks according to strategies
+    if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
+    {
+        // Quick check for larger block first
+        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+        if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // If not fitted then null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Null block failed, search larger bucket
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+
+        // Failed again, check best fit bucket
+        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+        while (prevListBlock)
+        {
+            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            prevListBlock = prevListBlock->NextFree();
+        }
+    }
+    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
+    {
+        // Check best fit bucket
+        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+        while (prevListBlock)
+        {
+            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            prevListBlock = prevListBlock->NextFree();
+        }
+
+        // If failed check null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Check larger bucket
+        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+    }
+    else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT)
+    {
+        // Perform search from the start
+        VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
+        VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
+
+        size_t i = m_BlocksFreeCount;
+        for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+        {
+            if (block->IsFree() && block->size >= allocSize)
+                blockList[--i] = block;
+        }
+
+        for (; i < m_BlocksFreeCount; ++i)
+        {
+            Block& block = *blockList[i];
+            if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+        }
+
+        // If failed check null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Whole range searched, no more memory
+        return false;
+    }
+    else
+    {
+        // Check larger bucket
+        nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+
+        // If failed check null block
+        if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
+            return true;
+
+        // Check best fit bucket
+        prevListBlock = FindFreeBlock(allocSize, prevListIndex);
+        while (prevListBlock)
+        {
+            if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            prevListBlock = prevListBlock->NextFree();
+        }
+    }
+
+    // Worst case, full search has to be done
+    while (++nextListIndex < m_ListsCount)
+    {
+        nextListBlock = m_FreeList[nextListIndex];
+        while (nextListBlock)
+        {
+            if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
+                return true;
+            nextListBlock = nextListBlock->NextFree();
+        }
+    }
+
+    // No more memory available
+    return false;
+}
+
+VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
+{
+    for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+    {
+        if (!block->IsFree())
+        {
+            if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
+            {
+                VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
+                return VK_ERROR_UNKNOWN_COPY;
+            }
+        }
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaBlockMetadata_TLSF::Alloc(
+    const VmaAllocationRequest& request,
+    VmaSuballocationType type,
+    void* userData)
+{
+    VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
+
+    // Get block and pop it from the free list
+    Block* currentBlock = (Block*)request.allocHandle;
+    VkDeviceSize offset = request.algorithmData;
+    VMA_ASSERT(currentBlock != VMA_NULL);
+    VMA_ASSERT(currentBlock->offset <= offset);
+
+    if (currentBlock != m_NullBlock)
+        RemoveFreeBlock(currentBlock);
+
+    VkDeviceSize debugMargin = GetDebugMargin();
+    VkDeviceSize missingAlignment = offset - currentBlock->offset;
+
+    // Append missing alignment to prev block or create new one
+    if (missingAlignment)
+    {
+        Block* prevBlock = currentBlock->prevPhysical;
+        VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
+
+        if (prevBlock->IsFree() && prevBlock->size != debugMargin)
+        {
+            uint32_t oldList = GetListIndex(prevBlock->size);
+            prevBlock->size += missingAlignment;
+            // Check if new size crosses list bucket
+            if (oldList != GetListIndex(prevBlock->size))
+            {
+                prevBlock->size -= missingAlignment;
+                RemoveFreeBlock(prevBlock);
+                prevBlock->size += missingAlignment;
+                InsertFreeBlock(prevBlock);
+            }
+            else
+                m_BlocksFreeSize += missingAlignment;
+        }
+        else
+        {
+            Block* newBlock = m_BlockAllocator.Alloc();
+            currentBlock->prevPhysical = newBlock;
+            prevBlock->nextPhysical = newBlock;
+            newBlock->prevPhysical = prevBlock;
+            newBlock->nextPhysical = currentBlock;
+            newBlock->size = missingAlignment;
+            newBlock->offset = currentBlock->offset;
+            newBlock->MarkTaken();
+
+            InsertFreeBlock(newBlock);
+        }
+
+        currentBlock->size -= missingAlignment;
+        currentBlock->offset += missingAlignment;
+    }
+
+    VkDeviceSize size = request.size + debugMargin;
+    if (currentBlock->size == size)
+    {
+        if (currentBlock == m_NullBlock)
+        {
+            // Setup new null block
+            m_NullBlock = m_BlockAllocator.Alloc();
+            m_NullBlock->size = 0;
+            m_NullBlock->offset = currentBlock->offset + size;
+            m_NullBlock->prevPhysical = currentBlock;
+            m_NullBlock->nextPhysical = VMA_NULL;
+            m_NullBlock->MarkFree();
+            m_NullBlock->PrevFree() = VMA_NULL;
+            m_NullBlock->NextFree() = VMA_NULL;
+            currentBlock->nextPhysical = m_NullBlock;
+            currentBlock->MarkTaken();
+        }
+    }
+    else
+    {
+        VMA_ASSERT(currentBlock->size > size && "Proper block already found,
shouldn't find smaller one!"); + + // Create new free block + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = currentBlock->size - size; + newBlock->offset = currentBlock->offset + size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + currentBlock->nextPhysical = newBlock; + currentBlock->size = size; + + if (currentBlock == m_NullBlock) + { + m_NullBlock = newBlock; + m_NullBlock->MarkFree(); + m_NullBlock->NextFree() = VMA_NULL; + m_NullBlock->PrevFree() = VMA_NULL; + currentBlock->MarkTaken(); + } + else + { + newBlock->nextPhysical->prevPhysical = newBlock; + newBlock->MarkTaken(); + InsertFreeBlock(newBlock); + } + } + currentBlock->UserData() = userData; + + if (debugMargin > 0) + { + currentBlock->size -= debugMargin; + Block* newBlock = m_BlockAllocator.Alloc(); + newBlock->size = debugMargin; + newBlock->offset = currentBlock->offset + currentBlock->size; + newBlock->prevPhysical = currentBlock; + newBlock->nextPhysical = currentBlock->nextPhysical; + newBlock->MarkTaken(); + currentBlock->nextPhysical->prevPhysical = newBlock; + currentBlock->nextPhysical = newBlock; + InsertFreeBlock(newBlock); + } + + if (!IsVirtual()) + m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, + currentBlock->offset, currentBlock->size); + ++m_AllocCount; +} + +void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) +{ + Block* block = (Block*)allocHandle; + Block* next = block->nextPhysical; + VMA_ASSERT(!block->IsFree() && "Block is already free!"); + + if (!IsVirtual()) + m_GranularityHandler.FreePages(block->offset, block->size); + --m_AllocCount; + + VkDeviceSize debugMargin = GetDebugMargin(); + if (debugMargin > 0) + { + RemoveFreeBlock(next); + MergeBlock(next, block); + block = next; + next = next->nextPhysical; + } + + // Try merging + Block* prev = block->prevPhysical; + if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) + { + RemoveFreeBlock(prev); + MergeBlock(block, prev); + } + + if (!next->IsFree()) + InsertFreeBlock(block); + else if (next == m_NullBlock) + MergeBlock(m_NullBlock, block); + else + { + RemoveFreeBlock(next); + MergeBlock(next, block); + InsertFreeBlock(next); + } +} + +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); + outInfo.offset = block->offset; + outInfo.size = block->size; + outInfo.pUserData = block->UserData(); +} + +void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); + return block->UserData(); +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const +{ + if (m_AllocCount == 0) + return VK_NULL_HANDLE; + + for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!"); + return VK_NULL_HANDLE; +} + +VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const +{ + Block* startBlock = (Block*)prevAlloc; + VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); + + for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + { + if (!block->IsFree()) + return (VmaAllocHandle)block; + } + return 
VK_NULL_HANDLE; +} + +VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const +{ + Block* block = (Block*)alloc; + VMA_ASSERT(!block->IsFree() && "Incorrect block!"); + + if (block->prevPhysical) + return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0; + return 0; +} + +void VmaBlockMetadata_TLSF::Clear() +{ + m_AllocCount = 0; + m_BlocksFreeCount = 0; + m_BlocksFreeSize = 0; + m_IsFreeBitmap = 0; + m_NullBlock->offset = 0; + m_NullBlock->size = GetSize(); + Block* block = m_NullBlock->prevPhysical; + m_NullBlock->prevPhysical = VMA_NULL; + while (block) + { + Block* prev = block->prevPhysical; + m_BlockAllocator.Free(block); + block = prev; + } + memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); + memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t)); + m_GranularityHandler.Clear(); +} + +void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +{ + Block* block = (Block*)allocHandle; + VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); + block->UserData() = userData; +} + +void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const +{ + for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + if (!block->IsFree()) + DebugLogAllocation(block->offset, block->size, block->UserData()); +} + +uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const +{ + if (size > SMALL_BUFFER_SIZE) + return VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT; + return 0; +} + +uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const +{ + if (memoryClass == 0) + { + // Small sizes are bucketed linearly: 8-byte granularity for virtual blocks, 64 bytes for real GPU memory + if (IsVirtual()) + return static_cast<uint16_t>((size - 1) / 8); + else + return static_cast<uint16_t>((size - 1) / 64); + } + return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX)); +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const +{ + if (memoryClass == 0) + return secondIndex; + + const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex; + if (IsVirtual()) + return index + (1 << SECOND_LEVEL_INDEX); + else + return index + 4; +} + +uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass)); +} + +void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(block->IsFree()); + + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block->PrevFree(); + if (block->PrevFree() != VMA_NULL) + block->PrevFree()->NextFree() = block->NextFree(); + else + { + uint8_t memClass = SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + VMA_ASSERT(m_FreeList[index] == block); + m_FreeList[index] = block->NextFree(); + if (block->NextFree() == VMA_NULL) + { + m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex); + if (m_InnerIsFreeBitmap[memClass] == 0) + m_IsFreeBitmap &= ~(1UL << memClass); + } + } + block->MarkTaken(); + block->UserData() = VMA_NULL; + --m_BlocksFreeCount; + m_BlocksFreeSize -= block->size; +} + +void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block) +{ + VMA_ASSERT(block != m_NullBlock); + VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!"); + + uint8_t memClass = 
SizeToMemoryClass(block->size); + uint16_t secondIndex = SizeToSecondIndex(block->size, memClass); + uint32_t index = GetListIndex(memClass, secondIndex); + VMA_ASSERT(index < m_ListsCount); + block->PrevFree() = VMA_NULL; + block->NextFree() = m_FreeList[index]; + m_FreeList[index] = block; + if (block->NextFree() != VMA_NULL) + block->NextFree()->PrevFree() = block; + else + { + m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex; + m_IsFreeBitmap |= 1UL << memClass; + } + ++m_BlocksFreeCount; + m_BlocksFreeSize += block->size; +} + +void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev) +{ + VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!"); + VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!"); + + block->offset = prev->offset; + block->size += prev->size; + block->prevPhysical = prev->prevPhysical; + if (block->prevPhysical) + block->prevPhysical->nextPhysical = block; + m_BlockAllocator.Free(prev); +} + +VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const +{ + uint8_t memoryClass = SizeToMemoryClass(size); + uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass)); + if (!innerFreeMap) + { + // Check higher levels for available blocks + uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1)); + if (!freeMap) + return VMA_NULL; // No more memory available + + // Find lowest free region + memoryClass = VMA_BITSCAN_LSB(freeMap); + innerFreeMap = m_InnerIsFreeBitmap[memoryClass]; + VMA_ASSERT(innerFreeMap != 0); + } + // Find lowest free subregion + listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap)); + VMA_ASSERT(m_FreeList[listIndex]); + return m_FreeList[listIndex]; +} + +bool VmaBlockMetadata_TLSF::CheckBlock( + Block& block, + uint32_t listIndex, + VkDeviceSize allocSize, + VkDeviceSize allocAlignment, + VmaSuballocationType allocType, + VmaAllocationRequest* pAllocationRequest) +{ + VMA_ASSERT(block.IsFree() && "Block is already taken!"); + + VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment); + if (block.size < allocSize + alignedOffset - block.offset) + return false; + + // Check for granularity conflicts + if (!IsVirtual() && + m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType)) + return false; + + // Alloc successful + pAllocationRequest->type = VmaAllocationRequestType::TLSF; + pAllocationRequest->allocHandle = (VmaAllocHandle)&block; + pAllocationRequest->size = allocSize - GetDebugMargin(); + pAllocationRequest->customData = (void*)allocType; + pAllocationRequest->algorithmData = alignedOffset; + + // Place block at the start of the list if it's a normal block + if (listIndex != m_ListsCount && block.PrevFree()) + { + block.PrevFree()->NextFree() = block.NextFree(); + if (block.NextFree()) + block.NextFree()->PrevFree() = block.PrevFree(); + block.PrevFree() = VMA_NULL; + block.NextFree() = m_FreeList[listIndex]; + m_FreeList[listIndex] = &block; + if (block.NextFree()) + block.NextFree()->PrevFree() = &block; + } + + return true; +} +#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS +#endif // _VMA_BLOCK_METADATA_TLSF + +#ifndef _VMA_BLOCK_VECTOR +/* +Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific +Vulkan memory type. + +Synchronized internally with a mutex. 
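+Allocate() takes the mutex for writing; statistics and emptiness queries take it
+for reading (see the VmaMutexLockRead/VmaMutexLockWrite usages in the function
+definitions below), and locking is skipped entirely when the allocator was created
+for single-threaded use (m_hAllocator->m_UseMutex == false).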
+*/ +class VmaBlockVector +{ + friend struct VmaDefragmentationContext_T; + VMA_CLASS_NO_COPY(VmaBlockVector) +public: + VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext); + ~VmaBlockVector(); + + VmaAllocator GetAllocator() const { return m_hAllocator; } + VmaPool GetParentPool() const { return m_hParentPool; } + bool IsCustomPool() const { return m_hParentPool != VMA_NULL; } + uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } + VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; } + VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } + uint32_t GetAlgorithm() const { return m_Algorithm; } + bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; } + float GetPriority() const { return m_Priority; } + const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; } + // To be used only while the m_Mutex is locked. Used during defragmentation. + size_t GetBlockCount() const { return m_Blocks.size(); } + // To be used only while the m_Mutex is locked. Used during defragmentation. + VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; } + VMA_RW_MUTEX &GetMutex() { return m_Mutex; } + + VkResult CreateMinBlocks(); + void AddStatistics(VmaStatistics& inoutStats); + void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); + bool IsEmpty(); + bool IsCorruptionDetectionEnabled() const; + + VkResult Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + void Free(const VmaAllocation hAllocation); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + VkResult CheckCorruption(); + +private: + const VmaAllocator m_hAllocator; + const VmaPool m_hParentPool; + const uint32_t m_MemoryTypeIndex; + const VkDeviceSize m_PreferredBlockSize; + const size_t m_MinBlockCount; + const size_t m_MaxBlockCount; + const VkDeviceSize m_BufferImageGranularity; + const bool m_ExplicitBlockSize; + const uint32_t m_Algorithm; + const float m_Priority; + const VkDeviceSize m_MinAllocationAlignment; + + void* const m_pMemoryAllocateNext; + VMA_RW_MUTEX m_Mutex; + // Incrementally sorted by sumFreeSize, ascending. + VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks; + uint32_t m_NextBlockId; + bool m_IncrementalSort = true; + + void SetIncrementalSort(bool val) { m_IncrementalSort = val; } + + VkDeviceSize CalcMaxBlockSize() const; + // Finds and removes given block from vector. + void Remove(VmaDeviceMemoryBlock* pBlock); + // Performs single step in sorting m_Blocks. They may not be fully sorted + // after this call. 
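+ // For example, with sumFreeSize values {48, 16, 32} one call swaps only the
+ // first out-of-order neighbour pair and returns, yielding {16, 48, 32}; the
+ // full sort is amortized across many allocate/free calls.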
+ void IncrementallySortBlocks(); + void SortByFreeSize(); + + VkResult AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + VkResult AllocateFromBlock( + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize size, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + uint32_t strategy, + VmaAllocation* pAllocation); + + VkResult CommitAllocationRequest( + VmaAllocationRequest& allocRequest, + VmaDeviceMemoryBlock* pBlock, + VkDeviceSize alignment, + VmaAllocationCreateFlags allocFlags, + void* pUserData, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation); + + VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex); + bool HasEmptyBlock(); +}; +#endif // _VMA_BLOCK_VECTOR + +#ifndef _VMA_DEFRAGMENTATION_CONTEXT +struct VmaDefragmentationContext_T +{ + VMA_CLASS_NO_COPY(VmaDefragmentationContext_T) +public: + VmaDefragmentationContext_T( + VmaAllocator hAllocator, + const VmaDefragmentationInfo& info); + ~VmaDefragmentationContext_T(); + + void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; } + + VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo); + VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo); + +private: + // Max number of allocations to ignore due to size constraints before ending a single pass + static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; + enum class CounterStatus { Pass, Ignore, End }; + + struct FragmentedBlock + { + uint32_t data; + VmaDeviceMemoryBlock* block; + }; + struct StateBalanced + { + VkDeviceSize avgFreeSize = 0; + VkDeviceSize avgAllocSize = UINT64_MAX; + }; + struct StateExtensive + { + enum class Operation : uint8_t + { + FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, + MoveBuffers, MoveTextures, MoveAll, + Cleanup, Done + }; + + Operation operation = Operation::FindFreeBlockTexture; + size_t firstFreeBlock = SIZE_MAX; + }; + struct MoveAllocationData + { + VkDeviceSize size; + VkDeviceSize alignment; + VmaSuballocationType type; + VmaAllocationCreateFlags flags; + VmaDefragmentationMove move = {}; + }; + + const VkDeviceSize m_MaxPassBytes; + const uint32_t m_MaxPassAllocations; + + VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator; + VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves; + + uint8_t m_IgnoredAllocs = 0; + uint32_t m_Algorithm; + uint32_t m_BlockVectorCount; + VmaBlockVector* m_PoolBlockVector; + VmaBlockVector** m_pBlockVectors; + size_t m_ImmovableBlockCount = 0; + VmaDefragmentationStats m_GlobalStats = { 0 }; + VmaDefragmentationStats m_PassStats = { 0 }; + void* m_AlgorithmState = VMA_NULL; + + static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); + CounterStatus CheckCounters(VkDeviceSize bytes); + bool IncrementCounters(VkDeviceSize bytes); + bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); + bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); + + bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); + bool ComputeDefragmentation_Fast(VmaBlockVector& vector); + bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); + bool ComputeDefragmentation_Full(VmaBlockVector& vector); + bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); + + void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); + bool 
MoveDataToFreeBlocks(VmaSuballocationType currentType, + VmaBlockVector& vector, size_t firstFreeBlock, + bool& texturePresent, bool& bufferPresent, bool& otherPresent); +}; +#endif // _VMA_DEFRAGMENTATION_CONTEXT + +#ifndef _VMA_POOL_T +struct VmaPool_T +{ + friend struct VmaPoolListItemTraits; + VMA_CLASS_NO_COPY(VmaPool_T) +public: + VmaBlockVector m_BlockVector; + VmaDedicatedAllocationList m_DedicatedAllocations; + + VmaPool_T( + VmaAllocator hAllocator, + const VmaPoolCreateInfo& createInfo, + VkDeviceSize preferredBlockSize); + ~VmaPool_T(); + + uint32_t GetId() const { return m_Id; } + void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; } + + const char* GetName() const { return m_Name; } + void SetName(const char* pName); + +#if VMA_STATS_STRING_ENABLED + //void PrintDetailedMap(class VmaStringBuilder& sb); +#endif + +private: + uint32_t m_Id; + char* m_Name; + VmaPool_T* m_PrevPool = VMA_NULL; + VmaPool_T* m_NextPool = VMA_NULL; +}; + +struct VmaPoolListItemTraits +{ + typedef VmaPool_T ItemType; + + static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; } + static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; } + static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; } + static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; } +}; +#endif // _VMA_POOL_T + +#ifndef _VMA_CURRENT_BUDGET_DATA +struct VmaCurrentBudgetData +{ + VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS]; + VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS]; + +#if VMA_MEMORY_BUDGET + VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch; + VMA_RW_MUTEX m_BudgetMutex; + uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS]; + uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS]; + uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS]; +#endif // VMA_MEMORY_BUDGET + + VmaCurrentBudgetData(); + + void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); + void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize); +}; + +#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +VmaCurrentBudgetData::VmaCurrentBudgetData() +{ + for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex) + { + m_BlockCount[heapIndex] = 0; + m_AllocationCount[heapIndex] = 0; + m_BlockBytes[heapIndex] = 0; + m_AllocationBytes[heapIndex] = 0; +#if VMA_MEMORY_BUDGET + m_VulkanUsage[heapIndex] = 0; + m_VulkanBudget[heapIndex] = 0; + m_BlockBytesAtBudgetFetch[heapIndex] = 0; +#endif + } + +#if VMA_MEMORY_BUDGET + m_OperationsSinceBudgetFetch = 0; +#endif +} + +void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + m_AllocationBytes[heapIndex] += allocationSize; + ++m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} + +void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize) +{ + VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize); + m_AllocationBytes[heapIndex] -= allocationSize; + VMA_ASSERT(m_AllocationCount[heapIndex] > 0); + --m_AllocationCount[heapIndex]; +#if VMA_MEMORY_BUDGET + ++m_OperationsSinceBudgetFetch; +#endif +} +#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS +#endif // _VMA_CURRENT_BUDGET_DATA + +#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR +/* +Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects. 
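+
+Intended usage is a simple acquire/release pair (sketch; the `true` argument is
+VmaAllocation_T's mappingAllowed flag):
+
+    VmaAllocation hAlloc = m_AllocationObjectAllocator.Allocate(true);
+    // ... hAlloc lives as long as the user's allocation ...
+    m_AllocationObjectAllocator.Free(hAlloc);
+
+Pooling (first-block capacity of 1024 objects, per the constructor below) keeps
+these small, frequently created objects off the generic heap path.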
+*/ +class VmaAllocationObjectAllocator +{ + VMA_CLASS_NO_COPY(VmaAllocationObjectAllocator) +public: + VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks) + : m_Allocator(pAllocationCallbacks, 1024) {} + + template<typename... Types> VmaAllocation Allocate(Types&&... args); + void Free(VmaAllocation hAlloc); + +private: + VMA_MUTEX m_Mutex; + VmaPoolAllocator<VmaAllocation_T> m_Allocator; +}; + +template<typename... Types> +VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args) +{ + VmaMutexLock mutexLock(m_Mutex); + return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...); +} + +void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc) +{ + VmaMutexLock mutexLock(m_Mutex); + m_Allocator.Free(hAlloc); +} +#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR + +#ifndef _VMA_VIRTUAL_BLOCK_T +struct VmaVirtualBlock_T +{ + VMA_CLASS_NO_COPY(VmaVirtualBlock_T) +public: + const bool m_AllocationCallbacksSpecified; + const VkAllocationCallbacks m_AllocationCallbacks; + + VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo); + ~VmaVirtualBlock_T(); + + VkResult Init() { return VK_SUCCESS; } + bool IsEmpty() const { return m_Metadata->IsEmpty(); } + void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); } + void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); } + void Clear() { m_Metadata->Clear(); } + + const VkAllocationCallbacks* GetAllocationCallbacks() const; + void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo); + VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset); + void GetStatistics(VmaStatistics& outStats) const; + void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const; +#if VMA_STATS_STRING_ENABLED + void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const; +#endif + +private: + VmaBlockMetadata* m_Metadata; +}; + +#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo) + : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks) +{ + const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK; + switch (algorithm) + { + case 0: + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); + break; + case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT: + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true); + break; + default: + VMA_ASSERT(0); + m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true); + } + + m_Metadata->Init(createInfo.size); +} + +VmaVirtualBlock_T::~VmaVirtualBlock_T() +{ + // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations + if (!m_Metadata->IsEmpty()) + m_Metadata->DebugLogAllAllocations(); + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased virtual allocations. 
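+ // A leak-free lifetime through the public API looks like this (sketch):
+ //
+ //     VmaVirtualBlockCreateInfo bci = {};
+ //     bci.size = 1048576;                         // 1 MiB of abstract space
+ //     VmaVirtualBlock vb;
+ //     vmaCreateVirtualBlock(&bci, &vb);
+ //     VmaVirtualAllocationCreateInfo aci = {};
+ //     aci.size = 4096;
+ //     VmaVirtualAllocation va;
+ //     VkDeviceSize offset;
+ //     vmaVirtualAllocate(vb, &aci, &va, &offset);
+ //     vmaVirtualFree(vb, va);                     // every allocation freed...
+ //     vmaDestroyVirtualBlock(vb);                 // ...before the block goes away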
+ VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!"); + + vma_delete(GetAllocationCallbacks(), m_Metadata); +} + +const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const +{ + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; +} + +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) +{ + m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); +} + +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, + VkDeviceSize* outOffset) +{ + VmaAllocationRequest request = {}; + if (m_Metadata->CreateAllocationRequest( + createInfo.size, // allocSize + VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment + (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress + VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant + createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy + &request)) + { + m_Metadata->Alloc(request, + VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant + createInfo.pUserData); + outAllocation = (VmaVirtualAllocation)request.allocHandle; + if(outOffset) + *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); + return VK_SUCCESS; + } + outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE; + if (outOffset) + *outOffset = UINT64_MAX; + return VK_ERROR_OUT_OF_DEVICE_MEMORY; +} + +void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +{ + VmaClearStatistics(outStats); + m_Metadata->AddStatistics(outStats); +} + +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const +{ + VmaClearDetailedStatistics(outStats); + m_Metadata->AddDetailedStatistics(outStats); +} + +#if VMA_STATS_STRING_ENABLED +void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const +{ + VmaJsonWriter json(GetAllocationCallbacks(), sb); + json.BeginObject(); + + VmaDetailedStatistics stats; + CalculateDetailedStatistics(stats); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats); + + if (detailedMap) + { + json.WriteString("Details"); + json.BeginObject(); + m_Metadata->PrintDetailedMap(json); + json.EndObject(); + } + + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS +#endif // _VMA_VIRTUAL_BLOCK_T + + +// Main allocator object. +struct VmaAllocator_T +{ + VMA_CLASS_NO_COPY(VmaAllocator_T) +public: + bool m_UseMutex; + uint32_t m_VulkanApiVersion; + bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseExtMemoryBudget; + bool m_UseAmdDeviceCoherentMemory; + bool m_UseKhrBufferDeviceAddress; + bool m_UseExtMemoryPriority; + VkDevice m_hDevice; + VkInstance m_hInstance; + bool m_AllocationCallbacksSpecified; + VkAllocationCallbacks m_AllocationCallbacks; + VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks; + VmaAllocationObjectAllocator m_AllocationObjectAllocator; + + // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size. + uint32_t m_HeapSizeLimitMask; + + VkPhysicalDeviceProperties m_PhysicalDeviceProperties; + VkPhysicalDeviceMemoryProperties m_MemProps; + + // Default pools. 
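+ // One default VmaBlockVector per Vulkan memory type; index i corresponds to
+ // VkPhysicalDeviceMemoryProperties::memoryTypes[i] of the selected device.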
+ VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; + + VmaCurrentBudgetData m_Budget; + VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. + + VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); + ~VmaAllocator_T(); + + const VkAllocationCallbacks* GetAllocationCallbacks() const + { + return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; + } + const VmaVulkanFunctions& GetVulkanFunctions() const + { + return m_VulkanFunctions; + } + + VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; } + + VkDeviceSize GetBufferImageGranularity() const + { + return VMA_MAX( + static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY), + m_PhysicalDeviceProperties.limits.bufferImageGranularity); + } + + uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; } + uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; } + + uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const + { + VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount); + return m_MemProps.memoryTypes[memTypeIndex].heapIndex; + } + // True when the given memory type is HOST_VISIBLE but not HOST_COHERENT. + bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const + { + return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + } + // Minimum alignment for all allocations in the given memory type. + VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const + { + return IsMemoryTypeNonCoherent(memTypeIndex) ? + VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : + (VkDeviceSize)VMA_MIN_ALIGNMENT; + } + + bool IsIntegratedGpu() const + { + return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU; + } + + uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; } + + void GetBufferMemoryRequirements( + VkBuffer hBuffer, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + void GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const; + VkResult FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. + uint32_t* pMemoryTypeIndex) const; + + // Main allocation function. + VkResult AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown. + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Main deallocation function. 
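+ // Counterpart of AllocateMemory() above; it takes the same array form,
+ // presumably so a multi-page allocation can be released in a single call, e.g.
+ //     FreeMemory(allocationCount, pAllocations);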
+ void FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations); + + void CalculateStatistics(VmaTotalStatistics* pStats); + + void GetHeapBudgets( + VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); + +#if VMA_STATS_STRING_ENABLED + void PrintDetailedMap(class VmaJsonWriter& json); +#endif + + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + + VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + void DestroyPool(VmaPool pool); + void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); + void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); + + void SetCurrentFrameIndex(uint32_t frameIndex); + uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } + + VkResult CheckPoolCorruption(VmaPool hPool); + VkResult CheckCorruption(uint32_t memoryTypeBits); + + // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. + void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); + // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. + VkResult BindVulkanBuffer( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkBuffer buffer, + const void* pNext); + // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. + VkResult BindVulkanImage( + VkDeviceMemory memory, + VkDeviceSize memoryOffset, + VkImage image, + const void* pNext); + + VkResult Map(VmaAllocation hAllocation, void** ppData); + void Unmap(VmaAllocation hAllocation); + + VkResult BindBufferMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext); + VkResult BindImageMemory( + VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext); + + VkResult FlushOrInvalidateAllocation( + VmaAllocation hAllocation, + VkDeviceSize offset, VkDeviceSize size, + VMA_CACHE_OPERATION op); + VkResult FlushOrInvalidateAllocations( + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, const VkDeviceSize* sizes, + VMA_CACHE_OPERATION op); + + void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); + + /* + Returns bit mask of memory types that can support defragmentation on GPU as + they support creation of required buffer for copy operations. + */ + uint32_t GetGpuDefragmentationMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const + { + return m_TypeExternalMemoryHandleTypes[memTypeIndex]; + } +#endif // #if VMA_EXTERNAL_MEMORY + +private: + VkDeviceSize m_PreferredLargeHeapBlockSize; + + VkPhysicalDevice m_PhysicalDevice; + VMA_ATOMIC_UINT32 m_CurrentFrameIndex; + VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized. +#if VMA_EXTERNAL_MEMORY + VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES]; +#endif // #if VMA_EXTERNAL_MEMORY + + VMA_RW_MUTEX m_PoolsMutex; + typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList; + // Protected by m_PoolsMutex. + PoolList m_Pools; + uint32_t m_NextPoolId; + + VmaVulkanFunctions m_VulkanFunctions; + + // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. 
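+ // For example, unless the allocator was created with
+ // VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT, memory types carrying
+ // VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD have their bit cleared here, so
+ // `memoryTypeBits & m_GlobalMemoryTypeBits` can never select them.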
+ uint32_t m_GlobalMemoryTypeBits; + + void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Static(); +#endif + + void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + void ImportVulkanFunctions_Dynamic(); +#endif + + void ValidateVulkanFunctions(); + + VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex); + + VkResult AllocateMemoryOfType( + VmaPool pool, + VkDeviceSize size, + VkDeviceSize alignment, + bool dedicatedPreferred, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + uint32_t memTypeIndex, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + VmaBlockVector& blockVector, + size_t allocationCount, + VmaAllocation* pAllocations); + + // Helper function only to be used inside AllocateDedicatedMemory. + VkResult AllocateDedicatedMemoryPage( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + uint32_t memTypeIndex, + const VkMemoryAllocateInfo& allocInfo, + bool map, + bool isUserDataString, + bool isMappingAllowed, + void* pUserData, + VmaAllocation* pAllocation); + + // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. + VkResult AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain = nullptr); + + void FreeDedicatedMemory(const VmaAllocation allocation); + + VkResult CalcMemTypeParams( + VmaAllocationCreateInfo& outCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount); + VkResult CalcAllocationParams( + VmaAllocationCreateInfo& outCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred); + + /* + Calculates and returns bit mask of memory types that can support defragmentation + on GPU as they support creation of required buffer for copy operations. 
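+
+The result is computed lazily and cached in m_GpuDefragmentationMemoryTypeBits,
+with UINT32_MAX serving as the "not yet computed" sentinel, so only the first
+defragmentation pays for the probe.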
+ */ + uint32_t CalculateGpuDefragmentationMemoryTypeBits() const; + uint32_t CalculateGlobalMemoryTypeBits() const; + + bool GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const; + +#if VMA_MEMORY_BUDGET + void UpdateVulkanBudget(); +#endif // #if VMA_MEMORY_BUDGET +}; + + +#ifndef _VMA_MEMORY_FUNCTIONS +static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +{ + return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); +} + +static void VmaFree(VmaAllocator hAllocator, void* ptr) +{ + VmaFree(&hAllocator->m_AllocationCallbacks, ptr); +} + +template<typename T> +static T* VmaAllocate(VmaAllocator hAllocator) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); +} + +template<typename T> +static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +{ + return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); +} + +template<typename T> +static void vma_delete(VmaAllocator hAllocator, T* ptr) +{ + if(ptr != VMA_NULL) + { + ptr->~T(); + VmaFree(hAllocator, ptr); + } +} + +template<typename T> +static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +{ + if(ptr != VMA_NULL) + { + for(size_t i = count; i--; ) + ptr[i].~T(); + VmaFree(hAllocator, ptr); + } +} +#endif // _VMA_MEMORY_FUNCTIONS + +#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS +VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) + : m_pMetadata(VMA_NULL), + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) {} + +VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock() +{ + VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped."); + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); +} + +void VmaDeviceMemoryBlock::Init( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t newMemoryTypeIndex, + VkDeviceMemory newMemory, + VkDeviceSize newSize, + uint32_t id, + uint32_t algorithm, + VkDeviceSize bufferImageGranularity) +{ + VMA_ASSERT(m_hMemory == VK_NULL_HANDLE); + + m_hParentPool = hParentPool; + m_MemoryTypeIndex = newMemoryTypeIndex; + m_Id = id; + m_hMemory = newMemory; + + switch (algorithm) + { + case 0: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + break; + default: + VMA_ASSERT(0); + m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), + bufferImageGranularity, false); // isVirtual + } + m_pMetadata->Init(newSize); +} + +void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator) +{ + // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations + if (!m_pMetadata->IsEmpty()) + m_pMetadata->DebugLogAllAllocations(); + // This is the most important assert in the entire library. + // Hitting it means you have some memory leak - unreleased VmaAllocation objects. 
+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!"); + + VMA_ASSERT(m_hMemory != VK_NULL_HANDLE); + allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory); + m_hMemory = VK_NULL_HANDLE; + + vma_delete(allocator, m_pMetadata); + m_pMetadata = VMA_NULL; +} + +void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + m_MappingHysteresis.PostAlloc(); +} + +void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) +{ + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if(m_MappingHysteresis.PostFree()) + { + VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); + if (m_MapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + } +} + +bool VmaDeviceMemoryBlock::Validate() const +{ + VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && + (m_pMetadata->GetSize() != 0)); + + return m_pMetadata->Validate(); +} + +VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) +{ + void* pData = nullptr; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + res = m_pMetadata->CheckCorruption(pData); + + Unmap(hAllocator, 1); + + return res; +} + +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +{ + if (count == 0) + { + return VK_SUCCESS; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + m_MappingHysteresis.PostMap(); + if (oldTotalMapCount != 0) + { + m_MapCount += count; + VMA_ASSERT(m_pMappedData != VMA_NULL); + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + return VK_SUCCESS; + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + &m_pMappedData); + if (result == VK_SUCCESS) + { + if (ppData != VMA_NULL) + { + *ppData = m_pMappedData; + } + m_MapCount = count; + } + return result; + } +} + +void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count) +{ + if (count == 0) + { + return; + } + + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + if (m_MapCount >= count) + { + m_MapCount -= count; + const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping(); + if (totalMapCount == 0) + { + m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory); + } + m_MappingHysteresis.PostUnmap(); + } + else + { + VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped."); + } +} + +VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + VmaWriteMagicValue(pData, allocOffset + allocSize); + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize) +{ + VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); + + void* pData; + VkResult res = 
Map(hAllocator, 1, &pData); + if (res != VK_SUCCESS) + { + return res; + } + + if (!VmaValidateMagicValue(pData, allocOffset + allocSize)) + { + VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!"); + } + + Unmap(hAllocator, 1); + return VK_SUCCESS; +} + +VkResult VmaDeviceMemoryBlock::BindBufferMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkBuffer hBuffer, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext); +} + +VkResult VmaDeviceMemoryBlock::BindImageMemory( + const VmaAllocator hAllocator, + const VmaAllocation hAllocation, + VkDeviceSize allocationLocalOffset, + VkImage hImage, + const void* pNext) +{ + VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && + hAllocation->GetBlock() == this); + VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; + // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. + VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); + return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext); +} +#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS + +#ifndef _VMA_ALLOCATION_T_FUNCTIONS +VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) + : m_Alignment{ 1 }, + m_Size{ 0 }, + m_pUserData{ VMA_NULL }, + m_pName{ VMA_NULL }, + m_MemoryTypeIndex{ 0 }, + m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, + m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, + m_MapCount{ 0 }, + m_Flags{ 0 } +{ + if(mappingAllowed) + m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; + +#if VMA_STATS_STRING_ENABLED + m_BufferImageUsage = 0; +#endif +} + +VmaAllocation_T::~VmaAllocation_T() +{ + VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction."); + + // Check if owned string was freed. + VMA_ASSERT(m_pName == VMA_NULL); +} + +void VmaAllocation_T::InitBlockAllocation( + VmaDeviceMemoryBlock* block, + VmaAllocHandle allocHandle, + VkDeviceSize alignment, + VkDeviceSize size, + uint32_t memoryTypeIndex, + VmaSuballocationType suballocationType, + bool mapped) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(block != VMA_NULL); + m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK; + m_Alignment = alignment; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + if(mapped) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_SuballocationType = (uint8_t)suballocationType; + m_BlockAllocation.m_Block = block; + m_BlockAllocation.m_AllocHandle = allocHandle; +} + +void VmaAllocation_T::InitDedicatedAllocation( + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceMemory hMemory, + VmaSuballocationType suballocationType, + void* pMappedData, + VkDeviceSize size) +{ + VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); + VMA_ASSERT(hMemory != VK_NULL_HANDLE); + m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED; + m_Alignment = 0; + m_Size = size; + m_MemoryTypeIndex = memoryTypeIndex; + m_SuballocationType = (uint8_t)suballocationType; + if(pMappedData != VMA_NULL) + { + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; + } + m_DedicatedAllocation.m_hParentPool = hParentPool; + m_DedicatedAllocation.m_hMemory = hMemory; + m_DedicatedAllocation.m_pMappedData = pMappedData; + m_DedicatedAllocation.m_Prev = VMA_NULL; + m_DedicatedAllocation.m_Next = VMA_NULL; +} + +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) +{ + VMA_ASSERT(pName == VMA_NULL || pName != m_pName); + + FreeName(hAllocator); + + if (pName != VMA_NULL) + m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName); +} + +uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation) +{ + VMA_ASSERT(allocation != VMA_NULL); + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount != 0) + m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount); + + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation); + VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation); + m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this); + +#if VMA_STATS_STRING_ENABLED + VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage); +#endif + return m_MapCount; +} + +VmaAllocHandle VmaAllocation_T::GetAllocHandle() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_AllocHandle; + case ALLOCATION_TYPE_DEDICATED: + return VK_NULL_HANDLE; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceSize VmaAllocation_T::GetOffset() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle); + case ALLOCATION_TYPE_DEDICATED: + return 0; + default: + VMA_ASSERT(0); + return 0; + } +} + +VmaPool VmaAllocation_T::GetParentPool() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetParentPool(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hParentPool; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +VkDeviceMemory VmaAllocation_T::GetMemory() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + return m_BlockAllocation.m_Block->GetDeviceMemory(); + case ALLOCATION_TYPE_DEDICATED: + return m_DedicatedAllocation.m_hMemory; + default: + VMA_ASSERT(0); + return VK_NULL_HANDLE; + } +} + +void* VmaAllocation_T::GetMappedData() const +{ + switch (m_Type) + { + case ALLOCATION_TYPE_BLOCK: + if (m_MapCount != 0 || 
IsPersistentMap()) + { + void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + VMA_ASSERT(pBlockData != VMA_NULL); + return (char*)pBlockData + GetOffset(); + } + else + { + return VMA_NULL; + } + break; + case ALLOCATION_TYPE_DEDICATED: + VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap())); + return m_DedicatedAllocation.m_pMappedData; + default: + VMA_ASSERT(0); + return VMA_NULL; + } +} + +void VmaAllocation_T::BlockAllocMap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount < 0xFF) + { + ++m_MapCount; + } + else + { + VMA_ASSERT(0 && "Allocation mapped too many times simultaneously."); + } +} + +void VmaAllocation_T::BlockAllocUnmap() +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK); + + if (m_MapCount > 0) + { + --m_MapCount; + } + else + { + VMA_ASSERT(0 && "Unmapping allocation not previously mapped."); + } +} + +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); + + if (m_MapCount != 0 || IsPersistentMap()) + { + if (m_MapCount < 0xFF) + { + VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL); + *ppData = m_DedicatedAllocation.m_pMappedData; + ++m_MapCount; + return VK_SUCCESS; + } + else + { + VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously."); + return VK_ERROR_MEMORY_MAP_FAILED; + } + } + else + { + VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory, + 0, // offset + VK_WHOLE_SIZE, + 0, // flags + ppData); + if (result == VK_SUCCESS) + { + m_DedicatedAllocation.m_pMappedData = *ppData; + m_MapCount = 1; + } + return result; + } +} + +void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator) +{ + VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); + + if (m_MapCount > 0) + { + --m_MapCount; + if (m_MapCount == 0 && !IsPersistentMap()) + { + m_DedicatedAllocation.m_pMappedData = VMA_NULL; + (*hAllocator->GetVulkanFunctions().vkUnmapMemory)( + hAllocator->m_hDevice, + m_DedicatedAllocation.m_hMemory); + } + } + else + { + VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped."); + } +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage) +{ + VMA_ASSERT(m_BufferImageUsage == 0); + m_BufferImageUsage = bufferImageUsage; +} + +void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +{ + json.WriteString("Type"); + json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); + + json.WriteString("Size"); + json.WriteNumber(m_Size); + json.WriteString("Usage"); + json.WriteNumber(m_BufferImageUsage); + + if (m_pUserData != VMA_NULL) + { + json.WriteString("CustomData"); + json.BeginString(); + json.ContinueString_Pointer(m_pUserData); + json.EndString(); + } + if (m_pName != VMA_NULL) + { + json.WriteString("Name"); + json.WriteString(m_pName); + } +} +#endif // VMA_STATS_STRING_ENABLED + +void VmaAllocation_T::FreeName(VmaAllocator hAllocator) +{ + if(m_pName) + { + VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); + m_pName = VMA_NULL; + } +} +#endif // 
_VMA_ALLOCATION_T_FUNCTIONS + +#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS +VmaBlockVector::VmaBlockVector( + VmaAllocator hAllocator, + VmaPool hParentPool, + uint32_t memoryTypeIndex, + VkDeviceSize preferredBlockSize, + size_t minBlockCount, + size_t maxBlockCount, + VkDeviceSize bufferImageGranularity, + bool explicitBlockSize, + uint32_t algorithm, + float priority, + VkDeviceSize minAllocationAlignment, + void* pMemoryAllocateNext) + : m_hAllocator(hAllocator), + m_hParentPool(hParentPool), + m_MemoryTypeIndex(memoryTypeIndex), + m_PreferredBlockSize(preferredBlockSize), + m_MinBlockCount(minBlockCount), + m_MaxBlockCount(maxBlockCount), + m_BufferImageGranularity(bufferImageGranularity), + m_ExplicitBlockSize(explicitBlockSize), + m_Algorithm(algorithm), + m_Priority(priority), + m_MinAllocationAlignment(minAllocationAlignment), + m_pMemoryAllocateNext(pMemoryAllocateNext), + m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())), + m_NextBlockId(0) {} + +VmaBlockVector::~VmaBlockVector() +{ + for (size_t i = m_Blocks.size(); i--; ) + { + m_Blocks[i]->Destroy(m_hAllocator); + vma_delete(m_hAllocator, m_Blocks[i]); + } +} + +VkResult VmaBlockVector::CreateMinBlocks() +{ + for (size_t i = 0; i < m_MinBlockCount; ++i) + { + VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL); + if (res != VK_SUCCESS) + { + return res; + } + } + return VK_SUCCESS; +} + +void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddStatistics(inoutStats); + } +} + +void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + + const size_t blockCount = m_Blocks.size(); + for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) + { + const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pBlock); + VMA_HEAVY_ASSERT(pBlock->Validate()); + pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); + } +} + +bool VmaBlockVector::IsEmpty() +{ + VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); + return m_Blocks.empty(); +} + +bool VmaBlockVector::IsCorruptionDetectionEnabled() const +{ + const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; + return (VMA_DEBUG_DETECT_CORRUPTION != 0) && + (VMA_DEBUG_MARGIN > 0) && + (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; +} + +VkResult VmaBlockVector::Allocate( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + size_t allocIndex; + VkResult res = VK_SUCCESS; + + alignment = VMA_MAX(alignment, m_MinAllocationAlignment); + + if (IsCorruptionDetectionEnabled()) + { + size = VmaAlignUp(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + alignment = VmaAlignUp(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE)); + } + + { + VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + { + res = 
AllocatePage( + size, + alignment, + createInfo, + suballocType, + pAllocations + allocIndex); + if (res != VK_SUCCESS) + { + break; + } + } + } + + if (res != VK_SUCCESS) + { + // Free all already created allocations. + while (allocIndex--) + Free(pAllocations[allocIndex]); + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + } + + return res; +} + +VkResult VmaBlockVector::AllocatePage( + VkDeviceSize size, + VkDeviceSize alignment, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + VmaAllocation* pAllocation) +{ + const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; + + VkDeviceSize freeMemory; + { + const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex); + VmaBudget heapBudget = {}; + m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1); + freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0; + } + + const bool canFallbackToDedicated = !HasExplicitBlockSize() && + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; + const bool canCreateNewBlock = + ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && + (m_Blocks.size() < m_MaxBlockCount) && + (freeMemory >= size || !canFallbackToDedicated); + uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK; + + // Upper address can only be used with linear allocator and within a single memory block. + if (isUpperAddress && + (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + // Early reject: requested allocation size is larger than the maximum block size for this block vector. + if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + + // 1. Search existing allocations. Try to allocate. + if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + { + // Use only last block. + if (!m_Blocks.empty()) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from last block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + else + { + if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default + { + const bool isHostVisible = + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; + if(isHostVisible) + { + const bool isMappingAllowed = (createInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + /* + For non-mappable allocations, check blocks that are not mapped first. + For mappable allocations, check blocks that are already mapped first. + This way, having many blocks, we will separate mappable and non-mappable allocations, + hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. + */ + for(size_t mappingI = 0; mappingI < 2; ++mappingI) + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
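+ // Pass mappingI == 0 visits only the preferred kind of block (already-mapped
+ // blocks for mappable requests, unmapped blocks otherwise); pass 1 visits the
+ // remaining blocks, so no candidate is skipped outright.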
+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; + if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + { + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + } + else + { + // Forward order in m_Blocks - prefer blocks with smallest amount of free space. + for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock( + pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT + { + // Backward order in m_Blocks - prefer blocks with largest amount of free space. + for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) + { + VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VMA_ASSERT(pCurrBlock); + VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); + if (res == VK_SUCCESS) + { + VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId()); + IncrementallySortBlocks(); + return VK_SUCCESS; + } + } + } + } + + // 2. Try to create new block. + if (canCreateNewBlock) + { + // Calculate optimal size for new block. + VkDeviceSize newBlockSize = m_PreferredBlockSize; + uint32_t newBlockSizeShift = 0; + const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3; + + if (!m_ExplicitBlockSize) + { + // Allocate 1/8, 1/4, 1/2 as first blocks. + const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize(); + for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + } + else + { + break; + } + } + } + + size_t newBlockIndex = 0; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? + CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. + if (!m_ExplicitBlockSize) + { + while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX) + { + const VkDeviceSize smallerNewBlockSize = newBlockSize / 2; + if (smallerNewBlockSize >= size) + { + newBlockSize = smallerNewBlockSize; + ++newBlockSizeShift; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
+                        CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
+                }
+                else
+                {
+                    break;
+                }
+            }
+        }
+
+        if (res == VK_SUCCESS)
+        {
+            VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
+            VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
+
+            res = AllocateFromBlock(
+                pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
+            if (res == VK_SUCCESS)
+            {
+                VMA_DEBUG_LOG_FORMAT(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
+                IncrementallySortBlocks();
+                return VK_SUCCESS;
+            }
+            else
+            {
+                // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
+                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+            }
+        }
+    }
+
+    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+void VmaBlockVector::Free(const VmaAllocation hAllocation)
+{
+    VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
+
+    bool budgetExceeded = false;
+    {
+        const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
+        VmaBudget heapBudget = {};
+        m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
+        budgetExceeded = heapBudget.usage >= heapBudget.budget;
+    }
+
+    // Scope for lock.
+    {
+        VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+
+        if (IsCorruptionDetectionEnabled())
+        {
+            VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
+            VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
+        }
+
+        if (hAllocation->IsPersistentMap())
+        {
+            pBlock->Unmap(m_hAllocator, 1);
+        }
+
+        const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
+        pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
+        pBlock->PostFree(m_hAllocator);
+        VMA_HEAVY_ASSERT(pBlock->Validate());
+
+        VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
+
+        const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
+        // pBlock became empty after this deallocation.
+        if (pBlock->m_pMetadata->IsEmpty())
+        {
+            // Already had empty block. We don't want to have two, so delete this one.
+            if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
+            {
+                pBlockToDelete = pBlock;
+                Remove(pBlock);
+            }
+            // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
+        }
+        // pBlock didn't become empty, but we have another empty block - find and free that one.
+        // (This is optional, heuristics.)
+        else if (hadEmptyBlockBeforeFree && canDeleteBlock)
+        {
+            VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
+            if (pLastBlock->m_pMetadata->IsEmpty())
+            {
+                pBlockToDelete = pLastBlock;
+                m_Blocks.pop_back();
+            }
+        }
+
+        IncrementallySortBlocks();
+    }
+
+    // Destruction of a free block. Deferred until this point, outside of mutex
+    // lock, for performance reason.
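+    // E.g. a workload that repeatedly creates and destroys one large buffer would
+    // otherwise pay vkAllocateMemory/vkFreeMemory for a whole block on every cycle;
+    // the single retained empty block above amortizes that, and the actual
+    // vkFreeMemory happens here so that m_Mutex is not held across a potentially
+    // slow driver call.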
+    if (pBlockToDelete != VMA_NULL)
+    {
+        VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%u", pBlockToDelete->GetId());
+        pBlockToDelete->Destroy(m_hAllocator);
+        vma_delete(m_hAllocator, pBlockToDelete);
+    }
+
+    m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
+    m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
+}
+
+VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
+{
+    VkDeviceSize result = 0;
+    for (size_t i = m_Blocks.size(); i--; )
+    {
+        result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
+        if (result >= m_PreferredBlockSize)
+        {
+            break;
+        }
+    }
+    return result;
+}
+
+void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
+{
+    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+    {
+        if (m_Blocks[blockIndex] == pBlock)
+        {
+            VmaVectorRemove(m_Blocks, blockIndex);
+            return;
+        }
+    }
+    VMA_ASSERT(0);
+}
+
+void VmaBlockVector::IncrementallySortBlocks()
+{
+    if (!m_IncrementalSort)
+        return;
+    if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
+    {
+        // Bubble sort only until first swap.
+        for (size_t i = 1; i < m_Blocks.size(); ++i)
+        {
+            if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
+            {
+                VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
+                return;
+            }
+        }
+    }
+}
+
+void VmaBlockVector::SortByFreeSize()
+{
+    VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
+        [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
+        {
+            return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
+        });
+}
+
+VkResult VmaBlockVector::AllocateFromBlock(
+    VmaDeviceMemoryBlock* pBlock,
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    VmaAllocationCreateFlags allocFlags,
+    void* pUserData,
+    VmaSuballocationType suballocType,
+    uint32_t strategy,
+    VmaAllocation* pAllocation)
+{
+    const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
+
+    VmaAllocationRequest currRequest = {};
+    if (pBlock->m_pMetadata->CreateAllocationRequest(
+        size,
+        alignment,
+        isUpperAddress,
+        suballocType,
+        strategy,
+        &currRequest))
+    {
+        return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
+    }
+    return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+}
+
+VkResult VmaBlockVector::CommitAllocationRequest(
+    VmaAllocationRequest& allocRequest,
+    VmaDeviceMemoryBlock* pBlock,
+    VkDeviceSize alignment,
+    VmaAllocationCreateFlags allocFlags,
+    void* pUserData,
+    VmaSuballocationType suballocType,
+    VmaAllocation* pAllocation)
+{
+    const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
+    const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
+    const bool isMappingAllowed = (allocFlags &
+        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
+
+    pBlock->PostAlloc(m_hAllocator);
+    // Allocate from pBlock.
+    if (mapped)
+    {
+        VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
+        if (res != VK_SUCCESS)
+        {
+            return res;
+        }
+    }
+
+    *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
+    pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
+    (*pAllocation)->InitBlockAllocation(
+        pBlock,
+        allocRequest.allocHandle,
+        alignment,
+        allocRequest.size, // Not size, as actual allocation size may be larger than requested!
+        m_MemoryTypeIndex,
+        suballocType,
+        mapped);
+    VMA_HEAVY_ASSERT(pBlock->Validate());
+    if (isUserDataString)
+        (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
+    else
+        (*pAllocation)->SetUserData(m_hAllocator, pUserData);
+    m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
+    if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+    {
+        m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+    }
+    if (IsCorruptionDetectionEnabled())
+    {
+        VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
+        VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
+    }
+    return VK_SUCCESS;
+}
+
+VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
+{
+    VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
+    allocInfo.pNext = m_pMemoryAllocateNext;
+    allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
+    allocInfo.allocationSize = blockSize;
+
+#if VMA_BUFFER_DEVICE_ADDRESS
+    // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
+    VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
+    if (m_hAllocator->m_UseKhrBufferDeviceAddress)
+    {
+        allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
+        VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
+    }
+#endif // VMA_BUFFER_DEVICE_ADDRESS
+
+#if VMA_MEMORY_PRIORITY
+    VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
+    if (m_hAllocator->m_UseExtMemoryPriority)
+    {
+        VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
+        priorityInfo.priority = m_Priority;
+        VmaPnextChainPushFront(&allocInfo, &priorityInfo);
+    }
+#endif // VMA_MEMORY_PRIORITY
+
+#if VMA_EXTERNAL_MEMORY
+    // Attach VkExportMemoryAllocateInfoKHR if necessary.
+    VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+    exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
+    if (exportMemoryAllocInfo.handleTypes != 0)
+    {
+        VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
+    }
+#endif // VMA_EXTERNAL_MEMORY
+
+    VkDeviceMemory mem = VK_NULL_HANDLE;
+    VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
+    if (res < 0)
+    {
+        return res;
+    }
+
+    // New VkDeviceMemory successfully created.
+
+    // Create new Allocation for it.
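+    // (The VmaDeviceMemoryBlock below wraps the raw VkDeviceMemory together with its
+    // metadata. Note that VmaPnextChainPushFront() above front-pushes each extension
+    // struct, so with all three optional features enabled the final chain would be,
+    // illustratively:
+    //     VkMemoryAllocateInfo -> VkExportMemoryAllocateInfoKHR
+    //         -> VkMemoryPriorityAllocateInfoEXT -> VkMemoryAllocateFlagsInfoKHR.)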
+    VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
+    pBlock->Init(
+        m_hAllocator,
+        m_hParentPool,
+        m_MemoryTypeIndex,
+        mem,
+        allocInfo.allocationSize,
+        m_NextBlockId++,
+        m_Algorithm,
+        m_BufferImageGranularity);
+
+    m_Blocks.push_back(pBlock);
+    if (pNewBlockIndex != VMA_NULL)
+    {
+        *pNewBlockIndex = m_Blocks.size() - 1;
+    }
+
+    return VK_SUCCESS;
+}
+
+bool VmaBlockVector::HasEmptyBlock()
+{
+    for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
+    {
+        VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
+        if (pBlock->m_pMetadata->IsEmpty())
+        {
+            return true;
+        }
+    }
+    return false;
+}
+
+#if VMA_STATS_STRING_ENABLED
+void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
+{
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+
+    json.BeginObject();
+    for (size_t i = 0; i < m_Blocks.size(); ++i)
+    {
+        json.BeginString();
+        json.ContinueString(m_Blocks[i]->GetId());
+        json.EndString();
+
+        json.BeginObject();
+        json.WriteString("MapRefCount");
+        json.WriteNumber(m_Blocks[i]->GetMapRefCount());
+
+        m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
+        json.EndObject();
+    }
+    json.EndObject();
+}
+#endif // VMA_STATS_STRING_ENABLED
+
+VkResult VmaBlockVector::CheckCorruption()
+{
+    if (!IsCorruptionDetectionEnabled())
+    {
+        return VK_ERROR_FEATURE_NOT_PRESENT;
+    }
+
+    VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
+    for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
+    {
+        VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
+        VMA_ASSERT(pBlock);
+        VkResult res = pBlock->CheckCorruption(m_hAllocator);
+        if (res != VK_SUCCESS)
+        {
+            return res;
+        }
+    }
+    return VK_SUCCESS;
+}
+
+#endif // _VMA_BLOCK_VECTOR_FUNCTIONS
+
+#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
+VmaDefragmentationContext_T::VmaDefragmentationContext_T(
+    VmaAllocator hAllocator,
+    const VmaDefragmentationInfo& info)
+    : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
+    m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
+    m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
+    m_Moves(m_MoveAllocator)
+{
+    m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
+
+    if (info.pool != VMA_NULL)
+    {
+        m_BlockVectorCount = 1;
+        m_PoolBlockVector = &info.pool->m_BlockVector;
+        m_pBlockVectors = &m_PoolBlockVector;
+        m_PoolBlockVector->SetIncrementalSort(false);
+        m_PoolBlockVector->SortByFreeSize();
+    }
+    else
+    {
+        m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
+        m_PoolBlockVector = VMA_NULL;
+        m_pBlockVectors = hAllocator->m_pBlockVectors;
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            VmaBlockVector* vector = m_pBlockVectors[i];
+            if (vector != VMA_NULL)
+            {
+                vector->SetIncrementalSort(false);
+                vector->SortByFreeSize();
+            }
+        }
+    }
+
+    switch (m_Algorithm)
+    {
+    case 0: // Default algorithm
+        m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
+        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+        break;
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+        m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
+        break;
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        if (hAllocator->GetBufferImageGranularity() > 1)
+        {
+            m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
+        }
+        break;
+    }
+}
+
+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
+{
+    if (m_PoolBlockVector != VMA_NULL)
+    {
+        m_PoolBlockVector->SetIncrementalSort(true);
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            VmaBlockVector* vector = m_pBlockVectors[i];
+            if (vector != VMA_NULL)
+                vector->SetIncrementalSort(true);
+        }
+    }
+
+    if (m_AlgorithmState)
+    {
+        switch (m_Algorithm)
+        {
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
+            break;
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+            vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
+{
+    if (m_PoolBlockVector != VMA_NULL)
+    {
+        VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
+
+        if (m_PoolBlockVector->GetBlockCount() > 1)
+            ComputeDefragmentation(*m_PoolBlockVector, 0);
+        else if (m_PoolBlockVector->GetBlockCount() == 1)
+            ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
+    }
+    else
+    {
+        for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
+        {
+            if (m_pBlockVectors[i] != VMA_NULL)
+            {
+                VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
+
+                if (m_pBlockVectors[i]->GetBlockCount() > 1)
+                {
+                    if (ComputeDefragmentation(*m_pBlockVectors[i], i))
+                        break;
+                }
+                else if (m_pBlockVectors[i]->GetBlockCount() == 1)
+                {
+                    if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
+                        break;
+                }
+            }
+        }
+    }
+
+    moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
+    if (moveInfo.moveCount > 0)
+    {
+        moveInfo.pMoves = m_Moves.data();
+        return VK_INCOMPLETE;
+    }
+
+    moveInfo.pMoves = VMA_NULL;
+    return VK_SUCCESS;
+}
+
+VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
+{
+    VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
+
+    VkResult result = VK_SUCCESS;
+    VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
+    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
+    VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
+
+    VmaAllocator allocator = VMA_NULL;
+    for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
+    {
+        VmaDefragmentationMove& move = moveInfo.pMoves[i];
+        size_t prevCount = 0, currentCount = 0;
+        VkDeviceSize freedBlockSize = 0;
+
+        uint32_t vectorIndex;
+        VmaBlockVector* vector;
+        if (m_PoolBlockVector != VMA_NULL)
+        {
+            vectorIndex = 0;
+            vector = m_PoolBlockVector;
+        }
+        else
+        {
+            vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
+            vector = m_pBlockVectors[vectorIndex];
+            VMA_ASSERT(vector != VMA_NULL);
+        }
+
+        switch (move.operation)
+        {
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
+        {
+            uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
+            if (mapCount > 0)
+            {
+                allocator = vector->m_hAllocator;
+                VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
+                bool notPresent = true;
+                for (FragmentedBlock& block : mappedBlocks)
+                {
+                    if (block.block == newMapBlock)
+                    {
+                        notPresent = false;
+                        block.data += mapCount;
+                        break;
+                    }
+                }
+                if (notPresent)
+                    mappedBlocks.push_back({ mapCount, newMapBlock });
+            }
+
+            // Scope for locks, Free has its own lock
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                prevCount = vector->GetBlockCount();
+                freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.dstTmpAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                currentCount = vector->GetBlockCount();
+            }
+
+            result = VK_INCOMPLETE;
+            break;
+        }
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
+        {
+            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+            --m_PassStats.allocationsMoved;
+            vector->Free(move.dstTmpAllocation);
+
+            VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
+            bool notPresent = true;
+            for (const FragmentedBlock& block : immovableBlocks)
+            {
+                if (block.block == newBlock)
+                {
+                    notPresent = false;
+                    break;
+                }
+            }
+            if (notPresent)
+                immovableBlocks.push_back({ vectorIndex, newBlock });
+            break;
+        }
+        case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
+        {
+            m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
+            --m_PassStats.allocationsMoved;
+            // Scope for locks, Free has its own lock
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                prevCount = vector->GetBlockCount();
+                freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.srcAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                currentCount = vector->GetBlockCount();
+            }
+            freedBlockSize *= prevCount - currentCount;
+
+            VkDeviceSize dstBlockSize;
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
+            }
+            vector->Free(move.dstTmpAllocation);
+            {
+                VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+                freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
+                currentCount = vector->GetBlockCount();
+            }
+
+            result = VK_INCOMPLETE;
+            break;
+        }
+        default:
+            VMA_ASSERT(0);
+        }
+
+        if (prevCount > currentCount)
+        {
+            size_t freedBlocks = prevCount - currentCount;
+            m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
+            m_PassStats.bytesFreed += freedBlockSize;
+        }
+
+        switch (m_Algorithm)
+        {
+        case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        {
+            if (m_AlgorithmState != VMA_NULL)
+            {
+                // Avoid unnecessary tries to allocate when new free block is available
+                StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
+                if (state.firstFreeBlock != SIZE_MAX)
+                {
+                    const size_t diff = prevCount - currentCount;
+                    if (state.firstFreeBlock >= diff)
+                    {
+                        state.firstFreeBlock -= diff;
+                        if (state.firstFreeBlock != 0)
+                            state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
+                    }
+                    else
+                        state.firstFreeBlock = 0;
+                }
+            }
+        }
+        }
+    }
+    moveInfo.moveCount = 0;
+    moveInfo.pMoves = VMA_NULL;
+    m_Moves.clear();
+
+    // Update stats
+    m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
+    m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
+    m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
+    m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
+    m_PassStats = { 0 };
+
+    // Move blocks with immovable allocations according to algorithm
+    if (immovableBlocks.size() > 0)
+    {
+        do
+        {
+            if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
+            {
+                if (m_AlgorithmState != VMA_NULL)
+                {
+                    bool swapped = false;
+                    // Move to the start of free blocks range
+                    for (const FragmentedBlock& block : immovableBlocks)
+                    {
+                        StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
+                        if (state.operation != StateExtensive::Operation::Cleanup)
+                        {
+                            VmaBlockVector* vector = m_pBlockVectors[block.data];
+                            VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+
+                            for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
+                            {
+                                if (vector->GetBlock(i) == block.block)
+                                {
+                                    VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
+                                    if (state.firstFreeBlock != SIZE_MAX)
+                                    {
+                                        if (i + 1 < state.firstFreeBlock)
+                                        {
+                                            if (state.firstFreeBlock > 1)
+                                                VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
+                                            else
+                                                --state.firstFreeBlock;
+                                        }
+                                    }
+                                    swapped = true;
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                    if (swapped)
+                        result = VK_INCOMPLETE;
+                    break;
+                }
+            }
+
+            // Move to the beginning
+            for (const FragmentedBlock& block : immovableBlocks)
+            {
+                VmaBlockVector* vector = m_pBlockVectors[block.data];
+                VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
+
+                for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
+                {
+                    if (vector->GetBlock(i) == block.block)
+                    {
+                        VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
+                        break;
+                    }
+                }
+            }
+        } while (false);
+    }
+
+    // Bulk-map destination blocks
+    for (const FragmentedBlock& block : mappedBlocks)
+    {
+        VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
+        VMA_ASSERT(res == VK_SUCCESS);
+    }
+    return result;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
+{
+    switch (m_Algorithm)
+    {
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
+        return ComputeDefragmentation_Fast(vector);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
+        return ComputeDefragmentation_Balanced(vector, index, true);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
+        return ComputeDefragmentation_Full(vector);
+    case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
+        return ComputeDefragmentation_Extensive(vector, index);
+    default:
+        VMA_ASSERT(0);
+        return ComputeDefragmentation_Balanced(vector, index, true);
+    }
+}
+
+VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
+    VmaAllocHandle handle, VmaBlockMetadata* metadata)
+{
+    MoveAllocationData moveData;
+    moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
+    moveData.size = moveData.move.srcAllocation->GetSize();
+    moveData.alignment = moveData.move.srcAllocation->GetAlignment();
+    moveData.type = moveData.move.srcAllocation->GetSuballocationType();
+    moveData.flags = 0;
+
+    if (moveData.move.srcAllocation->IsPersistentMap())
+        moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
+    if (moveData.move.srcAllocation->IsMappingAllowed())
+        moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
+
+    return moveData;
+}
+
+VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
+{
+    // Ignore allocation if it will exceed max size for copy
+    if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
+    {
+        if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
+            return CounterStatus::Ignore;
+        else
+            return CounterStatus::End;
+    }
+    return CounterStatus::Pass;
+}
+
+bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
+{
+    m_PassStats.bytesMoved += bytes;
+    // Early return when max found
+    if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
+    {
+        VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
+            m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
+        return true;
+    }
+    return false;
+}
+
+bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
+{
+    VmaBlockMetadata* metadata = block->m_pMetadata;
+
+    for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+        handle != VK_NULL_HANDLE;
+        handle = metadata->GetNextAllocation(handle))
+    {
+        MoveAllocationData moveData = GetMoveData(handle, metadata);
+        // Ignore newly created allocations by defragmentation algorithm
+        if (moveData.move.srcAllocation->GetUserData() == this)
+            continue;
+        switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+        {
+        case CounterStatus::Ignore:
+            continue;
+        case CounterStatus::End:
+            return true;
+        case CounterStatus::Pass:
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+
+        VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
+        if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
+        {
+            VmaAllocationRequest request = {};
+            if (metadata->CreateAllocationRequest(
+                moveData.size,
+                moveData.alignment,
+                false,
+                moveData.type,
+                VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+                &request))
+            {
+                if (metadata->GetAllocationOffset(request.allocHandle) < offset)
+                {
+                    if (vector.CommitAllocationRequest(
+                        request,
+                        block,
+                        moveData.alignment,
+                        moveData.flags,
+                        this,
+                        moveData.type,
+                        &moveData.move.dstTmpAllocation) == VK_SUCCESS)
+                    {
+                        m_Moves.push_back(moveData.move);
+                        if (IncrementCounters(moveData.size))
+                            return true;
+                    }
+                }
+            }
+        }
+    }
+    return false;
+}
+
+bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
+{
+    for (; start < end; ++start)
+    {
+        VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
+        if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
+        {
+            if (vector.AllocateFromBlock(dstBlock,
+                data.size,
+                data.alignment,
+                data.flags,
+                this,
+                data.type,
+                0,
+                &data.move.dstTmpAllocation) == VK_SUCCESS)
+            {
+                m_Moves.push_back(data.move);
+                if (IncrementCounters(data.size))
+                    return true;
+                break;
+            }
+        }
+    }
+    return false;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
+{
+    // Move only between blocks
+
+    // Go through allocations in last blocks and try to fit them inside first ones
+    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
+    {
+        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
+
+        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+            handle != VK_NULL_HANDLE;
+            handle = metadata->GetNextAllocation(handle))
+        {
+            MoveAllocationData moveData = GetMoveData(handle, metadata);
+            // Ignore newly created allocations by defragmentation algorithm
+            if (moveData.move.srcAllocation->GetUserData() == this)
+                continue;
+            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+            {
+            case CounterStatus::Ignore:
+                continue;
+            case CounterStatus::End:
+                return true;
+            case CounterStatus::Pass:
+                break;
+            default:
+                VMA_ASSERT(0);
+            }
+
+            // Check all previous blocks for free space
+            if (AllocInOtherBlock(0, i, moveData, vector))
+                return true;
+        }
+    }
+    return false;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
+{
+    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
+    // if not possible: realloc within single block to minimize offset (exclude offset == 0),
+    // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
+    VMA_ASSERT(m_AlgorithmState != VMA_NULL);
+
+    StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
+    if (update && vectorState.avgAllocSize == UINT64_MAX)
+        UpdateVectorStatistics(vector, vectorState);
+
+    const size_t startMoveCount = m_Moves.size();
+    VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
+    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
+    {
+        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
+        VmaBlockMetadata* metadata = block->m_pMetadata;
+        VkDeviceSize prevFreeRegionSize = 0;
+
+        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+            handle != VK_NULL_HANDLE;
+            handle = metadata->GetNextAllocation(handle))
+        {
+            MoveAllocationData moveData = GetMoveData(handle, metadata);
+            // Ignore newly created allocations by defragmentation algorithm
+            if (moveData.move.srcAllocation->GetUserData() == this)
+                continue;
+            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+            {
+            case CounterStatus::Ignore:
+                continue;
+            case CounterStatus::End:
+                return true;
+            case CounterStatus::Pass:
+                break;
+            default:
+                VMA_ASSERT(0);
+            }
+
+            // Check all previous blocks for free space
+            const size_t prevMoveCount = m_Moves.size();
+            if (AllocInOtherBlock(0, i, moveData, vector))
+                return true;
+
+            VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
+            // If no room found then realloc within block for lower offset
+            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
+            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
+            {
+                // Check if realloc will make sense
+                if (prevFreeRegionSize >= minimalFreeRegion ||
+                    nextFreeRegionSize >= minimalFreeRegion ||
+                    moveData.size <= vectorState.avgFreeSize ||
+                    moveData.size <= vectorState.avgAllocSize)
+                {
+                    VmaAllocationRequest request = {};
+                    if (metadata->CreateAllocationRequest(
+                        moveData.size,
+                        moveData.alignment,
+                        false,
+                        moveData.type,
+                        VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+                        &request))
+                    {
+                        if (metadata->GetAllocationOffset(request.allocHandle) < offset)
+                        {
+                            if (vector.CommitAllocationRequest(
+                                request,
+                                block,
+                                moveData.alignment,
+                                moveData.flags,
+                                this,
+                                moveData.type,
+                                &moveData.move.dstTmpAllocation) == VK_SUCCESS)
+                            {
+                                m_Moves.push_back(moveData.move);
+                                if (IncrementCounters(moveData.size))
+                                    return true;
+                            }
+                        }
+                    }
+                }
+            }
+            prevFreeRegionSize = nextFreeRegionSize;
+        }
+    }
+
+    // No moves performed, update statistics to current vector state
+    if (startMoveCount == m_Moves.size() && !update)
+    {
+        vectorState.avgAllocSize = UINT64_MAX;
+        return ComputeDefragmentation_Balanced(vector, index, false);
+    }
+    return false;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
+{
+    // Go over every allocation and try to fit it in previous blocks at lowest offsets,
+    // if not possible: realloc within single block to minimize offset (exclude offset == 0)
+
+    for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
+    {
+        VmaDeviceMemoryBlock* block = vector.GetBlock(i);
+        VmaBlockMetadata* metadata = block->m_pMetadata;
+
+        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+            handle != VK_NULL_HANDLE;
+            handle = metadata->GetNextAllocation(handle))
+        {
+            MoveAllocationData moveData = GetMoveData(handle, metadata);
+            // Ignore newly created allocations by defragmentation algorithm
+            if (moveData.move.srcAllocation->GetUserData() == this)
+                continue;
+            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+            {
+            case CounterStatus::Ignore:
+                continue;
+            case CounterStatus::End:
+                return true;
+            case CounterStatus::Pass:
+                break;
+            default:
+                VMA_ASSERT(0);
+            }
+
+            // Check all previous blocks for free space
+            const size_t prevMoveCount = m_Moves.size();
+            if (AllocInOtherBlock(0, i, moveData, vector))
+                return true;
+
+            // If no room found then realloc within block for lower offset
+            VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
+            if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
+            {
+                VmaAllocationRequest request = {};
+                if (metadata->CreateAllocationRequest(
+                    moveData.size,
+                    moveData.alignment,
+                    false,
+                    moveData.type,
+                    VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
+                    &request))
+                {
+                    if (metadata->GetAllocationOffset(request.allocHandle) < offset)
+                    {
+                        if (vector.CommitAllocationRequest(
+                            request,
+                            block,
+                            moveData.alignment,
+                            moveData.flags,
+                            this,
+                            moveData.type,
+                            &moveData.move.dstTmpAllocation) == VK_SUCCESS)
+                        {
+                            m_Moves.push_back(moveData.move);
+                            if (IncrementCounters(moveData.size))
+                                return true;
+                        }
+                    }
+                }
+            }
+        }
+    }
+    return false;
+}
+
+bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
+{
+    // First free single block, then populate it to the brim, then free another block, and so on
+
+    // Fallback to previous algorithm since without granularity conflicts it can achieve max packing
+    if (vector.m_BufferImageGranularity == 1)
+        return ComputeDefragmentation_Full(vector);
+
+    VMA_ASSERT(m_AlgorithmState != VMA_NULL);
+
+    StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
+
+    bool texturePresent = false, bufferPresent = false, otherPresent = false;
+    switch (vectorState.operation)
+    {
+    case StateExtensive::Operation::Done: // Vector defragmented
+        return false;
+    case StateExtensive::Operation::FindFreeBlockBuffer:
+    case StateExtensive::Operation::FindFreeBlockTexture:
+    case StateExtensive::Operation::FindFreeBlockAll:
+    {
+        // No more blocks to free, just perform fast realloc and move to cleanup
+        if (vectorState.firstFreeBlock == 0)
+        {
+            vectorState.operation = StateExtensive::Operation::Cleanup;
+            return ComputeDefragmentation_Fast(vector);
+        }
+
+        // No free blocks, have to clear last one
+        size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
+        VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
+
+        const size_t prevMoveCount = m_Moves.size();
+        for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
+            handle != VK_NULL_HANDLE;
+            handle = freeMetadata->GetNextAllocation(handle))
+        {
+            MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
+            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+            {
+            case CounterStatus::Ignore:
+                continue;
+            case CounterStatus::End:
+                return true;
+            case CounterStatus::Pass:
+                break;
+            default:
+                VMA_ASSERT(0);
+            }
+
+            // Check all previous blocks for free space
+            if (AllocInOtherBlock(0, last, moveData, vector))
+            {
+                // Full clear performed already
+                if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
+                    reinterpret_cast<size_t*>(m_AlgorithmState)[index] = last;
+                return true;
+            }
+        }
+
+        if (prevMoveCount == m_Moves.size())
+        {
+            // Cannot perform full clear, have to move data in other blocks around
+            if (last != 0)
+            {
+                for (size_t i = last - 1; i; --i)
+                {
+                    if (ReallocWithinBlock(vector, vector.GetBlock(i)))
+                        return true;
+                }
+            }
+
+            if (prevMoveCount == m_Moves.size())
+            {
+                // No possible reallocs within blocks, try to move them around fast
+                return ComputeDefragmentation_Fast(vector);
+            }
+        }
+        else
+        {
+            switch (vectorState.operation)
+            {
+            case StateExtensive::Operation::FindFreeBlockBuffer:
+                vectorState.operation = StateExtensive::Operation::MoveBuffers;
+                break;
+            case StateExtensive::Operation::FindFreeBlockTexture:
+                vectorState.operation = StateExtensive::Operation::MoveTextures;
+                break;
+            case StateExtensive::Operation::FindFreeBlockAll:
+                vectorState.operation = StateExtensive::Operation::MoveAll;
+                break;
+            default:
+                VMA_ASSERT(0);
+                vectorState.operation = StateExtensive::Operation::MoveTextures;
+            }
+            vectorState.firstFreeBlock = last;
+            // Nothing done, block found without reallocations, can perform another realloc in the same pass
+            return ComputeDefragmentation_Extensive(vector, index);
+        }
+        break;
+    }
+    case StateExtensive::Operation::MoveTextures:
+    {
+        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
+            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
+        {
+            if (texturePresent)
+            {
+                vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
+                return ComputeDefragmentation_Extensive(vector, index);
+            }
+
+            if (!bufferPresent && !otherPresent)
+            {
+                vectorState.operation = StateExtensive::Operation::Cleanup;
+                break;
+            }
+
+            // No more textures to move, check buffers
+            vectorState.operation = StateExtensive::Operation::MoveBuffers;
+            bufferPresent = false;
+            otherPresent = false;
+        }
+        else
+            break;
+    }
+    case StateExtensive::Operation::MoveBuffers:
+    {
+        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
+            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
+        {
+            if (bufferPresent)
+            {
+                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
+                return ComputeDefragmentation_Extensive(vector, index);
+            }
+
+            if (!otherPresent)
+            {
+                vectorState.operation = StateExtensive::Operation::Cleanup;
+                break;
+            }
+
+            // No more buffers to move, check all others
+            vectorState.operation = StateExtensive::Operation::MoveAll;
+            otherPresent = false;
+        }
+        else
+            break;
+    }
+    case StateExtensive::Operation::MoveAll:
+    {
+        if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
+            vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
+        {
+            if (otherPresent)
+            {
+                vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
+                return ComputeDefragmentation_Extensive(vector, index);
+            }
+            // Everything moved
+            vectorState.operation = StateExtensive::Operation::Cleanup;
+        }
+        break;
+    }
+    case StateExtensive::Operation::Cleanup:
+        // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
+        break;
+    }
+
+    if (vectorState.operation == StateExtensive::Operation::Cleanup)
+    {
+        // All other work done, pack data in blocks even tighter if possible
+        const size_t prevMoveCount = m_Moves.size();
+        for (size_t i = 0; i < vector.GetBlockCount(); ++i)
+        {
+            if (ReallocWithinBlock(vector, vector.GetBlock(i)))
+                return true;
+        }
+
+        if (prevMoveCount == m_Moves.size())
+            vectorState.operation = StateExtensive::Operation::Done;
+    }
+    return false;
+}
+
+void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
+{
+    size_t allocCount = 0;
+    size_t freeCount = 0;
+    state.avgFreeSize = 0;
+    state.avgAllocSize = 0;
+
+    for (size_t i = 0; i < vector.GetBlockCount(); ++i)
+    {
+        VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
+
+        allocCount += metadata->GetAllocationCount();
+        freeCount += metadata->GetFreeRegionsCount();
+        state.avgFreeSize += metadata->GetSumFreeSize();
+        state.avgAllocSize += metadata->GetSize();
+    }
+
+    state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
+    state.avgFreeSize /= freeCount;
+}
+
+bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
+    VmaBlockVector& vector, size_t firstFreeBlock,
+    bool& texturePresent, bool& bufferPresent, bool& otherPresent)
+{
+    const size_t prevMoveCount = m_Moves.size();
+    for (size_t i = firstFreeBlock; i;)
+    {
+        VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
+        VmaBlockMetadata* metadata = block->m_pMetadata;
+
+        for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
+            handle != VK_NULL_HANDLE;
+            handle = metadata->GetNextAllocation(handle))
+        {
+            MoveAllocationData moveData = GetMoveData(handle, metadata);
+            // Ignore newly created allocations by defragmentation algorithm
+            if (moveData.move.srcAllocation->GetUserData() == this)
+                continue;
+            switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
+            {
+            case CounterStatus::Ignore:
+                continue;
+            case CounterStatus::End:
+                return true;
+            case CounterStatus::Pass:
+                break;
+            default:
+                VMA_ASSERT(0);
+            }
+
+            // Move only single type of resources at once
+            if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
+            {
+                // Try to fit allocation into free blocks
+                if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
+                    return false;
+            }
+
+            if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
+                texturePresent = true;
+            else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
+                bufferPresent = true;
+            else
+                otherPresent = true;
+        }
+    }
+    return prevMoveCount == m_Moves.size();
+}
+#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
+
+#ifndef _VMA_POOL_T_FUNCTIONS
+VmaPool_T::VmaPool_T(
+    VmaAllocator hAllocator,
+    const VmaPoolCreateInfo& createInfo,
+    VkDeviceSize preferredBlockSize)
+    : m_BlockVector(
+        hAllocator,
+        this, // hParentPool
+        createInfo.memoryTypeIndex,
+        createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
+        createInfo.minBlockCount,
+        createInfo.maxBlockCount,
+        (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
+        createInfo.blockSize != 0, // explicitBlockSize
+        createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
+        createInfo.priority,
+        VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
+        createInfo.pMemoryAllocateNext),
+    m_Id(0),
+    m_Name(VMA_NULL) {}
+
+VmaPool_T::~VmaPool_T()
+{
+    VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
+}
+
+void VmaPool_T::SetName(const char* pName)
+{
+    const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
+    VmaFreeString(allocs, m_Name);
+
+    if (pName != VMA_NULL)
+    {
+        m_Name = VmaCreateStringCopy(allocs, pName);
+    }
+    else
+    {
+        m_Name = VMA_NULL;
+    }
+}
+#endif // _VMA_POOL_T_FUNCTIONS
+
+#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
+VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
+    m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
+    m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
+    m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
+    m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
+    m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
+    m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
+    m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
+    m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
+    m_hDevice(pCreateInfo->device),
+    m_hInstance(pCreateInfo->instance),
+    m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
+    m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
+        *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
+    m_AllocationObjectAllocator(&m_AllocationCallbacks),
+    m_HeapSizeLimitMask(0),
+    m_DeviceMemoryCount(0),
+    m_PreferredLargeHeapBlockSize(0),
+    m_PhysicalDevice(pCreateInfo->physicalDevice),
+    m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
+    m_NextPoolId(0),
+    m_GlobalMemoryTypeBits(UINT32_MAX)
+{
+    if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        m_UseKhrDedicatedAllocation = false;
+        m_UseKhrBindMemory2 = false;
+    }
+
+    if(VMA_DEBUG_DETECT_CORRUPTION)
+    {
+        // Needs to be a multiple of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
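+        // A minimal compile-time configuration sketch (values illustrative; both
+        // macros must be defined before including this header for the check below
+        // to be active):
+        //     #define VMA_DEBUG_MARGIN 16
+        //     #define VMA_DEBUG_DETECT_CORRUPTION 1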
+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); + } + + VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); + + if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + { +#if !(VMA_DEDICATED_ALLOCATION) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); + } +#endif +#if !(VMA_BIND_MEMORY2) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); + } +#endif + } +#if !(VMA_MEMORY_BUDGET) + if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); + } +#endif +#if !(VMA_BUFFER_DEVICE_ADDRESS) + if(m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif +#if VMA_VULKAN_VERSION < 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1002000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if VMA_VULKAN_VERSION < 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); + } +#endif +#if !(VMA_MEMORY_PRIORITY) + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); + } +#endif + + memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); + memset(&m_MemProps, 0, sizeof(m_MemProps)); + + memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors)); + memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions)); + +#if VMA_EXTERNAL_MEMORY + memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + { + m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; + m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; + m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree; + } + + ImportVulkanFunctions(pCreateInfo->pVulkanFunctions); + + (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties); + (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps); + + VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT)); + VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); + VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); + + 
m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? + pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + + m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); + +#if VMA_EXTERNAL_MEMORY + if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) + { + memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, + sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); + } +#endif // #if VMA_EXTERNAL_MEMORY + + if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + { + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; + if(limit != VK_WHOLE_SIZE) + { + m_HeapSizeLimitMask |= 1u << heapIndex; + if(limit < m_MemProps.memoryHeaps[heapIndex].size) + { + m_MemProps.memoryHeaps[heapIndex].size = limit; + } + } + } + } + + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + // Create only supported types + if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) + { + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); + m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( + this, + VK_NULL_HANDLE, // hParentPool + memTypeIndex, + preferredBlockSize, + 0, + SIZE_MAX, + GetBufferImageGranularity(), + false, // explicitBlockSize + 0, // algorithm + 0.5f, // priority (0.5 is the default per Vulkan spec) + GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment + VMA_NULL); // // pMemoryAllocateNext + // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, + // becase minBlockCount is 0. + } + } +} + +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +{ + VkResult res = VK_SUCCESS; + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET + + return res; +} + +VmaAllocator_T::~VmaAllocator_T() +{ + VMA_ASSERT(m_Pools.IsEmpty()); + + for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) + { + vma_delete(this, m_pBlockVectors[memTypeIndex]); + } +} + +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +{ +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Static(); +#endif + + if(pVulkanFunctions != VMA_NULL) + { + ImportVulkanFunctions_Custom(pVulkanFunctions); + } + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + ImportVulkanFunctions_Dynamic(); +#endif + + ValidateVulkanFunctions(); +} + +#if VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Static() +{ + // Vulkan 1.0 + m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr; + m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr; + m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties; + m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory; + m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory; + m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory; + m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory; + m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges; + 
m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges; + m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory; + m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory; + m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements; + m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements; + m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer; + m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer; + m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage; + m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage; + m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer; + + // Vulkan 1.1 +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; + m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; + m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2; + m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2; + m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; + m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; + } +#endif +} + +#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +{ + VMA_ASSERT(pVulkanFunctions != VMA_NULL); + +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; + + VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties); + VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties); + VMA_COPY_IF_NOT_NULL(vkAllocateMemory); + VMA_COPY_IF_NOT_NULL(vkFreeMemory); + VMA_COPY_IF_NOT_NULL(vkMapMemory); + VMA_COPY_IF_NOT_NULL(vkUnmapMemory); + VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges); + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory); + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkCreateBuffer); + VMA_COPY_IF_NOT_NULL(vkDestroyBuffer); + VMA_COPY_IF_NOT_NULL(vkCreateImage); + VMA_COPY_IF_NOT_NULL(vkDestroyImage); + VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR); + VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR); +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR); + VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR); +#endif + +#if VMA_MEMORY_BUDGET + 
VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR); +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements); + VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements); +#endif + +#undef VMA_COPY_IF_NOT_NULL +} + +#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ImportVulkanFunctions_Dynamic() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && + "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " + "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " + "Other members can be null."); + +#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); +#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ + if(m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ + (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); + + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties"); + VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory"); + VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory"); + VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory"); + VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory"); + VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory"); + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer"); + VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer"); + VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage"); + VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage"); + VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); + +#if VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2"); + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, 
"vkGetPhysicalDeviceMemoryProperties2"); + } +#endif + +#if VMA_DEDICATED_ALLOCATION + if(m_UseKhrDedicatedAllocation) + { + VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); + VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); + } +#endif + +#if VMA_BIND_MEMORY2 + if(m_UseKhrBindMemory2) + { + VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); + VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); + } +#endif // #if VMA_BIND_MEMORY2 + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); + } +#endif // #if VMA_MEMORY_BUDGET + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); + VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); + } +#endif + +#undef VMA_FETCH_DEVICE_FUNC +#undef VMA_FETCH_INSTANCE_FUNC +} + +#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 + +void VmaAllocator_T::ValidateVulkanFunctions() +{ + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + { + VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); + } +#endif + +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + { + VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); + VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); + } +#endif + +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); + } +#endif + +#if VMA_VULKAN_VERSION >= 1003000 + if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + { + 
VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
+        VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
+    }
+#endif
+}
+
+VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
+{
+    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+    const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+    const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
+    return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
+}
+
+VkResult VmaAllocator_T::AllocateMemoryOfType(
+    VmaPool pool,
+    VkDeviceSize size,
+    VkDeviceSize alignment,
+    bool dedicatedPreferred,
+    VkBuffer dedicatedBuffer,
+    VkImage dedicatedImage,
+    VkFlags dedicatedBufferImageUsage,
+    const VmaAllocationCreateInfo& createInfo,
+    uint32_t memTypeIndex,
+    VmaSuballocationType suballocType,
+    VmaDedicatedAllocationList& dedicatedAllocations,
+    VmaBlockVector& blockVector,
+    size_t allocationCount,
+    VmaAllocation* pAllocations)
+{
+    VMA_ASSERT(pAllocations != VMA_NULL);
+    VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
+
+    VmaAllocationCreateInfo finalCreateInfo = createInfo;
+    VkResult res = CalcMemTypeParams(
+        finalCreateInfo,
+        memTypeIndex,
+        size,
+        allocationCount);
+    if(res != VK_SUCCESS)
+        return res;
+
+    if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+    {
+        return AllocateDedicatedMemory(
+            pool,
+            size,
+            suballocType,
+            dedicatedAllocations,
+            memTypeIndex,
+            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+            (finalCreateInfo.flags &
+                (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
+            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
+            finalCreateInfo.pUserData,
+            finalCreateInfo.priority,
+            dedicatedBuffer,
+            dedicatedImage,
+            dedicatedBufferImageUsage,
+            allocationCount,
+            pAllocations,
+            blockVector.GetAllocationNextPtr());
+    }
+    else
+    {
+        const bool canAllocateDedicated =
+            (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
+            (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
+
+        if(canAllocateDedicated)
+        {
+            // Heuristics: Allocate dedicated memory if requested size is greater than half of preferred block size.
+            if(size > blockVector.GetPreferredBlockSize() / 2)
+            {
+                dedicatedPreferred = true;
+            }
+            // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
+            // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
+            // 3/4 of the maximum allocation count.
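+            // Illustrative example (hypothetical numbers, not part of the library):
+            // with the default 256 MiB preferred block size, a 200 MiB request passes
+            // the half-size test and dedicatedPreferred becomes true; but on a device
+            // reporting maxMemoryAllocationCount = 4096 with more than 3072 live
+            // VkDeviceMemory objects, the guard below clears it again.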
+            if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
+                m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
+            {
+                dedicatedPreferred = false;
+            }
+
+            if(dedicatedPreferred)
+            {
+                res = AllocateDedicatedMemory(
+                    pool,
+                    size,
+                    suballocType,
+                    dedicatedAllocations,
+                    memTypeIndex,
+                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+                    (finalCreateInfo.flags &
+                        (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
+                    (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
+                    finalCreateInfo.pUserData,
+                    finalCreateInfo.priority,
+                    dedicatedBuffer,
+                    dedicatedImage,
+                    dedicatedBufferImageUsage,
+                    allocationCount,
+                    pAllocations,
+                    blockVector.GetAllocationNextPtr());
+                if(res == VK_SUCCESS)
+                {
+                    // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
+                    VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+                    return VK_SUCCESS;
+                }
+            }
+        }
+
+        res = blockVector.Allocate(
+            size,
+            alignment,
+            finalCreateInfo,
+            suballocType,
+            allocationCount,
+            pAllocations);
+        if(res == VK_SUCCESS)
+            return VK_SUCCESS;
+
+        // Try dedicated memory.
+        if(canAllocateDedicated && !dedicatedPreferred)
+        {
+            res = AllocateDedicatedMemory(
+                pool,
+                size,
+                suballocType,
+                dedicatedAllocations,
+                memTypeIndex,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
+                (finalCreateInfo.flags &
+                    (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
+                (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
+                finalCreateInfo.pUserData,
+                finalCreateInfo.priority,
+                dedicatedBuffer,
+                dedicatedImage,
+                dedicatedBufferImageUsage,
+                allocationCount,
+                pAllocations,
+                blockVector.GetAllocationNextPtr());
+            if(res == VK_SUCCESS)
+            {
+                // Succeeded: AllocateDedicatedMemory function already filled pAllocations, nothing more to do here.
+                VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
+                return VK_SUCCESS;
+            }
+        }
+        // Everything failed: Return error code.
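+        // (Net order of attempts in this branch: dedicated memory first when it is
+        // preferred, then sub-allocation from the block vector, then dedicated memory
+        // as a last resort when it was not preferred.)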
+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); + return res; + } +} + +VkResult VmaAllocator_T::AllocateDedicatedMemory( + VmaPool pool, + VkDeviceSize size, + VmaSuballocationType suballocType, + VmaDedicatedAllocationList& dedicatedAllocations, + uint32_t memTypeIndex, + bool map, + bool isUserDataString, + bool isMappingAllowed, + bool canAliasMemory, + void* pUserData, + float priority, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + size_t allocationCount, + VmaAllocation* pAllocations, + const void* pNextChain) +{ + VMA_ASSERT(allocationCount > 0 && pAllocations); + + VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + allocInfo.memoryTypeIndex = memTypeIndex; + allocInfo.allocationSize = size; + allocInfo.pNext = pNextChain; + +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; + if(!canAliasMemory) + { + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + if(dedicatedBuffer != VK_NULL_HANDLE) + { + VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); + dedicatedAllocInfo.buffer = dedicatedBuffer; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + dedicatedAllocInfo.image = dedicatedImage; + VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); + } + } + } +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + +#if VMA_BUFFER_DEVICE_ADDRESS + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + if(m_UseKhrBufferDeviceAddress) + { + bool canContainBufferWithDeviceAddress = true; + if(dedicatedBuffer != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown + (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; + } + else if(dedicatedImage != VK_NULL_HANDLE) + { + canContainBufferWithDeviceAddress = false; + } + if(canContainBufferWithDeviceAddress) + { + allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; + VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); + } + } +#endif // #if VMA_BUFFER_DEVICE_ADDRESS + +#if VMA_MEMORY_PRIORITY + VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + if(m_UseExtMemoryPriority) + { + VMA_ASSERT(priority >= 0.f && priority <= 1.f); + priorityInfo.priority = priority; + VmaPnextChainPushFront(&allocInfo, &priorityInfo); + } +#endif // #if VMA_MEMORY_PRIORITY + +#if VMA_EXTERNAL_MEMORY + // Attach VkExportMemoryAllocateInfoKHR if necessary. 
+    VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
+    exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
+    if(exportMemoryAllocInfo.handleTypes != 0)
+    {
+        VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
+    }
+#endif // #if VMA_EXTERNAL_MEMORY
+
+    size_t allocIndex;
+    VkResult res = VK_SUCCESS;
+    for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        res = AllocateDedicatedMemoryPage(
+            pool,
+            size,
+            suballocType,
+            memTypeIndex,
+            allocInfo,
+            map,
+            isUserDataString,
+            isMappingAllowed,
+            pUserData,
+            pAllocations + allocIndex);
+        if(res != VK_SUCCESS)
+        {
+            break;
+        }
+    }
+
+    if(res == VK_SUCCESS)
+    {
+        for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+        {
+            dedicatedAllocations.Register(pAllocations[allocIndex]);
+        }
+        VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
+    }
+    else
+    {
+        // Free all already created allocations.
+        while(allocIndex--)
+        {
+            VmaAllocation currAlloc = pAllocations[allocIndex];
+            VkDeviceMemory hMemory = currAlloc->GetMemory();
+
+            /*
+            There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+            before vkFreeMemory.
+
+            if(currAlloc->GetMappedData() != VMA_NULL)
+            {
+                (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+            }
+            */
+
+            FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
+            m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
+            m_AllocationObjectAllocator.Free(currAlloc);
+        }
+
+        memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
+    }
+
+    return res;
+}
+
+VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
+    VmaPool pool,
+    VkDeviceSize size,
+    VmaSuballocationType suballocType,
+    uint32_t memTypeIndex,
+    const VkMemoryAllocateInfo& allocInfo,
+    bool map,
+    bool isUserDataString,
+    bool isMappingAllowed,
+    void* pUserData,
+    VmaAllocation* pAllocation)
+{
+    VkDeviceMemory hMemory = VK_NULL_HANDLE;
+    VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
+    if(res < 0)
+    {
+        VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
+        return res;
+    }
+
+    void* pMappedData = VMA_NULL;
+    if(map)
+    {
+        res = (*m_VulkanFunctions.vkMapMemory)(
+            m_hDevice,
+            hMemory,
+            0,
+            VK_WHOLE_SIZE,
+            0,
+            &pMappedData);
+        if(res < 0)
+        {
+            VMA_DEBUG_LOG(" vkMapMemory FAILED");
+            FreeVulkanMemory(memTypeIndex, size, hMemory);
+            return res;
+        }
+    }
+
+    *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
+    (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
+    if (isUserDataString)
+        (*pAllocation)->SetName(this, (const char*)pUserData);
+    else
+        (*pAllocation)->SetUserData(this, pUserData);
+    m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
+    if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
+    {
+        FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
+    }
+
+    return VK_SUCCESS;
+}
+
+void VmaAllocator_T::GetBufferMemoryRequirements(
+    VkBuffer hBuffer,
+    VkMemoryRequirements& memReq,
+    bool& requiresDedicatedAllocation,
+    bool& prefersDedicatedAllocation) const
+{
+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
+    if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
+    {
+        VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
+        memReqInfo.buffer = hBuffer;
+
+        VkMemoryDedicatedRequirementsKHR memDedicatedReq =
{ VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +void VmaAllocator_T::GetImageMemoryRequirements( + VkImage hImage, + VkMemoryRequirements& memReq, + bool& requiresDedicatedAllocation, + bool& prefersDedicatedAllocation) const +{ +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + { + VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + memReqInfo.image = hImage; + + VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + + VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VmaPnextChainPushFront(&memReq2, &memDedicatedReq); + + (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); + + memReq = memReq2.memoryRequirements; + requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + } + else +#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + { + (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); + requiresDedicatedAllocation = false; + prefersDedicatedAllocation = false; + } +} + +VkResult VmaAllocator_T::FindMemoryTypeIndex( + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkFlags bufImgUsage, + uint32_t* pMemoryTypeIndex) const +{ + memoryTypeBits &= GetGlobalMemoryTypeBits(); + + if(pAllocationCreateInfo->memoryTypeBits != 0) + { + memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; + } + + VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; + if(!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + *pMemoryTypeIndex = UINT32_MAX; + uint32_t minCost = UINT32_MAX; + for(uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) + { + // This memory type is acceptable according to memoryTypeBits bitmask. + if((memTypeBit & memoryTypeBits) != 0) + { + const VkMemoryPropertyFlags currFlags = + m_MemProps.memoryTypes[memTypeIndex].propertyFlags; + // This memory type contains requiredFlags. + if((requiredFlags & ~currFlags) == 0) + { + // Calculate cost as number of bits from preferredFlags not present in this memory type. + uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); + // Remember memory type with lowest cost. 
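+            // Worked example (hypothetical flags): with requiredFlags = HOST_VISIBLE,
+            // preferredFlags = DEVICE_LOCAL and notPreferredFlags = HOST_CACHED, a
+            // HOST_VISIBLE | HOST_COHERENT type costs 1 (DEVICE_LOCAL missing), while a
+            // DEVICE_LOCAL | HOST_VISIBLE type costs 0 and is returned immediately below.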
+ if(currCost < minCost) + { + *pMemoryTypeIndex = memTypeIndex; + if(currCost == 0) + { + return VK_SUCCESS; + } + minCost = currCost; + } + } + } + } + return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT; +} + +VkResult VmaAllocator_T::CalcMemTypeParams( + VmaAllocationCreateInfo& inoutCreateInfo, + uint32_t memTypeIndex, + VkDeviceSize size, + size_t allocationCount) +{ + // If memory type is not HOST_VISIBLE, disable MAPPED. + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + { + inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) + { + const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); + VmaBudget heapBudget = {}; + GetHeapBudgets(&heapBudget, heapIndex, 1); + if(heapBudget.usage + size * allocationCount > heapBudget.budget) + { + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } + } + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::CalcAllocationParams( + VmaAllocationCreateInfo& inoutCreateInfo, + bool dedicatedRequired, + bool dedicatedPreferred) +{ + VMA_ASSERT((inoutCreateInfo.flags & + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || + (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + { + VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + } + } + + // If memory is lazily allocated, it should be always dedicated. 
+ if(dedicatedRequired || + inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + if(inoutCreateInfo.pool != VK_NULL_HANDLE) + { + if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); + } + + if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && + (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + } + + // Non-auto USAGE values imply HOST_ACCESS flags. + // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. + // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. + // Otherwise they just protect from assert on mapping. + if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && + inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + { + if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + { + inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; + } + } + + return VK_SUCCESS; +} + +VkResult VmaAllocator_T::AllocateMemory( + const VkMemoryRequirements& vkMemReq, + bool requiresDedicatedAllocation, + bool prefersDedicatedAllocation, + VkBuffer dedicatedBuffer, + VkImage dedicatedImage, + VkFlags dedicatedBufferImageUsage, + const VmaAllocationCreateInfo& createInfo, + VmaSuballocationType suballocType, + size_t allocationCount, + VmaAllocation* pAllocations) +{ + memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); + + VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); + + if(vkMemReq.size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VmaAllocationCreateInfo createInfoFinal = createInfo; + VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); + if(res != VK_SUCCESS) + return res; + + if(createInfoFinal.pool != VK_NULL_HANDLE) + { + VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + return AllocateMemoryOfType( + createInfoFinal.pool, + vkMemReq.size, + vkMemReq.alignment, + prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + blockVector.GetMemoryTypeIndex(), + suballocType, + createInfoFinal.pool->m_DedicatedAllocations, + blockVector, + allocationCount, + pAllocations); + } + else + { + // Bit mask of memory Vulkan types acceptable for this allocation. + uint32_t memoryTypeBits = vkMemReq.memoryTypeBits; + uint32_t memTypeIndex = UINT32_MAX; + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + // Can't find any single memory type matching requirements. 
res is VK_ERROR_FEATURE_NOT_PRESENT. + if(res != VK_SUCCESS) + return res; + do + { + VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); + res = AllocateMemoryOfType( + VK_NULL_HANDLE, + vkMemReq.size, + vkMemReq.alignment, + requiresDedicatedAllocation || prefersDedicatedAllocation, + dedicatedBuffer, + dedicatedImage, + dedicatedBufferImageUsage, + createInfoFinal, + memTypeIndex, + suballocType, + m_DedicatedAllocations[memTypeIndex], + *blockVector, + allocationCount, + pAllocations); + // Allocation succeeded + if(res == VK_SUCCESS) + return VK_SUCCESS; + + // Remove old memTypeIndex from list of possibilities. + memoryTypeBits &= ~(1u << memTypeIndex); + // Find alternative memTypeIndex. + res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); + } while(res == VK_SUCCESS); + + // No other matching memory type index could be found. + // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. + return VK_ERROR_OUT_OF_DEVICE_MEMORY; + } +} + +void VmaAllocator_T::FreeMemory( + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + VMA_ASSERT(pAllocations); + + for(size_t allocIndex = allocationCount; allocIndex--; ) + { + VmaAllocation allocation = pAllocations[allocIndex]; + + if(allocation != VK_NULL_HANDLE) + { + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + { + FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); + } + + allocation->FreeName(this); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector* pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetParentPool(); + if(hPool != VK_NULL_HANDLE) + { + pBlockVector = &hPool->m_BlockVector; + } + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); + } + pBlockVector->Free(allocation); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + FreeDedicatedMemory(allocation); + break; + default: + VMA_ASSERT(0); + } + } + } +} + +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) +{ + // Initialize. + VmaClearDetailedStatistics(pStats->total); + for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + VmaClearDetailedStatistics(pStats->memoryType[i]); + for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + VmaClearDetailedStatistics(pStats->memoryHeap[i]); + + // Process default pools. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + if (pBlockVector != VMA_NULL) + pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Process custom pools. + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); + blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + } + + // Process dedicated allocations. 
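+    // (Dedicated allocations living in custom pools were already counted in the loop
+    // above; this loop covers only the ones registered in the default per-type lists.)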
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); + } + + // Sum from memory types to memory heaps. + for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; + VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); + } + + // Sum from memory heaps to total. + for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); + + VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + VMA_ASSERT(pStats->total.unusedRangeCount == 0 || + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); +} + +void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) +{ +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + if(m_Budget.m_OperationsSinceBudgetFetch < 30) + { + VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + { + outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + else + { + outBudgets->usage = 0; + } + + // Have to take MIN with heap size because explicit HeapSizeLimit is included in it. + outBudgets->budget = VMA_MIN( + m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size); + } + } + else + { + UpdateVulkanBudget(); // Outside of mutex lock + GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion + } + } + else +#endif + { + for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + { + const uint32_t heapIndex = firstHeap + i; + + outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex]; + outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex]; + outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; + outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; + + outBudgets->usage = outBudgets->statistics.blockBytes; + outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
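+            // Without VK_EXT_memory_budget there is no authoritative usage/budget query,
+            // so the bytes in our own blocks stand in for usage, and 80% of the heap
+            // size is assumed to be available to this process.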
+ } + } +} + +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +{ + pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); + pAllocationInfo->deviceMemory = hAllocation->GetMemory(); + pAllocationInfo->offset = hAllocation->GetOffset(); + pAllocationInfo->size = hAllocation->GetSize(); + pAllocationInfo->pMappedData = hAllocation->GetMappedData(); + pAllocationInfo->pUserData = hAllocation->GetUserData(); + pAllocationInfo->pName = hAllocation->GetName(); +} + +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +{ + VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); + + VmaPoolCreateInfo newCreateInfo = *pCreateInfo; + + // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. + if(pCreateInfo->pMemoryAllocateNext) + { + VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); + } + + if(newCreateInfo.maxBlockCount == 0) + { + newCreateInfo.maxBlockCount = SIZE_MAX; + } + if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + // Memory type index out of range or forbidden. + if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) + { + return VK_ERROR_FEATURE_NOT_PRESENT; + } + if(newCreateInfo.minAllocationAlignment > 0) + { + VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); + } + + const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex); + + *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); + + VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); + if(res != VK_SUCCESS) + { + vma_delete(this, *pPool); + *pPool = VMA_NULL; + return res; + } + + // Add to m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + (*pPool)->SetId(m_NextPoolId++); + m_Pools.PushBack(*pPool); + } + + return VK_SUCCESS; +} + +void VmaAllocator_T::DestroyPool(VmaPool pool) +{ + // Remove from m_Pools. + { + VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex); + m_Pools.Remove(pool); + } + + vma_delete(this, pool); +} + +void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) +{ + VmaClearStatistics(*pPoolStats); + pool->m_BlockVector.AddStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); +} + +void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) +{ + VmaClearDetailedStatistics(*pPoolStats); + pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); + pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats); +} + +void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) +{ + m_CurrentFrameIndex.store(frameIndex); + +#if VMA_MEMORY_BUDGET + if(m_UseExtMemoryBudget) + { + UpdateVulkanBudget(); + } +#endif // #if VMA_MEMORY_BUDGET +} + +VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool) +{ + return hPool->m_BlockVector.CheckCorruption(); +} + +VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) +{ + VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; + + // Process default pools. 
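+    // A block vector returns VK_ERROR_FEATURE_NOT_PRESENT when corruption detection
+    // is not enabled for it; that result becomes final only if no vector reports
+    // VK_SUCCESS (any other error is returned immediately).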
+    for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+    {
+        VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
+        if(pBlockVector != VMA_NULL)
+        {
+            VkResult localRes = pBlockVector->CheckCorruption();
+            switch(localRes)
+            {
+            case VK_ERROR_FEATURE_NOT_PRESENT:
+                break;
+            case VK_SUCCESS:
+                finalRes = VK_SUCCESS;
+                break;
+            default:
+                return localRes;
+            }
+        }
+    }
+
+    // Process custom pools.
+    {
+        VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
+        for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
+        {
+            if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+            {
+                VkResult localRes = pool->m_BlockVector.CheckCorruption();
+                switch(localRes)
+                {
+                case VK_ERROR_FEATURE_NOT_PRESENT:
+                    break;
+                case VK_SUCCESS:
+                    finalRes = VK_SUCCESS;
+                    break;
+                default:
+                    return localRes;
+                }
+            }
+        }
+    }
+
+    return finalRes;
+}
+
+VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
+{
+    AtomicTransactionalIncrement<uint32_t> deviceMemoryCountIncrement;
+    const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
+#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
+    if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
+    {
+        return VK_ERROR_TOO_MANY_OBJECTS;
+    }
+#endif
+
+    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
+
+    // HeapSizeLimit is in effect for this heap.
+    if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
+    {
+        const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
+        VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
+        for(;;)
+        {
+            const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
+            if(blockBytesAfterAllocation > heapSize)
+            {
+                return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+            }
+            if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
+            {
+                break;
+            }
+        }
+    }
+    else
+    {
+        m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
+    }
+    ++m_Budget.m_BlockCount[heapIndex];
+
+    // VULKAN CALL vkAllocateMemory.
+    VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
+
+    if(res == VK_SUCCESS)
+    {
+#if VMA_MEMORY_BUDGET
+        ++m_Budget.m_OperationsSinceBudgetFetch;
+#endif
+
+        // Informative callback.
+        if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
+        {
+            (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
+        }
+
+        deviceMemoryCountIncrement.Commit();
+    }
+    else
+    {
+        --m_Budget.m_BlockCount[heapIndex];
+        m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
+    }
+
+    return res;
+}
+
+void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
+{
+    // Informative callback.
+    if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
+    {
+        (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
+    }
+
+    // VULKAN CALL vkFreeMemory.
+    (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
+
+    const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
+    --m_Budget.m_BlockCount[heapIndex];
+    m_Budget.m_BlockBytes[heapIndex] -= size;
+
+    --m_DeviceMemoryCount;
+}
+
+VkResult VmaAllocator_T::BindVulkanBuffer(
+    VkDeviceMemory memory,
+    VkDeviceSize memoryOffset,
+    VkBuffer buffer,
+    const void* pNext)
+{
+    if(pNext != VMA_NULL)
+    {
+#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+            m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
+        {
+            VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
+            bindBufferMemoryInfo.pNext = pNext;
+            bindBufferMemoryInfo.buffer = buffer;
+            bindBufferMemoryInfo.memory = memory;
+            bindBufferMemoryInfo.memoryOffset = memoryOffset;
+            return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
+        }
+        else
+#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        {
+            return VK_ERROR_EXTENSION_NOT_PRESENT;
+        }
+    }
+    else
+    {
+        return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
+    }
+}
+
+VkResult VmaAllocator_T::BindVulkanImage(
+    VkDeviceMemory memory,
+    VkDeviceSize memoryOffset,
+    VkImage image,
+    const void* pNext)
+{
+    if(pNext != VMA_NULL)
+    {
+#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
+            m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
+        {
+            VkBindImageMemoryInfoKHR bindImageMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
+            bindImageMemoryInfo.pNext = pNext;
+            bindImageMemoryInfo.image = image;
+            bindImageMemoryInfo.memory = memory;
+            bindImageMemoryInfo.memoryOffset = memoryOffset;
+            return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindImageMemoryInfo);
+        }
+        else
+#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
+        {
+            return VK_ERROR_EXTENSION_NOT_PRESENT;
+        }
+    }
+    else
+    {
+        return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
+    }
+}
+
+VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
+{
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+    {
+        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+        char *pBytes = VMA_NULL;
+        VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
+        if(res == VK_SUCCESS)
+        {
+            *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
+            hAllocation->BlockAllocMap();
+        }
+        return res;
+    }
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        return hAllocation->DedicatedAllocMap(this, ppData);
+    default:
+        VMA_ASSERT(0);
+        return VK_ERROR_MEMORY_MAP_FAILED;
+    }
+}
+
+void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
+{
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+    {
+        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+        hAllocation->BlockAllocUnmap();
+        pBlock->Unmap(this, 1);
+    }
+    break;
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        hAllocation->DedicatedAllocUnmap(this);
+        break;
+    default:
+        VMA_ASSERT(0);
+    }
+}
+
+VkResult VmaAllocator_T::BindBufferMemory(
+    VmaAllocation hAllocation,
+    VkDeviceSize allocationLocalOffset,
+    VkBuffer hBuffer,
+    const void* pNext)
+{
+    VkResult res = VK_ERROR_UNKNOWN;
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
+        break;
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+    {
+        VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
+        VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
+        res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
+        break;
+    }
+    default:
+        VMA_ASSERT(0);
+    }
+    return res;
+}
+
+VkResult VmaAllocator_T::BindImageMemory(
+    VmaAllocation hAllocation,
+    VkDeviceSize allocationLocalOffset,
+    VkImage hImage,
+    const void* pNext)
+{
+    VkResult res = VK_ERROR_UNKNOWN;
+    switch(hAllocation->GetType())
+    {
+    case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
+        res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
+        break;
+    case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
+    {
+        VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
+        VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
+        res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
+        break;
+    }
+    default:
+        VMA_ASSERT(0);
+    }
+    return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
+    VmaAllocation hAllocation,
+    VkDeviceSize offset, VkDeviceSize size,
+    VMA_CACHE_OPERATION op)
+{
+    VkResult res = VK_SUCCESS;
+
+    VkMappedMemoryRange memRange = {};
+    if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
+    {
+        switch(op)
+        {
+        case VMA_CACHE_FLUSH:
+            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        case VMA_CACHE_INVALIDATE:
+            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
+    return res;
+}
+
+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
+    uint32_t allocationCount,
+    const VmaAllocation* allocations,
+    const VkDeviceSize* offsets, const VkDeviceSize* sizes,
+    VMA_CACHE_OPERATION op)
+{
+    typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
+    typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
+    RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
+
+    for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
+    {
+        const VmaAllocation alloc = allocations[allocIndex];
+        const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
+        const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
+        VkMappedMemoryRange newRange;
+        if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
+        {
+            ranges.push_back(newRange);
+        }
+    }
+
+    VkResult res = VK_SUCCESS;
+    if(!ranges.empty())
+    {
+        switch(op)
+        {
+        case VMA_CACHE_FLUSH:
+            res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+            break;
+        case VMA_CACHE_INVALIDATE:
+            res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
+            break;
+        default:
+            VMA_ASSERT(0);
+        }
+    }
+    // else: Just ignore this call.
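+    // (Allocations in HOST_COHERENT memory types contribute no ranges, so the whole
+    // call may degenerate to a no-op that returns VK_SUCCESS.)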
+    return res;
+}
+
+void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
+{
+    VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
+
+    const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
+    VmaPool parentPool = allocation->GetParentPool();
+    if(parentPool == VK_NULL_HANDLE)
+    {
+        // Default pool
+        m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
+    }
+    else
+    {
+        // Custom pool
+        parentPool->m_DedicatedAllocations.Unregister(allocation);
+    }
+
+    VkDeviceMemory hMemory = allocation->GetMemory();
+
+    /*
+    There is no need to call this, because the Vulkan spec allows skipping vkUnmapMemory
+    before vkFreeMemory.
+
+    if(allocation->GetMappedData() != VMA_NULL)
+    {
+        (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
+    }
+    */
+
+    FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
+
+    m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
+    m_AllocationObjectAllocator.Free(allocation);
+
+    VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
+}
+
+uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
+{
+    VkBufferCreateInfo dummyBufCreateInfo;
+    VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
+
+    uint32_t memoryTypeBits = 0;
+
+    // Create buffer.
+    VkBuffer buf = VK_NULL_HANDLE;
+    VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
+        m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
+    if(res == VK_SUCCESS)
+    {
+        // Query for supported memory types.
+        VkMemoryRequirements memReq;
+        (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
+        memoryTypeBits = memReq.memoryTypeBits;
+
+        // Destroy buffer.
+        (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
+    }
+
+    return memoryTypeBits;
+}
+
+uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
+{
+    // Make sure memory information is already fetched.
+    VMA_ASSERT(GetMemoryTypeCount() > 0);
+
+    uint32_t memoryTypeBits = UINT32_MAX;
+
+    if(!m_UseAmdDeviceCoherentMemory)
+    {
+        // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
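+        // Illustrative example (hypothetical bit positions): if types 9 and 10 carry
+        // the AMD device-coherent flag, the mask drops from 0xFFFFFFFF to 0xFFFFF9FF,
+        // and FindMemoryTypeIndex() ANDs every request with it.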
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + { + memoryTypeBits &= ~(1u << memTypeIndex); + } + } + } + + return memoryTypeBits; +} + +bool VmaAllocator_T::GetFlushOrInvalidateRange( + VmaAllocation allocation, + VkDeviceSize offset, VkDeviceSize size, + VkMappedMemoryRange& outRange) const +{ + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + { + const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; + const VkDeviceSize allocationSize = allocation->GetSize(); + VMA_ASSERT(offset <= allocationSize); + + outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE; + outRange.pNext = VMA_NULL; + outRange.memory = allocation->GetMemory(); + + switch(allocation->GetType()) + { + case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + outRange.size = allocationSize - outRange.offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + outRange.size = VMA_MIN( + VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize), + allocationSize - outRange.offset); + } + break; + case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + // 1. Still within this allocation. + outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); + if(size == VK_WHOLE_SIZE) + { + size = allocationSize - offset; + } + else + { + VMA_ASSERT(offset + size <= allocationSize); + } + outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize); + + // 2. Adjust to whole block. + const VkDeviceSize allocationOffset = allocation->GetOffset(); + VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0); + const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize(); + outRange.offset += allocationOffset; + outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset); + + break; + } + default: + VMA_ASSERT(0); + } + return true; + } + return false; +} + +#if VMA_MEMORY_BUDGET +void VmaAllocator_T::UpdateVulkanBudget() +{ + VMA_ASSERT(m_UseExtMemoryBudget); + + VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VmaPnextChainPushFront(&memProps, &budgetProps); + + GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); + + { + VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); + + for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + { + m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; + m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; + m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); + + // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. + if(m_Budget.m_VulkanBudget[heapIndex] == 0) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. 
+ } + else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + { + m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; + } + if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + { + m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + } + } + m_Budget.m_OperationsSinceBudgetFetch = 0; + } +} +#endif // VMA_MEMORY_BUDGET + +void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) +{ + if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + hAllocation->IsMappingAllowed() && + (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + { + void* pData = VMA_NULL; + VkResult res = Map(hAllocation, &pData); + if(res == VK_SUCCESS) + { + memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); + FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); + Unmap(hAllocation); + } + else + { + VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation."); + } + } +} + +uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() +{ + uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); + if(memoryTypeBits == UINT32_MAX) + { + memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); + m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); + } + return memoryTypeBits; +} + +#if VMA_STATS_STRING_ENABLED +void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +{ + json.WriteString("DefaultPools"); + json.BeginObject(); + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + if (pBlockVector != VMA_NULL) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginObject(); + { + json.WriteString("PreferredBlockSize"); + json.WriteNumber(pBlockVector->GetPreferredBlockSize()); + + json.WriteString("Blocks"); + pBlockVector->PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + dedicatedAllocList.BuildStatsString(json); + } + json.EndObject(); + } + } + } + json.EndObject(); + + json.WriteString("CustomPools"); + json.BeginObject(); + { + VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); + if (!m_Pools.IsEmpty()) + { + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + { + bool displayType = true; + size_t index = 0; + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + { + VmaBlockVector& blockVector = pool->m_BlockVector; + if (blockVector.GetMemoryTypeIndex() == memTypeIndex) + { + if (displayType) + { + json.BeginString("Type "); + json.ContinueString(memTypeIndex); + json.EndString(); + json.BeginArray(); + displayType = false; + } + + json.BeginObject(); + { + json.WriteString("Name"); + json.BeginString(); + json.ContinueString((uint64_t)index++); + if (pool->GetName()) + { + json.ContinueString(" - "); + json.ContinueString(pool->GetName()); + } + json.EndString(); + + json.WriteString("PreferredBlockSize"); + json.WriteNumber(blockVector.GetPreferredBlockSize()); + + json.WriteString("Blocks"); + blockVector.PrintDetailedMap(json); + + json.WriteString("DedicatedAllocations"); + pool->m_DedicatedAllocations.BuildStatsString(json); + } + json.EndObject(); + } + } + + if 
(!displayType) + json.EndArray(); + } + } + } + json.EndObject(); +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_ALLOCATOR_T_FUNCTIONS + + +#ifndef _VMA_PUBLIC_INTERFACE +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo* pCreateInfo, + VmaAllocator* pAllocator) +{ + VMA_ASSERT(pCreateInfo && pAllocator); + VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); + VMA_DEBUG_LOG("vmaCreateAllocator"); + *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); + VkResult result = (*pAllocator)->Init(pCreateInfo); + if(result < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); + *pAllocator = VK_NULL_HANDLE; + } + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator allocator) +{ + if(allocator != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyAllocator"); + VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, allocator); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +{ + VMA_ASSERT(allocator && pAllocatorInfo); + pAllocatorInfo->instance = allocator->m_hInstance; + pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice(); + pAllocatorInfo->device = allocator->m_hDevice; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator allocator, + const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceProperties); + *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator allocator, + const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) +{ + VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); + *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags* pFlags) +{ + VMA_ASSERT(allocator && pFlags); + VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); + *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator allocator, + uint32_t frameIndex) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->SetCurrentFrameIndex(frameIndex); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator allocator, + VmaTotalStatistics* pStats) +{ + VMA_ASSERT(allocator && pStats); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->CalculateStatistics(pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator allocator, + VmaBudget* pBudgets) +{ + VMA_ASSERT(allocator && pBudgets); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount()); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( + VmaAllocator allocator, + char** ppStatsString, + VkBool32 detailedMap) +{ + VMA_ASSERT(allocator && ppStatsString); + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VmaStringBuilder sb(allocator->GetAllocationCallbacks()); + { + VmaBudget budgets[VK_MAX_MEMORY_HEAPS]; + allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount()); + + 
VmaTotalStatistics stats;
+        allocator->CalculateStatistics(&stats);
+
+        VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
+        json.BeginObject();
+        {
+            json.WriteString("General");
+            json.BeginObject();
+            {
+                const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
+                const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
+
+                json.WriteString("API");
+                json.WriteString("Vulkan");
+
+                json.WriteString("apiVersion");
+                json.BeginString();
+                json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
+                json.ContinueString(".");
+                json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
+                json.ContinueString(".");
+                json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
+                json.EndString();
+
+                json.WriteString("GPU");
+                json.WriteString(deviceProperties.deviceName);
+                json.WriteString("deviceType");
+                json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
+
+                json.WriteString("maxMemoryAllocationCount");
+                json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
+                json.WriteString("bufferImageGranularity");
+                json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
+                json.WriteString("nonCoherentAtomSize");
+                json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
+
+                json.WriteString("memoryHeapCount");
+                json.WriteNumber(memoryProperties.memoryHeapCount);
+                json.WriteString("memoryTypeCount");
+                json.WriteNumber(memoryProperties.memoryTypeCount);
+            }
+            json.EndObject();
+        }
+        {
+            json.WriteString("Total");
+            VmaPrintDetailedStatistics(json, stats.total);
+        }
+        {
+            json.WriteString("MemoryInfo");
+            json.BeginObject();
+            {
+                for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
+                {
+                    json.BeginString("Heap ");
+                    json.ContinueString(heapIndex);
+                    json.EndString();
+                    json.BeginObject();
+                    {
+                        const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
+                        json.WriteString("Flags");
+                        json.BeginArray(true);
+                        {
+                            if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
+                                json.WriteString("DEVICE_LOCAL");
+    #if VMA_VULKAN_VERSION >= 1001000
+                            if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
+                                json.WriteString("MULTI_INSTANCE");
+    #endif
+
+                            VkMemoryHeapFlags flags = heapInfo.flags &
+                                ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
+    #if VMA_VULKAN_VERSION >= 1001000
+                                | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
+    #endif
+                                );
+                            if (flags != 0)
+                                json.WriteNumber(flags);
+                        }
+                        json.EndArray();
+
+                        json.WriteString("Size");
+                        json.WriteNumber(heapInfo.size);
+
+                        json.WriteString("Budget");
+                        json.BeginObject();
+                        {
+                            json.WriteString("BudgetBytes");
+                            json.WriteNumber(budgets[heapIndex].budget);
+                            json.WriteString("UsageBytes");
+                            json.WriteNumber(budgets[heapIndex].usage);
+                        }
+                        json.EndObject();
+
+                        json.WriteString("Stats");
+                        VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
+
+                        json.WriteString("MemoryPools");
+                        json.BeginObject();
+                        {
+                            for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
+                            {
+                                if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
+                                {
+                                    json.BeginString("Type ");
+                                    json.ContinueString(typeIndex);
+                                    json.EndString();
+                                    json.BeginObject();
+                                    {
+                                        json.WriteString("Flags");
+                                        json.BeginArray(true);
+                                        {
+                                            VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
+                                            if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
+                                                json.WriteString("DEVICE_LOCAL");
+                                            if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
json.WriteString("HOST_VISIBLE"); + if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) + json.WriteString("HOST_COHERENT"); + if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT) + json.WriteString("HOST_CACHED"); + if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) + json.WriteString("LAZILY_ALLOCATED"); + #if VMA_VULKAN_VERSION >= 1001000 + if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) + json.WriteString("PROTECTED"); + #endif + #if VK_AMD_device_coherent_memory + if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) + json.WriteString("DEVICE_COHERENT_AMD"); + if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) + json.WriteString("DEVICE_UNCACHED_AMD"); + #endif + + flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT + #if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT + #endif + #if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY + | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY + #endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT + | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT + | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); + if (flags != 0) + json.WriteNumber(flags); + } + json.EndArray(); + + json.WriteString("Stats"); + VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]); + } + json.EndObject(); + } + } + + } + json.EndObject(); + } + json.EndObject(); + } + } + json.EndObject(); + } + + if (detailedMap == VK_TRUE) + allocator->PrintDetailedMap(json); + + json.EndObject(); + } + + *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( + VmaAllocator allocator, + char* pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(allocator); + VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); + } +} + +#endif // VMA_STATS_STRING_ENABLED + +/* +This function is not protected by any mutex because it just reads immutable data. 
+*/ +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pBufferCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceBufferMemoryRequirements) + { + // Can query straight from VkBufferCreateInfo :) + VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS}; + devBufMemReq.pCreateInfo = pBufferCreateInfo; + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy buffer to query :( + VkBuffer hBuffer = VK_NULL_HANDLE; + res = funcs->vkCreateBuffer( + hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex); + + funcs->vkDestroyBuffer( + hDev, hBuffer, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + uint32_t* pMemoryTypeIndex) +{ + VMA_ASSERT(allocator != VK_NULL_HANDLE); + VMA_ASSERT(pImageCreateInfo != VMA_NULL); + VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); + VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); + + const VkDevice hDev = allocator->m_hDevice; + const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + VkResult res; + +#if VMA_VULKAN_VERSION >= 1003000 + if(funcs->vkGetDeviceImageMemoryRequirements) + { + // Can query straight from VkImageCreateInfo :) + VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS}; + devImgMemReq.pCreateInfo = pImageCreateInfo; + VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); + + VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; + (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); + + res = 
allocator->FindMemoryTypeIndex( + memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); + } + else +#endif // #if VMA_VULKAN_VERSION >= 1003000 + { + // Must create a dummy image to query :( + VkImage hImage = VK_NULL_HANDLE; + res = funcs->vkCreateImage( + hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); + if(res == VK_SUCCESS) + { + VkMemoryRequirements memReq = {}; + funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); + + res = allocator->FindMemoryTypeIndex( + memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex); + + funcs->vkDestroyImage( + hDev, hImage, allocator->GetAllocationCallbacks()); + } + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator allocator, + const VmaPoolCreateInfo* pCreateInfo, + VmaPool* pPool) +{ + VMA_ASSERT(allocator && pCreateInfo && pPool); + + VMA_DEBUG_LOG("vmaCreatePool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CreatePool(pCreateInfo, pPool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator allocator, + VmaPool pool) +{ + VMA_ASSERT(allocator); + + if(pool == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyPool"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->DestroyPool(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetPoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator allocator, + VmaPool pool, + VmaDetailedStatistics* pPoolStats) +{ + VMA_ASSERT(allocator && pool && pPoolStats); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->CalculatePoolStatistics(pool, pPoolStats); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VMA_DEBUG_LOG("vmaCheckPoolCorruption"); + + return allocator->CheckPoolCorruption(pool); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char** ppName) +{ + VMA_ASSERT(allocator && pool && ppName); + + VMA_DEBUG_LOG("vmaGetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *ppName = pool->GetName(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator allocator, + VmaPool pool, + const char* pName) +{ + VMA_ASSERT(allocator && pool); + + VMA_DEBUG_LOG("vmaSetPoolName"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + pool->SetName(pName); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, 
pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator allocator, + const VkMemoryRequirements* pVkMemoryRequirements, + const VmaAllocationCreateInfo* pCreateInfo, + size_t allocationCount, + VmaAllocation* pAllocations, + VmaAllocationInfo* pAllocationInfo) +{ + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations); + + VMA_DEBUG_LOG("vmaAllocateMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkResult result = allocator->AllocateMemory( + *pVkMemoryRequirements, + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation + VK_NULL_HANDLE, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_UNKNOWN, + allocationCount, + pAllocations); + + if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + { + for(size_t i = 0; i < allocationCount; ++i) + { + allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); + } + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator allocator, + VkBuffer buffer, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(buffer, vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + buffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator allocator, + VkImage image, + const VmaAllocationCreateInfo* pCreateInfo, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); + + VMA_DEBUG_LOG("vmaAllocateMemoryForImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(image, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + VkResult result = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + image, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage + *pCreateInfo, + VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, + 1, // allocationCount + pAllocation); + + if(pAllocationInfo && result == VK_SUCCESS) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return result; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaFreeMemory"); + + 
VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory( + 1, // allocationCount + &allocation); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator allocator, + size_t allocationCount, + const VmaAllocation* pAllocations) +{ + if(allocationCount == 0) + { + return; + } + + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaFreeMemoryPages"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->FreeMemory(allocationCount, pAllocations); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator allocator, + VmaAllocation allocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && allocation && pAllocationInfo); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->GetAllocationInfo(allocation, pAllocationInfo); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator allocator, + VmaAllocation allocation, + void* pUserData) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocation->SetUserData(allocator, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char* VMA_NULLABLE pName) +{ + allocation->SetName(allocator, pName); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) +{ + VMA_ASSERT(allocator && allocation && pFlags); + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator allocator, + VmaAllocation allocation, + void** ppData) +{ + VMA_ASSERT(allocator && allocation && ppData); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->Map(allocation, ppData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator allocator, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + allocator->Unmap(allocation); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaFlushAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize offset, + VkDeviceSize size) +{ + VMA_ASSERT(allocator && allocation); + + VMA_DEBUG_LOG("vmaInvalidateAllocation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator allocator, + uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaFlushAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator allocator, 
+ uint32_t allocationCount, + const VmaAllocation* allocations, + const VkDeviceSize* offsets, + const VkDeviceSize* sizes) +{ + VMA_ASSERT(allocator); + + if(allocationCount == 0) + { + return VK_SUCCESS; + } + + VMA_ASSERT(allocations); + + VMA_DEBUG_LOG("vmaInvalidateAllocations"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE); + + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator allocator, + uint32_t memoryTypeBits) +{ + VMA_ASSERT(allocator); + + VMA_DEBUG_LOG("vmaCheckCorruption"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->CheckCorruption(memoryTypeBits); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator allocator, + const VmaDefragmentationInfo* pInfo, + VmaDefragmentationContext* pContext) +{ + VMA_ASSERT(allocator && pInfo && pContext); + + VMA_DEBUG_LOG("vmaBeginDefragmentation"); + + if (pInfo->pool != VMA_NULL) + { + // Check if run on supported algorithms + if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) + return VK_ERROR_FEATURE_NOT_PRESENT; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo); + return VK_SUCCESS; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator allocator, + VmaDefragmentationContext context, + VmaDefragmentationStats* pStats) +{ + VMA_ASSERT(allocator && context); + + VMA_DEBUG_LOG("vmaEndDefragmentation"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if (pStats) + context->GetStats(*pStats); + vma_delete(allocator, context); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaBeginDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassBegin(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) +{ + VMA_ASSERT(context && pPassInfo); + + VMA_DEBUG_LOG("vmaEndDefragmentationPass"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return context->DefragmentPassEnd(*pPassInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkBuffer buffer) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer buffer, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && buffer); + + VMA_DEBUG_LOG("vmaBindBufferMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator allocator, + VmaAllocation allocation, + VkImage image) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, 0, image, 
VMA_NULL); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator allocator, + VmaAllocation allocation, + VkDeviceSize allocationLocalOffset, + VkImage image, + const void* pNext) +{ + VMA_ASSERT(allocator && allocation && image); + + VMA_DEBUG_LOG("vmaBindImageMemory2"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + pBufferCreateInfo->usage, // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator allocator, + const VkBufferCreateInfo* pBufferCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer* pBuffer, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); + + if(pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateBufferWithAlignment"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pBuffer = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if(res >= 0) + { + // 2. vkGetBufferMemoryRequirements. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + // 2a. Include minAlignment + vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); + + // 3. Allocate memory using allocator. + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage + pBufferCreateInfo->usage, // dedicatedBufferImageUsage + *pAllocationCreateInfo, + VMA_SUBALLOCATION_TYPE_BUFFER, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind buffer with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + *pBuffer = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) +{ + VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); + VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize()); + + VMA_DEBUG_LOG("vmaCreateAliasingBuffer2"); + + *pBuffer = VK_NULL_HANDLE; + + if (pBufferCreateInfo->size == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + !allocator->m_UseKhrBufferDeviceAddress) + { + VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkBuffer. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)( + allocator->m_hDevice, + pBufferCreateInfo, + allocator->GetAllocationCallbacks(), + pBuffer); + if (res >= 0) + { + // 2. Bind buffer with memory. 
+ res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator allocator, + VkBuffer buffer, + VmaAllocation allocation) +{ + VMA_ASSERT(allocator); + + if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyBuffer"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(buffer != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); + } + + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator allocator, + const VkImageCreateInfo* pImageCreateInfo, + const VmaAllocationCreateInfo* pAllocationCreateInfo, + VkImage* pImage, + VmaAllocation* pAllocation, + VmaAllocationInfo* pAllocationInfo) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); + + if(pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_LOG("vmaCreateImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + *pImage = VK_NULL_HANDLE; + *pAllocation = VK_NULL_HANDLE; + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if(res >= 0) + { + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? + VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : + VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + + // 2. Allocate memory using allocator. + VkMemoryRequirements vkMemReq = {}; + bool requiresDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; + allocator->GetImageMemoryRequirements(*pImage, vkMemReq, + requiresDedicatedAllocation, prefersDedicatedAllocation); + + res = allocator->AllocateMemory( + vkMemReq, + requiresDedicatedAllocation, + prefersDedicatedAllocation, + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage + pImageCreateInfo->usage, // dedicatedBufferImageUsage + *pAllocationCreateInfo, + suballocType, + 1, // allocationCount + pAllocation); + + if(res >= 0) + { + // 3. Bind image with memory. + if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + { + res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); + } + if(res >= 0) + { + // All steps succeeded. 
+ #if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); + #endif + if(pAllocationInfo != VMA_NULL) + { + allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); + } + + return VK_SUCCESS; + } + allocator->FreeMemory( + 1, // allocationCount + pAllocation); + *pAllocation = VK_NULL_HANDLE; + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + *pImage = VK_NULL_HANDLE; + return res; + } + return res; +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) +{ + VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); + + *pImage = VK_NULL_HANDLE; + + VMA_DEBUG_LOG("vmaCreateAliasingImage2"); + + if (pImageCreateInfo->extent.width == 0 || + pImageCreateInfo->extent.height == 0 || + pImageCreateInfo->extent.depth == 0 || + pImageCreateInfo->mipLevels == 0 || + pImageCreateInfo->arrayLayers == 0) + { + return VK_ERROR_INITIALIZATION_FAILED; + } + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + // 1. Create VkImage. + VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)( + allocator->m_hDevice, + pImageCreateInfo, + allocator->GetAllocationCallbacks(), + pImage); + if (res >= 0) + { + // 2. Bind image with memory. 
+ res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL); + if (res >= 0) + { + return VK_SUCCESS; + } + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks()); + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation) +{ + VMA_ASSERT(allocator); + + if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + { + return; + } + + VMA_DEBUG_LOG("vmaDestroyImage"); + + VMA_DEBUG_GLOBAL_MUTEX_LOCK + + if(image != VK_NULL_HANDLE) + { + (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); + } + if(allocation != VK_NULL_HANDLE) + { + allocator->FreeMemory( + 1, // allocationCount + &allocation); + } +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( + const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) +{ + VMA_ASSERT(pCreateInfo && pVirtualBlock); + VMA_ASSERT(pCreateInfo->size > 0); + VMA_DEBUG_LOG("vmaCreateVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); + VkResult res = (*pVirtualBlock)->Init(); + if(res < 0) + { + vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); + *pVirtualBlock = VK_NULL_HANDLE; + } + return res; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) +{ + if(virtualBlock != VK_NULL_HANDLE) + { + VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying. + vma_delete(&allocationCallbacks, virtualBlock); + } +} + +VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->IsEmpty() ? 
VK_TRUE : VK_FALSE; +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo); +} + +VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, + VkDeviceSize* VMA_NULLABLE pOffset) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); + VMA_DEBUG_LOG("vmaVirtualAllocate"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) +{ + if(allocation != VK_NULL_HANDLE) + { + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaVirtualFree"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Free(allocation); + } +} + +VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaClearVirtualBlock"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->Clear(); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); + VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->SetAllocationUserData(allocation, pUserData); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->GetStatistics(*pStats); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics* VMA_NOT_NULL pStats) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); + VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + virtualBlock->CalculateDetailedStatistics(*pStats); +} + +#if VMA_STATS_STRING_ENABLED + +VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) +{ + VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); + VmaStringBuilder sb(allocationCallbacks); + virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); + *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); +} + +VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char* VMA_NULLABLE pStatsString) +{ + if(pStatsString != VMA_NULL) + { + VMA_ASSERT(virtualBlock != 
VK_NULL_HANDLE); + VMA_DEBUG_GLOBAL_MUTEX_LOCK; + VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString); + } +} +#endif // VMA_STATS_STRING_ENABLED +#endif // _VMA_PUBLIC_INTERFACE +#endif // VMA_IMPLEMENTATION + +/** +\page quick_start Quick start + +\section quick_start_project_setup Project setup + +Vulkan Memory Allocator comes in the form of an "stb-style" single header file. +You don't need to build it as a separate library project. +You can add the file directly to your project and commit it to your code repository next to your other source files. + +"Single header" doesn't mean that everything is contained in C/C++ declarations, +as tends to be the case with inline functions or C++ templates. +It means that the implementation is bundled with the interface in a single file and needs to be extracted using a preprocessor macro. +If you don't do it properly, you will get linker errors. + +To do it properly: + +-# Include the "vk_mem_alloc.h" file in each CPP file where you want to use the library. + This includes declarations of all members of the library. +-# In exactly one CPP file, define the following macro before this include. + It also enables the internal definitions. + +\code +#define VMA_IMPLEMENTATION +#include "vk_mem_alloc.h" +\endcode + +It may be a good idea to create a dedicated CPP file just for this purpose. + +This library includes the header `<vulkan/vulkan.h>`, which in turn +includes `<windows.h>` on Windows. If you need some specific macros defined +before including these headers (like `WIN32_LEAN_AND_MEAN` or +`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define +them before every `#include` of this library. + +This library is written in C++, but has a C-compatible interface. +Thus you can include and use vk_mem_alloc.h in C or C++ code, but the full +implementation with the `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C. +Some features of C++14 are used. STL containers, RTTI, and C++ exceptions are not used. + + +\section quick_start_initialization Initialization + +At program startup: + +-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` objects. +-# Fill the VmaAllocatorCreateInfo structure and create a #VmaAllocator object by + calling vmaCreateAllocator(). + +Only the members `physicalDevice`, `device`, `instance` are required. +However, you should inform the library which Vulkan version you use by setting +VmaAllocatorCreateInfo::vulkanApiVersion and which extensions you enabled +by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address). +Otherwise, VMA will use only the features of Vulkan 1.0 core with no extensions. + +\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version + +VMA supports Vulkan versions down to 1.0, for backward compatibility. +If you want to use a higher version, you need to inform the library about it. +This is a two-step process. + +Step 1: Compile time. By default, VMA compiles with code supporting the highest +Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library. +If this is OK, you don't need to do anything. +However, if you want to compile VMA as if only some lower Vulkan version was available, +define the macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`. +It should have a decimal numeric value of the form ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version. 
+For example, to compile against Vulkan 1.2: + +\code +#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2 +#include "vk_mem_alloc.h" +\endcode + +Step 2: Runtime. Even when compiled with a higher Vulkan version available, +VMA can use only the features of a lower version, which is configurable during creation of the #VmaAllocator object. +By default, only Vulkan 1.0 is used. +To initialize the allocator with support for a higher Vulkan version, you need to set the member +VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`. +See the code sample below. + +\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions + +You may need to configure how Vulkan functions are imported. There are 3 ways to do this: + +-# **If you link with the Vulkan static library** (e.g. "vulkan-1.lib" on Windows): + - You don't need to do anything. + - VMA will use these, as the macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default. +-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`, + `vkGetDeviceProcAddr` (this is the option presented in the example below): + - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0 and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1. + - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr, + VmaVulkanFunctions::vkGetDeviceProcAddr. + - The library will fetch pointers to all other functions it needs internally. +-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like + [Volk](https://github.com/zeux/volk): + - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0. + - Pass these pointers via the structure #VmaVulkanFunctions. + +Example for case 2: + +\code +#define VMA_STATIC_VULKAN_FUNCTIONS 0 +#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1 +#include "vk_mem_alloc.h" + +... + +VmaVulkanFunctions vulkanFunctions = {}; +vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr; +vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr; + +VmaAllocatorCreateInfo allocatorCreateInfo = {}; +allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2; +allocatorCreateInfo.physicalDevice = physicalDevice; +allocatorCreateInfo.device = device; +allocatorCreateInfo.instance = instance; +allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions; + +VmaAllocator allocator; +vmaCreateAllocator(&allocatorCreateInfo, &allocator); +\endcode + + +\section quick_start_resource_allocation Resource allocation + +When you want to create a buffer or image: + +-# Fill the `VkBufferCreateInfo` / `VkImageCreateInfo` structure. +-# Fill the VmaAllocationCreateInfo structure. +-# Call vmaCreateBuffer() / vmaCreateImage() to get a `VkBuffer`/`VkImage` with memory + already allocated and bound to it, plus a #VmaAllocation object that represents its underlying memory. 
+ +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +Don't forget to destroy your objects when they are no longer needed: + +\code +vmaDestroyBuffer(allocator, buffer, allocation); +vmaDestroyAllocator(allocator); +\endcode + + +\page choosing_memory_type Choosing memory type + +Physical devices in Vulkan support various combinations of memory heaps and +types. Helping you choose the correct and optimal memory type for your specific +resource is one of the key features of this library. You can use it by filling the +appropriate members of the VmaAllocationCreateInfo structure, as described below. +You can also combine multiple methods. + +-# If you just want to find a memory type index that meets your requirements, you + can use one of the functions: vmaFindMemoryTypeIndexForBufferInfo(), + vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex() (a sketch follows below). +-# If you want to allocate a region of device memory without association with any + specific image or buffer, you can use the function vmaAllocateMemory(). Usage of + this function is not recommended and usually not needed. + The vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once, + which may be useful for sparse binding. +-# If you already have a buffer or an image created and want to allocate memory + for it that you will then bind yourself, you can use the functions + vmaAllocateMemoryForBuffer() and vmaAllocateMemoryForImage(). + For binding you should use the functions vmaBindBufferMemory() and vmaBindImageMemory(), + or their extended versions vmaBindBufferMemory2() and vmaBindImageMemory2(). +-# **This is the easiest and recommended way to use this library:** + If you want to create a buffer or an image, allocate memory for it, and bind + them together, all in one call, you can use the functions vmaCreateBuffer() and + vmaCreateImage(). + +When using methods 3 or 4, the library internally queries Vulkan for the memory types +supported for that buffer or image (function `vkGetBufferMemoryRequirements()`) +and uses only one of these types. + +If no memory type can be found that meets all the requirements, these functions +return `VK_ERROR_FEATURE_NOT_PRESENT`. + +You can leave the VmaAllocationCreateInfo structure filled entirely with zeros. +It means no requirements are specified for the memory type. +This is valid, although not very useful. 
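+ +For illustration, method 1 boils down to a single call. A minimal sketch, assuming `allocator` is an initialized #VmaAllocator and the buffer parameters mirror the earlier example: + +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +// On success, memTypeIndex holds the index the library would choose for such a buffer. +uint32_t memTypeIndex = 0; +VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferInfo, &allocInfo, &memTypeIndex); +\endcode 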
+ +\section choosing_memory_type_usage Usage + +The easiest way to specify memory requirements is to fill the member +VmaAllocationCreateInfo::usage using one of the values of the enum #VmaMemoryUsage. +It defines high-level, common usage types. +Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select the best memory type for your resource automatically. + +For example, if you want to create a uniform buffer that will be filled via +transfer only once or infrequently and then used for rendering every frame, you can +do it using the following code. The buffer will most likely end up in a memory type with +`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device. + +\code +VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufferInfo.size = 65536; +bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.usage = VMA_MEMORY_USAGE_AUTO; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory, +on systems with a discrete graphics card where the two memories are separate, you can use +#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST. + +When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory, +you also need to specify one of the host access flags: +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +This helps the library decide on a preferred memory type and ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` +so you can map it. + +For example, a staging buffer that will be filled via a mapped pointer and then +used as a source of transfer to the buffer described previously can be created like this. +It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT` +but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM). + +\code +VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +stagingBufferInfo.size = 65536; +stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo stagingAllocInfo = {}; +stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO; +stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; + +VkBuffer stagingBuffer; +VmaAllocation stagingAllocation; +vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr); +\endcode + +For more examples of creating different kinds of resources, see chapter \ref usage_patterns. + +Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows +about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed, +so they work with functions like vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc. +If you allocate raw memory using the function vmaAllocateMemory(), you have to use other means of selecting the +memory type, as described below. + +\note +Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`, +`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`) +are still available and work the same way as in previous versions of the library +for backward compatibility, but they are not recommended. + +\section choosing_memory_type_required_preferred_flags Required and preferred flags + +You can specify more detailed requirements by filling the members +VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags +with a combination of bits from the enum `VkMemoryPropertyFlags`. 
For example, +if you want to create a buffer that will be persistently mapped on the host (so it +must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`, +use the following code: + +\code +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; +allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT; +allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + +A memory type is chosen that has all the required flags and as many preferred +flags set as possible. + +The value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags, +plus some extra "magic" (heuristics). + +\section choosing_memory_type_explicit_memory_types Explicit memory types + +If you inspected the memory types available on the physical device and you have +a preference for the memory types that you want to use, you can fill the member +VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set +means that a memory type with that index is allowed to be used for the +allocation. The special value 0, just like `UINT32_MAX`, means there are no +restrictions on the memory type index. + +Please note that this member is NOT just a memory type index. +Still, you can use it to choose just one specific memory type. +For example, if you already determined that your buffer should be created in +memory type 2, use the following code: + +\code +uint32_t memoryTypeIndex = 2; + +VmaAllocationCreateInfo allocInfo = {}; +allocInfo.memoryTypeBits = 1u << memoryTypeIndex; + +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr); +\endcode + + +\section choosing_memory_type_custom_memory_pools Custom memory pools + +If you allocate from a custom memory pool, none of the ways of specifying memory +requirements described above are applicable, and the aforementioned members +of the VmaAllocationCreateInfo structure are ignored. The memory type is selected +explicitly when creating the pool and is then used to make all the allocations from +that pool. For further details, see \ref custom_memory_pools. + +\section choosing_memory_type_dedicated_allocations Dedicated allocations + +Memory for allocations is reserved out of a larger block of `VkDeviceMemory` +allocated from Vulkan internally. That is the main feature of this whole library. +You can still request a separate memory block to be created for an allocation, +just like you would do in a trivial solution without using any allocator. +In that case, a buffer or image is always bound to that memory at offset 0. +This is called a "dedicated allocation". +You can explicitly request it by using the flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT (see the sketch after this list). +The library can also internally decide to use a dedicated allocation in some cases, e.g.: + +- When the size of the allocation is large. +- When the [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled + and it reports that a dedicated allocation is required or recommended for the resource. +- When allocating the next big memory block fails due to insufficient device memory, + but an allocation of the exact requested size succeeds. 
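+ +For illustration, requesting a dedicated allocation explicitly only takes one extra flag. A minimal sketch, assuming `bufferInfo` and `allocator` as in the earlier examples: + +\code +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; + +// The resulting allocation gets its own VkDeviceMemory block, bound at offset 0. +VkBuffer buffer; +VmaAllocation allocation; +vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, &buffer, &allocation, nullptr); +\endcode 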
+ + +\page memory_mapping Memory mapping + +To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`, +to be able to read from it or write to it in CPU code. +Mapping is possible only for memory allocated from a memory type that has the +`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag. +The functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose. +You can use them directly with memory allocated by this library, +but it is not recommended because of the following issue: +Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed. +This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan. +Because of this, Vulkan Memory Allocator provides the following facilities: + +\note If you want to be able to map an allocation, you need to specify one of the flags +#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT +in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable +when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values. +For other usage values they are ignored and every such allocation made in a `HOST_VISIBLE` memory type is mappable, +but they can still be used for consistency. + +\section memory_mapping_mapping_functions Mapping functions + +The library provides the following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory(). +They are safer and more convenient to use than the standard Vulkan functions. +You can map an allocation multiple times simultaneously - mapping is reference-counted internally. +You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block. +The way it is implemented is that the library always maps the entire memory block, not just the region of the allocation. +For further details, see the description of the vmaMapMemory() function. +Example: + +\code +// Having these objects initialized: +struct ConstantBuffer +{ + ... +}; +ConstantBuffer constantBufferData = ... + +VmaAllocator allocator = ... +VkBuffer constantBuffer = ... +VmaAllocation constantBufferAllocation = ... + +// You can map and fill your buffer using the following code: + +void* mappedData; +vmaMapMemory(allocator, constantBufferAllocation, &mappedData); +memcpy(mappedData, &constantBufferData, sizeof(constantBufferData)); +vmaUnmapMemory(allocator, constantBufferAllocation); +\endcode + +When mapping, you may see a warning from the Vulkan validation layer similar to this one: + +Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used. + +It happens because the library maps the entire `VkDeviceMemory` block, where different +types of images and buffers may end up together, especially on GPUs with unified memory like Intel. +You can safely ignore it if you are sure you access only memory of the intended +object that you wanted to map. + + +\section memory_mapping_persistently_mapped_memory Persistently mapped memory + +Keeping your memory persistently mapped is generally OK in Vulkan. +You don't need to unmap it before using its data on the GPU. 
+The library provides a special feature designed for that: +Allocations made with the #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in +VmaAllocationCreateInfo::flags stay mapped all the time, +so you can just access the CPU pointer to them at any time, +without a need to call any "map" or "unmap" function. +Example: + +\code +VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO }; +bufCreateInfo.size = sizeof(ConstantBuffer); +bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT; + +VmaAllocationCreateInfo allocCreateInfo = {}; +allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO; +allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | + VMA_ALLOCATION_CREATE_MAPPED_BIT; + +VkBuffer buf; +VmaAllocation alloc; +VmaAllocationInfo allocInfo; +vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo); + +// Buffer is already mapped. You can access its memory. +memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData)); +\endcode + +\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up +in a mappable memory type. +For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or +#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT. +#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation. +For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading. + +\section memory_mapping_cache_control Cache flush and invalidate + +Memory in Vulkan doesn't need to be unmapped before using it on the GPU, +but unless the memory type has the `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set, +you need to manually **invalidate** the cache before reading from a mapped pointer +and **flush** the cache after writing to a mapped pointer. +Map/unmap operations don't do that automatically. +Vulkan provides the following functions for this purpose: `vkFlushMappedMemoryRanges()`, +`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient +functions that refer to a given allocation object: vmaFlushAllocation(), +vmaInvalidateAllocation(), +or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations(). + +Regions of memory specified for flush/invalidate must be aligned to +`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library. +In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations +within blocks are aligned to this value, so their offsets are always multiples of +`nonCoherentAtomSize` and two different allocations never share the same "line" of this size. + +Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA) +currently provide the `HOST_COHERENT` flag on all memory types that are +`HOST_VISIBLE`, so on PC you may not need to bother. 
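+ +For illustration, a minimal sketch of writing through the persistently mapped buffer from the example above and then flushing it, assuming it landed in a `HOST_VISIBLE` but not `HOST_COHERENT` memory type (`srcData` and `srcDataSize` are hypothetical application data): + +\code +// srcData/srcDataSize: hypothetical application data to upload. +memcpy(allocInfo.pMappedData, srcData, srcDataSize); +// Flush the whole allocation; the library aligns offset and size +// to nonCoherentAtomSize internally. +vmaFlushAllocation(allocator, alloc, 0, VK_WHOLE_SIZE); +\endcode 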
+- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+- It may even result in a GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
+  returned somewhere later.
+
+\section staying_within_budget_querying_for_budget Querying for budget
+
+To query for current memory usage and available budget, use the function vmaGetHeapBudgets().
+The returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
+
+Please note that this function returns different information and works faster than
+vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
+allocation, while vmaCalculateStatistics() is intended to be used rarely,
+only to obtain statistical information, e.g. for debugging purposes.
+
+It is recommended to use the VK_EXT_memory_budget device extension to obtain information
+about the budget from the Vulkan device. VMA is able to use this extension automatically.
+When not enabled, the allocator behaves the same way, but then it estimates current usage
+and available budget based on its internal information and Vulkan memory heap sizes,
+which may be less precise. In order to use this extension:
+
+1. Make sure the extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
+   (required by it) are available and enable them. Please note that the first is a device
+   extension and the second is an instance extension!
+2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating the #VmaAllocator object.
+3. Make sure to call vmaSetCurrentFrameIndex() every frame. The budget is queried from
+   Vulkan inside of it to avoid the overhead of querying it with every allocation.
+
+\section staying_within_budget_controlling_memory_usage Controlling memory usage
+
+There are many ways in which you can try to stay within the budget.
+
+First, when making a new allocation requires allocating a new memory block, the library
+tries not to exceed the budget automatically. If a block with the default recommended size
+(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
+dedicated memory for just this resource.
+
+If the size of the requested resource plus current memory usage is more than the
+budget, by default the library still tries to create it, leaving it to the Vulkan
+implementation whether the allocation succeeds or fails. You can change this behavior
+by using the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
+not made if it would exceed the budget or if the budget is already exceeded.
+VMA then tries to make the allocation from the next eligible Vulkan memory type.
+If all of them fail, the call fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+An example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
+when creating resources that are not essential for the application (e.g. the texture
+of a specific object) and not to pass it when creating critically important resources
+(e.g. render targets).
+
+On AMD graphics cards there is a custom vendor extension available: VK_AMD_memory_overallocation_behavior,
+which allows controlling the behavior of the Vulkan implementation in out-of-memory cases -
+whether it should fail with an error code or still allow the allocation.
+Usage of this extension involves only passing an extra structure on Vulkan device creation,
+so it is out of the scope of this library.
+
+Finally, you can also use the #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
+a new allocation is created only when it fits inside one of the existing memory blocks.
+If it would require allocating a new block, the call fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+This also ensures that the function call is very fast because it never goes to Vulkan
+to obtain a new block.
+
+\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
+set to more than 0 will currently try to allocate memory blocks without checking whether they
+fit within budget.
+
+
+\page resource_aliasing Resource aliasing (overlap)
+
+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
+management, give an opportunity to alias (overlap) multiple resources in the
+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
+It can be useful to save video memory, but it must be used with caution.
+
+For example, if you know the flow of your whole render frame in advance, you
+are going to use some intermediate textures or buffers only during a small range of render passes,
+and you know these ranges don't overlap in time, you can bind these resources to
+the same place in memory, even if they have completely different parameters (width, height, format, etc.).
+
+![Resource aliasing (overlap)](../gfx/Aliasing.png)
+
+Such a scenario is possible using VMA, but you need to create your images manually.
+Then you need to calculate the parameters of the allocation to be made using the formula:
+
+- allocation size = max(size of each image)
+- allocation alignment = max(alignment of each image)
+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
+
+The following example shows two different images bound to the same place in memory,
+allocated to fit the largest of them.
+
+\code
+// A 512x512 texture to be sampled.
+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img1CreateInfo.extent.width = 512;
+img1CreateInfo.extent.height = 512;
+img1CreateInfo.extent.depth = 1;
+img1CreateInfo.mipLevels = 10;
+img1CreateInfo.arrayLayers = 1;
+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+// A full screen texture to be used as color attachment.
+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
+img2CreateInfo.extent.width = 1920;
+img2CreateInfo.extent.height = 1080;
+img2CreateInfo.extent.depth = 1;
+img2CreateInfo.mipLevels = 1;
+img2CreateInfo.arrayLayers = 1;
+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VkImage img1;
+VkResult res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
+VkImage img2;
+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
+
+VkMemoryRequirements img1MemReq;
+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
+VkMemoryRequirements img2MemReq;
+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
+
+VkMemoryRequirements finalMemReq = {};
+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
+// Validate that finalMemReq.memoryTypeBits != 0 - otherwise the images cannot alias.
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+
+VmaAllocation alloc;
+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
+
+res = vmaBindImageMemory(allocator, alloc, img1);
+res = vmaBindImageMemory(allocator, alloc, img2);
+
+// You can use img1, img2 here, but not at the same time!
+
+vmaFreeMemory(allocator, alloc);
+vkDestroyImage(device, img2, nullptr);
+vkDestroyImage(device, img1, nullptr);
+\endcode
+
+Remember that using resources that alias in memory requires proper synchronization.
+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
+don't overlap on the GPU timeline.
+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
+For example, if you use `img1` and then want to use `img2`, you need to issue
+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
+
+Additional considerations:
+
+- Vulkan also allows interpreting the contents of memory between aliasing resources consistently in some cases.
+  See chapter 11.8. "Memory Aliasing" of the Vulkan specification or the `VK_IMAGE_CREATE_ALIAS_BIT` flag.
+- You can create a more complex layout where different images and buffers are bound
+  at different offsets inside one large allocation. For example, one can imagine
+  a big texture used in some render passes, aliasing with a set of many small buffers
+  used in some further passes. To bind a resource at a non-zero offset in an allocation,
+  use vmaBindBufferMemory2() / vmaBindImageMemory2().
+- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
+  returned in the memory requirements of each resource to make sure the bits overlap.
+  Some GPUs may expose multiple memory types suitable e.g. only for buffers or
+  images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
+  resources may be disjoint. Aliasing them is not possible in that case.
+
+
+\page custom_memory_pools Custom memory pools
+
+A memory pool contains a number of `VkDeviceMemory` blocks.
+The library automatically creates and manages a default pool for each memory type available on the device.
+A default memory pool automatically grows in size.
+The size of allocated blocks is also variable and managed automatically.
+
+You can create a custom pool and allocate memory out of it.
+It can be useful if you want to:
+
+- Keep certain kinds of allocations separate from others.
+- Enforce a particular, fixed size of Vulkan memory blocks.
+- Limit the maximum amount of Vulkan memory allocated for that pool.
+- Reserve a minimum or fixed amount of Vulkan memory always preallocated for that pool.
+- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
+  #VmaAllocationCreateInfo - e.g., a custom minimum alignment, a custom `pNext` chain.
+- Perform defragmentation on a specific subset of your allocations.
+
+To use custom memory pools:
+
+-# Fill the VmaPoolCreateInfo structure.
+-# Call vmaCreatePool() to obtain a #VmaPool handle.
+-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
+   You don't need to specify any other parameters of this structure, like `usage`.
+
+Example:
+
+\code
+// Find memoryTypeIndex for the pool.
+VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
+sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo sampleAllocCreateInfo = {};
+sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+uint32_t memTypeIndex;
+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
+    &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
+// Check res...
+
+// Create a pool that can have at most 2 blocks, 128 MiB each.
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
+poolCreateInfo.maxBlockCount = 2;
+
+VmaPool pool;
+res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
+// Check res...
+
+// Allocate a buffer out of it.
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 1024;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.pool = pool;
+
+VkBuffer buf;
+VmaAllocation alloc;
+res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+// Check res...
+\endcode
+
+You have to free all allocations made from this pool before destroying it.
+
+\code
+vmaDestroyBuffer(allocator, buf, alloc);
+vmaDestroyPool(allocator, pool);
+\endcode
+
+New versions of this library support creating dedicated allocations in custom pools.
+It is supported only when VmaPoolCreateInfo::blockSize = 0.
+To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and
+VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+
+\note Excessive use of custom pools is a common mistake when using this library.
+Custom pools may be useful for special purposes - when you want to
+keep certain types of resources separate, e.g. to reserve a minimum amount of memory
+for them or to limit the maximum amount of memory they can occupy. For most
+resources this is not needed, and so it is not recommended to create #VmaPool
+objects and allocations out of them. Allocating from the default pool is sufficient.
+
+
+\section custom_memory_pools_MemTypeIndex Choosing memory type index
+
+When creating a pool, you must explicitly specify a memory type index.
+To find the one suitable for your buffers or images, you can use the helper functions
+vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
+You need to provide structures with example parameters of the buffers or images
+that you are going to create in that pool.
+
+\code
+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+exampleBufCreateInfo.size = 1024; // Doesn't matter
+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+
+uint32_t memTypeIndex;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
+
+VmaPoolCreateInfo poolCreateInfo = {};
+poolCreateInfo.memoryTypeIndex = memTypeIndex;
+// ...
+\endcode
+
+When creating buffers/images allocated in that pool, provide the following parameters:
+
+- `VkBufferCreateInfo`: Prefer to pass the same parameters as above.
+  Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
+  Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
+  or the other way around.
+- VmaAllocationCreateInfo: You don't need to pass the same parameters. Fill only the `pool` member.
+  Other members are ignored anyway.
+
+\section linear_algorithm Linear allocation algorithm
+
+Each Vulkan memory block managed by this library has accompanying metadata that
+keeps track of used and unused regions. By default, the metadata structure and
+algorithm try to find the best place for new allocations among free regions to
+optimize memory usage. This way you can allocate and free objects in any order.
+
+![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
+
+Sometimes there is a need for a simpler, linear allocation algorithm. You can
+create a custom pool that uses this algorithm by adding the flag
+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating the
+#VmaPool object. Then an alternative metadata management is used. It always
+creates new allocations after the last one and doesn't reuse free regions left after
+allocations freed in the middle. This results in better allocation performance and
+less memory consumed by metadata.
+
+![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
+
+With this one flag, you can create a custom pool that can be used in many ways:
+free-at-once, stack, double stack, and ring buffer. See the subsections below for details,
+including a sketch of creating such a pool after this one.
+You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
+
+\subsection linear_algorithm_free_at_once Free-at-once
+
+In a pool that uses the linear algorithm, you still need to free all the allocations
+individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
+them in any order. New allocations are always made after the last one - free space
+in the middle is not reused. However, when you release all the allocations and
+the pool becomes empty, allocation starts from the beginning again. This way you
+can use the linear algorithm to speed up the creation of allocations that you are going
+to release all at once.
+
+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
+
+This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
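+
+A minimal sketch of creating a pool with the linear algorithm, suitable for the free-at-once
+pattern above as well as the patterns described below - `memTypeIndex` is assumed to have been
+found as shown in the previous section:
+
+\code
+VmaPoolCreateInfo linearPoolCreateInfo = {};
+linearPoolCreateInfo.memoryTypeIndex = memTypeIndex;
+linearPoolCreateInfo.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
+linearPoolCreateInfo.blockSize = 64ull * 1024 * 1024; // Optional: a fixed 64 MiB block.
+linearPoolCreateInfo.maxBlockCount = 1; // A single block - required for double stack and ring buffer.
+
+VmaPool linearPool;
+VkResult res = vmaCreatePool(allocator, &linearPoolCreateInfo, &linearPool);
+// Check res...
+\endcode
+
+Allocations made with VmaAllocationCreateInfo::pool set to this handle then use the linear
+algorithm automatically.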
+
+\subsection linear_algorithm_stack Stack
+
+When you free an allocation that was created last, its space can be reused.
+Thanks to this, if you always release allocations in the order opposite to their
+creation (LIFO - Last In First Out), you can achieve the behavior of a stack.
+
+![Stack](../gfx/Linear_allocator_4_stack.png)
+
+This mode is also available for pools created with a VmaPoolCreateInfo::maxBlockCount
+value that allows multiple memory blocks.
+
+\subsection linear_algorithm_double_stack Double stack
+
+The space reserved by a custom pool with the linear algorithm may be used by two
+stacks:
+
+- The first, default one, growing up from offset 0.
+- The second, "upper" one, growing down from the end towards lower offsets.
+
+To make an allocation from the upper stack, add the flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
+to VmaAllocationCreateInfo::flags.
+
+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
+
+Double stack is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, the behavior is undefined.
+
+When the two stacks' ends meet, so there is not enough space between them for a
+new allocation, such an allocation fails with the usual
+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
+
+\subsection linear_algorithm_ring_buffer Ring buffer
+
+When you free some allocations from the beginning and there is not enough free space
+for a new one at the end of the pool, the allocator's "cursor" wraps around to the
+beginning and starts allocating there. Thanks to this, if you always release
+allocations in the same order as you created them (FIFO - First In First Out),
+you can achieve the behavior of a ring buffer / queue.
+
+![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
+
+Ring buffer is available only in pools with one memory block -
+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise, the behavior is undefined.
+
+\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
+
+
+\page defragmentation Defragmentation
+
+Interleaved allocations and deallocations of many objects of varying size can
+cause fragmentation over time, which can lead to a situation where the library is unable
+to find a continuous range of free memory for a new allocation even though there is
+enough free space, just scattered across many small free ranges between existing
+allocations.
+
+To mitigate this problem, you can use the defragmentation feature.
+It doesn't happen automatically, though, and needs your cooperation,
+because VMA is a low-level library that only allocates memory.
+It cannot recreate buffers and images in a new place, as it doesn't remember the contents of the `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
+It cannot copy their contents, as it doesn't record any commands to a command buffer.
+
+Example:
+
+\code
+VmaDefragmentationInfo defragInfo = {};
+defragInfo.pool = myPool;
+defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
+
+VmaDefragmentationContext defragCtx;
+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
+// Check res...
+
+for(;;)
+{
+    VmaDefragmentationPassMoveInfo pass;
+    res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+    else if(res != VK_INCOMPLETE)
+    {
+        // Handle error...
+    }
+
+    for(uint32_t i = 0; i < pass.moveCount; ++i)
+    {
+        // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
+        VmaAllocationInfo allocInfo;
+        vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
+        MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
+
+        // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
+        VkImageCreateInfo imgCreateInfo = ...
+        VkImage newImg;
+        res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
+        // Check res...
+        res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
+        // Check res...
+
+        // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
+        vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
+    }
+
+    // Make sure the copy commands finished executing.
+    vkWaitForFences(...);
+
+    // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
+    for(uint32_t i = 0; i < pass.moveCount; ++i)
+    {
+        // ...
+        vkDestroyImage(device, resData->img, nullptr);
+    }
+
+    // Update appropriate descriptors to point to the new places...
+
+    res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
+    if(res == VK_SUCCESS)
+        break;
+    else if(res != VK_INCOMPLETE)
+    {
+        // Handle error...
+    }
+}
+
+vmaEndDefragmentation(allocator, defragCtx, nullptr);
+\endcode
+
+Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), and vmaDestroyImage()
+create/destroy an allocation and a buffer/image at once, these are just a shortcut for
+creating the resource, allocating memory, and binding them together.
+Defragmentation works on memory allocations only. You must handle the rest manually.
+Defragmentation is an iterative process that should repeat "passes" as long as related functions
+return `VK_INCOMPLETE`, not `VK_SUCCESS`.
+In each pass:
+
+1. vmaBeginDefragmentationPass() function call:
+   - Calculates and returns the list of allocations to be moved in this pass.
+     Note this can be a time-consuming process.
+   - Reserves destination memory for them by creating temporary destination allocations
+     that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
+2. Inside the pass, **you should**:
+   - Inspect the returned list of allocations to be moved.
+   - Create new buffers/images and bind them at the returned destination temporary allocations.
+   - Copy data from source to destination resources if necessary.
+   - Destroy the source buffers/images, but NOT their allocations.
+3. vmaEndDefragmentationPass() function call:
+   - Frees the source memory reserved for the allocations that are moved.
+   - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
+   - Frees `VkDeviceMemory` blocks that became empty.
+
+Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
+The defragmentation algorithm tries to move all suitable allocations.
+You can, however, refuse to move some of them inside a defragmentation pass, by setting
+`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
+If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
+
+Inside a pass, for each allocation that should be moved:
+
+- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
+  - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
+- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
+  filled, and used temporarily in each rendering frame, you can just recreate this image
+  without copying its data.
+- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
+  using `memcpy()`.
+- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
+  This will cancel the move.
+  - vmaEndDefragmentationPass() will then free the destination memory,
+    not the source memory of the allocation, leaving it unchanged.
+- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for a long time),
+  you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
+  - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
+
+You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
+(like in the example above) or all the default pools by setting this member to null.
+
+Defragmentation is always performed in each pool separately.
+Allocations are never moved between different Vulkan memory types.
+The size of the destination memory reserved for a moved allocation is the same as the original one.
+The alignment of an allocation, as determined using `vkGetBufferMemoryRequirements()` etc., is also respected after defragmentation.
+Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
+
+You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
+in each pass, e.g. to call it in sync with render frames and avoid experiencing big hitches.
+See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
+
+It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
+usage, possibly from multiple threads, with the exception that allocations
+returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
+
+Mapping is preserved on allocations that are moved during defragmentation.
+Whether mapped through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
+are mapped at their new place. Of course, the pointer to the mapped data changes, so it needs to be queried
+using VmaAllocationInfo::pMappedData.
+
+\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
+
+
+\page statistics Statistics
+
+This library contains several functions that return information about its internal state,
+especially the amount of memory allocated from Vulkan.
+
+\section statistics_numeric_statistics Numeric statistics
+
+If you need to obtain basic statistics about memory usage per heap, together with current budget,
+you can call the function vmaGetHeapBudgets() and inspect the structure #VmaBudget.
+This is useful to keep track of memory usage and stay within budget
+(see also \ref staying_within_budget).
+Example:
+
+\code
+uint32_t heapIndex = ...
+
+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
+vmaGetHeapBudgets(allocator, budgets);
+
+printf("My heap currently has %u allocations taking %llu B,\n",
+    budgets[heapIndex].statistics.allocationCount,
+    budgets[heapIndex].statistics.allocationBytes);
+printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
+    budgets[heapIndex].statistics.blockCount,
+    budgets[heapIndex].statistics.blockBytes);
+printf("Vulkan reports total usage %llu B with budget %llu B.\n",
+    budgets[heapIndex].usage,
+    budgets[heapIndex].budget);
+\endcode
+
+You can query for more detailed statistics per memory heap, type, and totals,
+including minimum and maximum allocation size and unused range size,
+by calling the function vmaCalculateStatistics() and inspecting the structure #VmaTotalStatistics.
+This function is slower though, as it has to traverse all the internal data structures,
+so it should be used only for debugging purposes.
+
+You can query for statistics of a custom pool using the function vmaGetPoolStatistics()
+or vmaCalculatePoolStatistics().
+
+You can query for information about a specific allocation using the function vmaGetAllocationInfo().
+It fills the structure #VmaAllocationInfo.
+
+\section statistics_json_dump JSON dump
+
+You can dump the internal state of the allocator to a string in JSON format using the function vmaBuildStatsString().
+The result is guaranteed to be correct JSON.
+It uses ANSI encoding.
+Any strings provided by the user (see [Allocation names](@ref allocation_names))
+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
+this JSON string can be treated as using this encoding.
+It must be freed using the function vmaFreeStatsString().
+
+The format of this JSON string is not part of the official documentation of the library,
+but it will not change in a backward-incompatible way without an increase of the library major version number
+and an appropriate mention in the changelog.
+
+The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
+It can also contain a detailed map of allocated memory blocks and their regions -
+free and occupied by allocations.
+This allows e.g. visualizing the memory or assessing fragmentation.
+
+
+\page allocation_annotation Allocation names and user data
+
+\section allocation_user_data Allocation user data
+
+You can annotate allocations with your own information, e.g. for debugging purposes.
+To do that, fill the VmaAllocationCreateInfo::pUserData field when creating
+an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
+some handle, index, key, ordinal number or any other value that would associate
+the allocation with your custom metadata.
+It is useful to identify appropriate data structures in your engine given a #VmaAllocation,
+e.g. when doing \ref defragmentation.
+
+\code
+VkBufferCreateInfo bufCreateInfo = ...
+
+MyBufferMetadata* pMetadata = CreateBufferMetadata();
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.pUserData = pMetadata;
+
+VkBuffer buffer;
+VmaAllocation allocation;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
+\endcode
+
+The pointer may be later retrieved as VmaAllocationInfo::pUserData:
+
+\code
+VmaAllocationInfo allocInfo;
+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
+MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
+\endcode
+
+It can also be changed using the function vmaSetAllocationUserData().
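+
+A minimal sketch of such a change, where `pNewMetadata` is a hypothetical pointer of your own:
+
+\code
+MyBufferMetadata* pNewMetadata = CreateBufferMetadata(); // Hypothetical helper of your engine.
+vmaSetAllocationUserData(allocator, allocation, pNewMetadata);
+\endcode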
+
+Values of (non-zero) allocations' `pUserData` are printed in the JSON report created by
+vmaBuildStatsString() in hexadecimal form.
+
+\section allocation_names Allocation names
+
+An allocation can also carry a null-terminated string, giving a name to the allocation.
+To set it, call vmaSetAllocationName().
+The library creates an internal copy of the string, so the pointer you pass doesn't need
+to be valid for the whole lifetime of the allocation. You can free it after the call.
+
+\code
+std::string imageName = "Texture: ";
+imageName += fileName;
+vmaSetAllocationName(allocator, allocation, imageName.c_str());
+\endcode
+
+The string can be later retrieved by inspecting VmaAllocationInfo::pName.
+It is also printed in the JSON report created by vmaBuildStatsString().
+
+\note Setting a string name on a VMA allocation doesn't automatically set it on the Vulkan buffer or image created with it.
+You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
+
+
+\page virtual_allocator Virtual allocator
+
+As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of a "virtual allocator".
+It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
+You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
+A common use case is sub-allocation of pieces of one large GPU buffer.
+
+\section virtual_allocator_creating_virtual_block Creating virtual block
+
+To use this functionality, there is no main "allocator" object.
+You don't need to have a #VmaAllocator object created.
+All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
+
+-# Fill in the #VmaVirtualBlockCreateInfo structure.
+-# Call vmaCreateVirtualBlock(). Get the new #VmaVirtualBlock object.
+
+Example:
+
+\code
+VmaVirtualBlockCreateInfo blockCreateInfo = {};
+blockCreateInfo.size = 1048576; // 1 MB
+
+VmaVirtualBlock block;
+VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
+\endcode
+
+\section virtual_allocator_making_virtual_allocations Making virtual allocations
+
+A #VmaVirtualBlock object contains an internal data structure that keeps track of free and occupied regions
+using the same code as the main Vulkan memory allocator.
+Similarly to #VmaAllocation for standard GPU allocations, there is the #VmaVirtualAllocation type
+that represents an opaque handle to an allocation within the virtual block.
+
+In order to make such an allocation:
+
+-# Fill in the #VmaVirtualAllocationCreateInfo structure.
+-# Call vmaVirtualAllocate(). Get the new #VmaVirtualAllocation object that represents the allocation.
+   You can also receive the `VkDeviceSize offset` that was assigned to the allocation.
+
+Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+
+VmaVirtualAllocation alloc;
+VkDeviceSize offset;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
+if(res == VK_SUCCESS)
+{
+    // Use the 4 KB of your memory starting at offset.
+}
+else
+{
+    // Allocation failed - no space for it could be found. Handle this error!
+}
+\endcode
+
+\section virtual_allocator_deallocation Deallocation
+
+When no longer needed, an allocation can be freed by calling vmaVirtualFree().
+You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
+called for the same #VmaVirtualBlock.
+
+When the whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
+All allocations must be freed before the block is destroyed, which is checked internally by an assert.
+However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
+a feature not available in the normal Vulkan memory allocator. Example:
+
+\code
+vmaVirtualFree(block, alloc);
+vmaDestroyVirtualBlock(block);
+\endcode
+
+\section virtual_allocator_allocation_parameters Allocation parameters
+
+You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
+Its default value is null.
+It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
+larger data structure containing more information. Example:
+
+\code
+struct CustomAllocData
+{
+    std::string m_AllocName;
+};
+CustomAllocData* allocData = new CustomAllocData();
+allocData->m_AllocName = "My allocation 1";
+vmaSetVirtualAllocationUserData(block, alloc, allocData);
+\endcode
+
+The pointer can later be fetched, along with the allocation's offset and size, by passing the allocation handle to the function
+vmaGetVirtualAllocationInfo() and inspecting the returned structure #VmaVirtualAllocationInfo.
+If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
+Example:
+
+\code
+VmaVirtualAllocationInfo allocInfo;
+vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
+delete (CustomAllocData*)allocInfo.pUserData;
+
+vmaVirtualFree(block, alloc);
+\endcode
+
+\section virtual_allocator_alignment_and_units Alignment and units
+
+It feels natural to express sizes and offsets in bytes.
+If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill the optional member
+VmaVirtualAllocationCreateInfo::alignment to request it. Example:
+
+\code
+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.size = 4096; // 4 KB
+allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
+
+VmaVirtualAllocation alloc;
+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
+\endcode
+
+Alignments of different allocations made from one block may vary.
+However, if all alignments and sizes are always a multiple of some size, e.g. 4 B or `sizeof(MyDataStruct)`,
+you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
+It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
+
+- VmaVirtualBlockCreateInfo::size
+- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
+- Using the offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
+
+\section virtual_allocator_statistics Statistics
+
+You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
+(to get brief statistics that are fast to calculate)
+or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
+The functions fill the structures #VmaStatistics and #VmaDetailedStatistics respectively - the same as used by the normal Vulkan memory allocator.
+Example:
+
+\code
+VmaStatistics stats;
+vmaGetVirtualBlockStatistics(block, &stats);
+printf("My virtual block has %llu bytes used by %u virtual allocations\n",
+    stats.allocationBytes, stats.allocationCount);
+\endcode
+
+You can also request a full list of allocations and free regions as a string in JSON format by calling
+vmaBuildVirtualBlockStatsString().
+The returned string must later be freed using vmaFreeVirtualBlockStatsString().
+The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
+
+\section virtual_allocator_additional_considerations Additional considerations
+
+The "virtual allocator" functionality is implemented at the level of individual memory blocks.
+Keeping track of a whole collection of blocks, allocating new ones when out of free space,
+deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
+
+Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
+See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
+You can find their description in chapter \ref custom_memory_pools.
+Allocation strategies are also supported.
+See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
+
+The following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
+buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
+
+
+\page debugging_memory_usage Debugging incorrect memory usage
+
+If you suspect a bug with memory usage, like usage of uninitialized memory or
+memory being overwritten out of bounds of an allocation,
+you can use the debug features of this library to verify this.
+
+\section debugging_memory_usage_initialization Memory initialization
+
+If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
+you can enable automatic memory initialization to verify this.
+To do it, define the macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
+
+\code
+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
+#include "vk_mem_alloc.h"
+\endcode
+
+It makes the memory of new allocations initialized to the bit pattern `0xDCDCDCDC`.
+Before an allocation is destroyed, its memory is filled with the bit pattern `0xEFEFEFEF`.
+Memory is automatically mapped and unmapped if necessary.
+
+If you find these values while debugging your program, chances are good that you incorrectly
+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
+
+Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
+It also works with dedicated allocations.
+
+\section debugging_memory_usage_margins Margins
+
+By default, allocations are laid out in memory blocks next to each other if possible
+(considering the required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
+
+![Allocations without margin](../gfx/Margins_1.png)
+
+Define the macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce the specified
+number of bytes as a margin after every allocation.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#include "vk_mem_alloc.h"
+\endcode
+
+![Allocations with margin](../gfx/Margins_2.png)
+
+If your bug goes away after enabling margins, it means it may be caused by memory
+being overwritten outside of allocation boundaries. This is not 100% certain, though.
+The change in application behavior may also be caused by a different order and distribution
+of allocations across memory blocks after margins are applied.
+
+Margins work with all types of memory.
+
+A margin is applied only to allocations made out of memory blocks and not to dedicated
+allocations, which have their own memory block of a specific size.
+It is thus not applied to allocations made using the #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
+or to those automatically placed in dedicated allocations, e.g. due to their
+large size or as recommended by the VK_KHR_dedicated_allocation extension.
+
+Margins appear in the [JSON dump](@ref statistics_json_dump) as part of free space.
+
+Note that enabling margins increases memory usage and fragmentation.
+
+Margins do not apply to \ref virtual_allocator.
+
+\section debugging_memory_usage_corruption_detection Corruption detection
+
+You can additionally define the macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
+of the contents of the margins.
+
+\code
+#define VMA_DEBUG_MARGIN 16
+#define VMA_DEBUG_DETECT_CORRUPTION 1
+#include "vk_mem_alloc.h"
+\endcode
+
+When this feature is enabled, the number of bytes specified as `VMA_DEBUG_MARGIN`
+(it must be a multiple of 4) after every allocation is filled with a magic number.
+This idea is also known as a "canary".
+Memory is automatically mapped and unmapped if necessary.
+
+This number is validated automatically when the allocation is destroyed.
+If it is not equal to the expected value, `VMA_ASSERT()` is executed.
+It clearly means that either the CPU or the GPU overwrote the memory outside the boundaries of the allocation,
+which indicates a serious bug.
+
+You can also explicitly request checking margins of all allocations in all memory blocks
+that belong to specified memory types by using the function vmaCheckCorruption(),
+or in memory blocks that belong to a specified custom pool, by using the function
+vmaCheckPoolCorruption().
+
+Margin validation (corruption detection) works only for memory types that are
+`HOST_VISIBLE` and `HOST_COHERENT`.
+
+
+\page opengl_interop OpenGL Interop
+
+VMA provides some features that help with interoperability with OpenGL.
+
+\section opengl_interop_exporting_memory Exporting memory
+
+If you want to attach a `VkExportMemoryAllocateInfoKHR` structure to the `pNext` chain of memory allocations made by the library:
+
+It is recommended to create \ref custom_memory_pools for such allocations.
+Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
+while creating the custom pool.
+Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
+not only while creating it, as no copy of the structure is made -
+its original pointer is used for each allocation instead.
+
+If you want to export all memory allocated by the library from certain memory types,
+including dedicated allocations and other allocations made from default pools,
+an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
+It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
+through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
+Please note that new versions of the library also support dedicated allocations created in custom pools.
+
+You should not mix these two methods in a way that allows both to apply to the same memory type.
+Otherwise, the `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
+
+
+\section opengl_interop_custom_alignment Custom alignment
+
+Buffers or images exported to a different API like OpenGL may require a different alignment,
+higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
+To impose such alignment:
+
+It is recommended to create \ref custom_memory_pools for such allocations.
+Set the VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
+to be made out of this pool.
+The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
+from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
+
+If you want to create a buffer with a specific minimum alignment out of default pools,
+use the special function vmaCreateBufferWithAlignment(), which takes an additional parameter `minAlignment`.
+
+Note that the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
+allocations, as these, by definition, always bind the resource at offset 0 of their own dedicated block.
+Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
+
+
+\page usage_patterns Recommended usage patterns
+
+Vulkan gives great flexibility in memory allocation.
+This chapter shows the most common patterns.
+
+See also slides from the talk:
+[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
+
+
+\section usage_patterns_gpu_only GPU-only resource
+
+When:
+Any resources that you frequently write and read on the GPU,
+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
+images/buffers used as storage images/buffers (aka "Unordered Access View (UAV)").
+
+What to do:
+Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
+
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.extent.width = 3840;
+imgCreateInfo.extent.height = 2160;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+allocCreateInfo.priority = 1.0f;
+
+VkImage img;
+VmaAllocation alloc;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+\endcode
+
+Also consider:
+Creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
+especially if they are large or if you plan to destroy and recreate them with different sizes,
+e.g. when the display resolution changes.
+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
+When the VK_EXT_memory_priority extension is enabled, it is also worth setting a high priority on such allocations
+to decrease the chance of them being evicted to system memory by the operating system.
+
+\section usage_patterns_staging_copy_upload Staging copy for upload
+
+When:
+A "staging" buffer that you want to map and fill from CPU code, then use as a source of transfer
+to some GPU resource.
+
+What to do:
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+memcpy(allocInfo.pMappedData, myData, myDataSize);
+\endcode
+
+Also consider:
+You can map the allocation using vmaMapMemory() or you can create it as persistently mapped
+using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
+
+
+\section usage_patterns_readback Readback
+
+When:
+Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
+e.g. results of some computations.
+
+What to do:
+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+...
+
+const float* downloadedData = (const float*)allocInfo.pMappedData;
+\endcode
+
+
+\section usage_patterns_advanced_data_uploading Advanced data uploading
+
+For resources that you frequently write on the CPU via a mapped pointer and
+frequently read on the GPU, e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
+
+-# The easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
+   even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
+   and make the device reach out to that resource directly.
+   - Reads performed by the device will then go through the PCI Express bus.
+     The performance of this access may be limited, but it may be fine depending on the size
+     of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
+     of access.
+-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
+   a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
+   (fast to access from the GPU). Then, it is likely the best choice for such a type of resource.
+-# Systems with a discrete graphics card and separate video memory may or may not expose
+   a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
+   If they do, it represents a piece of VRAM (or the entire VRAM, if ReBAR is enabled in the motherboard BIOS)
+   that is available to the CPU for mapping.
+   - Writes performed by the host to that memory go through the PCI Express bus.
+     The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
+     as long as the rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
+   a separate "staging" copy in `HOST_VISIBLE` memory, and perform an explicit transfer command between them.
+
+Thankfully, VMA offers an aid to create and use such resources in the way optimal
+for the current Vulkan device. To help the library make the best choice,
+use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
+#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
+It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
+but if no such memory type is available or allocation from it fails
+(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in the BIOS),
+it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
+It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
+in which case you need to create another "staging" allocation and perform explicit transfers.
+
+\code
+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+bufCreateInfo.size = 65536;
+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+    VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
+    VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+VmaAllocationInfo allocInfo;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
+
+VkMemoryPropertyFlags memPropFlags;
+vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
+
+if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
+{
+    // Allocation ended up in a mappable memory and is already mapped - write to it directly.
+
+    // [Executed in runtime]:
+    memcpy(allocInfo.pMappedData, myData, myDataSize);
+}
+else
+{
+    // Allocation ended up in a non-mappable memory - need to transfer.
+    VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
+    stagingBufCreateInfo.size = 65536;
+    stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
+
+    VmaAllocationCreateInfo stagingAllocCreateInfo = {};
+    stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+    stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
+        VMA_ALLOCATION_CREATE_MAPPED_BIT;
+
+    VkBuffer stagingBuf;
+    VmaAllocation stagingAlloc;
+    VmaAllocationInfo stagingAllocInfo;
+    vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
+        &stagingBuf, &stagingAlloc, &stagingAllocInfo);
+
+    // [Executed in runtime]:
+    memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
+    //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
+    VkBufferCopy bufCopy = {
+        0, // srcOffset
+        0, // dstOffset
+        myDataSize }; // size
+    vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
+}
+\endcode
+
+\section usage_patterns_other_use_cases Other use cases
+
+Here are some other, less obvious use cases and their recommended settings:
+
+- An image that is used only as a transfer source and destination, but should stay on the device,
+  as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
+  for temporal antialiasing or other temporal effects.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
+- An image that is used only as a transfer source and destination, but should be placed
+  in the system RAM even though it doesn't need to be mapped, because it serves as a "swap" copy to evict
+  least recently used textures from VRAM.
+  - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
+    as VMA needs a hint here to differentiate it from the previous case.
+- A buffer that you want to map and write from the CPU and directly read from the GPU
+  (e.g. as a uniform or vertex buffer), but where you have a clear preference to place it in device or
+  host memory due to its large size.
+  - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
+  - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
+  - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
+
+
+\page configuration Configuration
+
+Please check the "CONFIGURATION SECTION" in the code to find macros that you can define
+before each include of this file, or change directly in this file, to provide
+your own implementation of basic facilities like assert, `min()` and `max()` functions,
+mutex, atomic, etc.
+The library uses its own implementation of containers by default, but you can switch to using
+STL containers instead.
+
+For example, define `VMA_ASSERT(expr)` before including the library to provide
+a custom implementation of the assertion, compatible with your project.
+By default it is defined to the standard C `assert(expr)` in the `_DEBUG` configuration
+and empty otherwise.
+
+\section config_Vulkan_functions Pointers to Vulkan functions
+
+There are multiple ways to import pointers to Vulkan functions in the library.
+In the simplest case you don't need to do anything.
+If the compilation or linking of your program or the initialization of the #VmaAllocator
+doesn't work for you, you can try to reconfigure it.
+
+First, the allocator tries to fetch pointers to Vulkan functions linked statically,
+like this:
+
+\code
+m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
+\endcode
+
+If you want to disable this feature, set the configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
+
+Second, you can provide the pointers yourself by setting the member VmaAllocatorCreateInfo::pVulkanFunctions.
+You can fetch them e.g. using the functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
+by using a helper library like [volk](https://github.com/zeux/volk).
+
+Third, VMA tries to fetch remaining pointers that are still null by calling
+`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
+You only need to fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
+Other pointers will be fetched automatically.
+If you want to disable this feature, set the configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
+
+Finally, all the function pointers required by the library (considering the selected
+Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
+
+
+\section custom_memory_allocator Custom host memory allocator
+
+If you use a custom allocator for CPU memory rather than the default C++ operators `new`
+and `delete`, you can make this library use your allocator as well
+by filling the optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
+functions will be passed to Vulkan, as well as used by the library itself to
+make any CPU-side allocations.
+
+\section allocation_callbacks Device memory allocation callbacks
+
+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
+You can set up callbacks to be informed about these calls, e.g. for the purpose
+of gathering some statistics. To do it, fill the optional member
+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
+
+\section heap_memory_limit Device heap memory limit
+
+When device memory of a certain heap runs out of free space, new allocations may
+fail (returning an error code) or they may succeed, silently pushing some existing
+memory blocks from GPU VRAM to system RAM (which degrades performance). This
+behavior is implementation-dependent - it depends on the GPU vendor and graphics
+driver.
+
+On AMD cards it can be controlled while creating the Vulkan device object by using
+the VK_AMD_memory_overallocation_behavior extension, if available.
+
+Alternatively, if you want to test how your program behaves with a limited amount of Vulkan device
+memory available without switching your graphics card to one that really has
+smaller VRAM, you can use a feature of this library intended for this purpose.
+To do it, fill the optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
+
+
+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
+
+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
+performance on some GPUs. It augments the Vulkan API with the possibility to query
+the driver whether it prefers a particular buffer or image to have its own, dedicated
+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
+to do some internal optimizations. The extension is supported by this library.
+It will be used automatically when enabled.
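+
+Independently of what the driver reports, you can also force a dedicated allocation
+for a particular resource with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+A short illustrative sketch, using the `allocator` created earlier and where
+`bufCreateInfo` stands for a `VkBufferCreateInfo` you have already filled:
+
+\code
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+// Always give this resource its own VkDeviceMemory block:
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+
+VkBuffer buf;
+VmaAllocation alloc;
+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
+\endcode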
+
+The extension has been promoted to core Vulkan 1.1, so if you use an eligible Vulkan version
+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
+you are all set.
+
+Otherwise, if you want to use it as an extension:
+
+1. When creating the Vulkan device, check if the following 2 device extensions are
+supported (call `vkEnumerateDeviceExtensionProperties()`).
+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
+
+- VK_KHR_get_memory_requirements2
+- VK_KHR_dedicated_allocation
+
+If you enabled these extensions:
+
+2. Use the #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
+your #VmaAllocator to inform the library that you enabled the required extensions
+and you want the library to use them.
+
+\code
+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
+
+vmaCreateAllocator(&allocatorInfo, &allocator);
+\endcode
+
+That is all. The extension will be automatically used whenever you create a
+buffer using vmaCreateBuffer() or an image using vmaCreateImage().
+
+When using the extension together with the Vulkan Validation Layer, you will receive
+warnings like this:
+
+_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
+
+It is OK; you should just ignore it. It happens because you use the function
+`vkGetBufferMemoryRequirements2KHR()` instead of the standard
+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
+unaware of it.
+
+To learn more about this extension, see:
+
+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
+
+
+\page vk_ext_memory_priority VK_EXT_memory_priority
+
+VK_EXT_memory_priority is a device extension that allows passing an additional "priority"
+value to Vulkan memory allocations, which the implementation may use to prefer certain
+buffers and images that are critical for performance to stay in device-local memory
+in cases when the memory is over-subscribed, while some others may be moved to the system memory.
+
+VMA offers convenient usage of this extension.
+If you enable it, you can pass a "priority" parameter when creating allocations or custom pools,
+and the library automatically passes the value to Vulkan using this extension.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_ext_memory_priority_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to the
+`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_ext_memory_priority_usage Usage
+
+When using this extension, you should initialize the following members:
+
+- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+- VmaPoolCreateInfo::priority when creating a custom pool.
+
+It should be a floating-point value between `0.0f` and `1.0f`, where the recommended default is `0.5f`.
+Memory allocated with a higher value can be treated by the Vulkan implementation as higher priority,
+so it has a lower chance of being pushed out to system memory and experiencing degraded performance.
+
+It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
+as dedicated allocations and set a high priority on them. For example:
+
+\code
+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
+imgCreateInfo.extent.width = 3840;
+imgCreateInfo.extent.height = 2160;
+imgCreateInfo.extent.depth = 1;
+imgCreateInfo.mipLevels = 1;
+imgCreateInfo.arrayLayers = 1;
+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
+
+VmaAllocationCreateInfo allocCreateInfo = {};
+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
+allocCreateInfo.priority = 1.0f;
+
+VkImage img;
+VmaAllocation alloc;
+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
+\endcode
+
+The `priority` member is ignored in the following situations:
+
+- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters,
+  from the #VmaPoolCreateInfo passed when the pool was created.
+- Allocations created in default pools: They inherit the priority from the parameters
+  VMA used when creating the default pools, which means `priority == 0.5f`.
+
+
+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
+
+VK_AMD_device_coherent_memory is a device extension that enables access to
+additional memory types with the `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. It is useful mostly for
+allocation of buffers intended for writing "breadcrumb markers" in between passes
+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
+
+When the extension is available but has not been enabled, the Vulkan physical device
+still exposes those memory types, but their usage is forbidden. VMA automatically
+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
+to allocate memory of such a type is made.
+
+If you want to use this extension in connection with VMA, follow these steps:
+
+\section vk_amd_device_coherent_memory_initialization Initialization
+
+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
+
+3) While creating the device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section vk_amd_device_coherent_memory_usage Usage
+
+After following the steps described above, you can create VMA allocations and custom pools
+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
+devices. There are multiple ways to do it, for example:
+
+- You can request or prefer to allocate out of such memory types by adding
+  `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
+  or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
+  other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
+- If you manually found a memory type index to use for this purpose, force allocation
+  from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
+
+\section vk_amd_device_coherent_memory_more_information More information
+
+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+
+\page enabling_buffer_device_address Enabling buffer device address
+
+The device extension VK_KHR_buffer_device_address
+allows fetching a raw GPU pointer to a buffer and passing it for use in shader code.
+It has been promoted to core Vulkan 1.2.
+
+If you want to use this feature in connection with VMA, follow these steps:
+
+\section enabling_buffer_device_address_initialization Initialization
+
+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
+Check if the extension is supported - if the returned array of `VkExtensionProperties` contains
+"VK_KHR_buffer_device_address".
+
+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of the old `vkGetPhysicalDeviceFeatures`.
+Attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
+
+3) (For Vulkan version < 1.2) While creating the device with `vkCreateDevice`, enable this extension - add
+"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
+
+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
+Fill in the `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
+Enable this device feature - attach the additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
+`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
+
+5) While creating the #VmaAllocator with vmaCreateAllocator(), inform VMA that you
+have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
+to VmaAllocatorCreateInfo::flags.
+
+\section enabling_buffer_device_address_usage Usage
+
+After following the steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
+The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
+allocated memory blocks wherever it might be needed.
+
+Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
+The second part of this functionality, related to "capture and replay", is not supported,
+as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
+
+\section enabling_buffer_device_address_more_information More information
+
+To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
+
+Example use of this extension can be found in the code of the sample and test suite
+accompanying this library.
+
+\page general_considerations General considerations
+
+\section general_considerations_thread_safety Thread safety
+
+- The library has no global state, so separate #VmaAllocator objects can be used
+  independently.
+  There should be no need to create multiple such objects though - one per `VkDevice` is enough.
+- By default, all calls to functions that take a #VmaAllocator as the first parameter
+  are safe to call from multiple threads simultaneously because they are
+  synchronized internally when needed.
+  This includes allocation and deallocation from the default memory pool, as well as from a custom #VmaPool.
+- When the allocator is created with the #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
+  flag, calls to functions that take such a #VmaAllocator object must be
+  synchronized externally.
+- Access to a #VmaAllocation object must be externally synchronized. For example,
+  you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
+  threads at the same time if you pass the same #VmaAllocation object to these
+  functions.
+- #VmaVirtualBlock is not safe to use from multiple threads simultaneously.
+
+\section general_considerations_versioning_and_compatibility Versioning and compatibility
+
+The library uses [**Semantic Versioning**](https://semver.org/),
+which means version numbers follow the convention: Major.Minor.Patch (e.g. 2.3.0), where:
+
+- An incremented Patch version means a release is backward- and forward-compatible,
+  introducing only some internal improvements, bug fixes, optimizations etc.
+  or changes that are out of scope of the official API described in this documentation.
+- An incremented Minor version means a release is backward-compatible,
+  so existing code that uses the library should continue to work, while some new
+  symbols could have been added: new structures, functions, new values in existing
+  enums and bit flags, new structure members, but not new function parameters.
+- An incremented Major version means a release could break some backward compatibility.
+
+All changes between official releases are documented in the file "CHANGELOG.md".
+
+\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
+Adding new members to existing structures is treated as backward compatible if initializing
+the new members to binary zero results in the old behavior.
+You should always fully initialize all library structures to zeros and not rely on their
+exact binary size.
+
+\section general_considerations_validation_layer_warnings Validation layer warnings
+
+When using this library, you may encounter the following types of warnings issued by the
+Vulkan validation layer. They don't necessarily indicate a bug, so you may need
+to just ignore them.
+
+- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
+  - It happens when the VK_KHR_dedicated_allocation extension is enabled.
+    The `vkGetBufferMemoryRequirements2KHR` function is used instead, while the validation layer seems to be unaware of it.
+- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
+  - It happens when you map a buffer or image, because the library maps the entire
+    `VkDeviceMemory` block, where different types of images and buffers may end
+    up together, especially on GPUs with unified memory like Intel.
+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
+  - It may happen when you use [defragmentation](@ref defragmentation).
+
+\section general_considerations_allocation_algorithm Allocation algorithm
+
+The library uses the following algorithm for allocation, in order:
+
+-# Try to find a free range of memory in existing blocks.
+-# If that failed, try to create a new block of `VkDeviceMemory`, with the preferred block size.
+-# If that failed, try to create such a block with size / 2, size / 4, size / 8.
+-# If that failed, try to allocate separate `VkDeviceMemory` for this allocation,
+   just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
+-# If that failed, choose another memory type that meets the requirements specified in
+   VmaAllocationCreateInfo and go to point 1.
+-# If that failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
+
+\section general_considerations_features_not_supported Features not supported
+
+Features deliberately excluded from the scope of this library:
+
+-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
+   between CPU and GPU memory and the related synchronization is the responsibility of the user.
+   Defining some "texture" object that would automatically stream its data from a
+   staging copy in CPU memory to GPU memory would rather be a feature of another,
+   higher-level library implemented on top of VMA.
+   VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
+-# **Recreation of buffers and images.** Although the library has functions for
+   buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
+   recreate these objects yourself after defragmentation.
+   That is because the big structures `VkBufferCreateInfo`, `VkImageCreateInfo`
+   are not stored in the #VmaAllocation object.
+-# **Handling CPU memory allocation failures.** When dynamically creating small C++
+   objects in CPU memory (not Vulkan memory), allocation failures are not checked
+   and handled gracefully, because that would complicate the code significantly and
+   is usually not needed in desktop PC applications anyway.
+   Success of an allocation is just checked with an assert.
+-# **Code free of any compiler warnings.** Maintaining the library to compile and
+   work correctly on so many different platforms is hard enough. Being free of
+   any warnings, on any version of any compiler, is simply not feasible.
+   There are many preprocessor macros that make some variables unused, function parameters unreferenced,
+   or conditional expressions constant in some configurations.
+   The code of this library should not be bigger or more complicated just to silence these warnings.
+   It is recommended to disable such warnings instead.
+-# This is a C++ library with a C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
+   are not going to be included in this repository.
+*/
diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc.hpp b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc.hpp
new file mode 100644
index 00000000..b3b9d435
--- /dev/null
+++ b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc.hpp
@@ -0,0 +1,135 @@
+#ifndef VULKAN_MEMORY_ALLOCATOR_HPP
+#define VULKAN_MEMORY_ALLOCATOR_HPP
+
+#if !defined(AMD_VULKAN_MEMORY_ALLOCATOR_H)
+#include <vk_mem_alloc.h>
+#endif
+
+#include <vulkan/vulkan.hpp>
+
+#if !defined(VMA_HPP_NAMESPACE)
+#define VMA_HPP_NAMESPACE vma
+#endif
+
+#define VMA_HPP_NAMESPACE_STRING VULKAN_HPP_STRINGIFY(VMA_HPP_NAMESPACE)
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+namespace VMA_HPP_NAMESPACE {
+  struct Dispatcher {}; // VMA uses function pointers from VmaAllocator instead
+  class Allocator;
+
+  template<class T>
+  VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher> createUniqueHandle(const T& t) VULKAN_HPP_NOEXCEPT {
+    return VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>(t);
+  }
+  template<class T, class O>
+  VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher> createUniqueHandle(const T& t, const O* o) VULKAN_HPP_NOEXCEPT {
+    return VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>(t, o);
+  }
+  template<class F, class S, class O>
+  std::pair<VULKAN_HPP_NAMESPACE::UniqueHandle<F, Dispatcher>, VULKAN_HPP_NAMESPACE::UniqueHandle<S, Dispatcher>>
+  createUniqueHandle(const std::pair<F, S>& t, const O* o) VULKAN_HPP_NOEXCEPT {
+    return {
+      VULKAN_HPP_NAMESPACE::UniqueHandle<F, Dispatcher>(t.first, o),
+      VULKAN_HPP_NAMESPACE::UniqueHandle<S, Dispatcher>(t.second, o)
+    };
+  }
+
+  template<class T, class UniqueVectorAllocator, class VectorAllocator, class O>
+  std::vector<VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>, UniqueVectorAllocator>
+  createUniqueHandleVector(const std::vector<T, VectorAllocator>& vector, const O* o,
+                           const UniqueVectorAllocator& vectorAllocator) VULKAN_HPP_NOEXCEPT {
+    std::vector<VULKAN_HPP_NAMESPACE::UniqueHandle<T, Dispatcher>, UniqueVectorAllocator> result(vectorAllocator);
+    result.reserve(vector.size());
+    for (const T& t : vector) result.emplace_back(t, o);
+    return result;
+  }
+
+  template<class T, class Owner> class Deleter {
+    const Owner* owner;
+  public:
+    Deleter() = default;
+    Deleter(const Owner* owner) VULKAN_HPP_NOEXCEPT : owner(owner) {}
+  protected:
+    void destroy(const T& t) VULKAN_HPP_NOEXCEPT; // Implemented manually for each handle type
+  };
+  template<class T> class Deleter<T, void> {
+  protected:
+    void destroy(const T& t) VULKAN_HPP_NOEXCEPT { t.destroy(); }
+  };
+}
+namespace VULKAN_HPP_NAMESPACE {
+  template<> struct UniqueHandleTraits<Buffer, VMA_HPP_NAMESPACE::Dispatcher> {
+    using deleter = VMA_HPP_NAMESPACE::Deleter<Buffer, VMA_HPP_NAMESPACE::Allocator>;
+  };
+  template<> struct UniqueHandleTraits<Image, VMA_HPP_NAMESPACE::Dispatcher> {
+    using deleter = VMA_HPP_NAMESPACE::Deleter<Image, VMA_HPP_NAMESPACE::Allocator>;
+  };
+}
+namespace VMA_HPP_NAMESPACE {
+  using UniqueBuffer = VULKAN_HPP_NAMESPACE::UniqueHandle<VULKAN_HPP_NAMESPACE::Buffer, Dispatcher>;
+  using UniqueImage = VULKAN_HPP_NAMESPACE::UniqueHandle<VULKAN_HPP_NAMESPACE::Image, Dispatcher>;
+}
+#endif
+
+#include "vk_mem_alloc_enums.hpp"
+#include "vk_mem_alloc_handles.hpp"
+#include "vk_mem_alloc_structs.hpp"
+#include "vk_mem_alloc_funcs.hpp"
+
+namespace VMA_HPP_NAMESPACE {
+
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+# define VMA_HPP_DESTROY_IMPL(NAME) \
+  template<> VULKAN_HPP_INLINE void VULKAN_HPP_NAMESPACE::UniqueHandleTraits<NAME, Dispatcher>::deleter::destroy(const NAME& t) VULKAN_HPP_NOEXCEPT
+
+  VMA_HPP_DESTROY_IMPL(VULKAN_HPP_NAMESPACE::Buffer) { owner->destroyBuffer(t, nullptr); }
+  VMA_HPP_DESTROY_IMPL(VULKAN_HPP_NAMESPACE::Image) { owner->destroyImage(t, nullptr); }
+  VMA_HPP_DESTROY_IMPL(Pool) { owner->destroyPool(t); }
+  VMA_HPP_DESTROY_IMPL(Allocation) { owner->freeMemory(t); }
+  VMA_HPP_DESTROY_IMPL(VirtualAllocation) { owner->virtualFree(t); }
+
+# undef VMA_HPP_DESTROY_IMPL
+#endif
+
+  template<class InstanceDispatcher, class DeviceDispatcher>
+  VULKAN_HPP_CONSTEXPR VulkanFunctions functionsFromDispatcher(InstanceDispatcher const * instance,
+                                                               DeviceDispatcher const * device) VULKAN_HPP_NOEXCEPT {
+    return VulkanFunctions {
+      instance->vkGetInstanceProcAddr,
+      instance->vkGetDeviceProcAddr,
+      instance->vkGetPhysicalDeviceProperties,
+      instance->vkGetPhysicalDeviceMemoryProperties,
+      device->vkAllocateMemory,
+      device->vkFreeMemory,
+      device->vkMapMemory,
+      device->vkUnmapMemory,
+      device->vkFlushMappedMemoryRanges,
+      device->vkInvalidateMappedMemoryRanges,
+      device->vkBindBufferMemory,
+      device->vkBindImageMemory,
+      device->vkGetBufferMemoryRequirements,
+      device->vkGetImageMemoryRequirements,
+      device->vkCreateBuffer,
+      device->vkDestroyBuffer,
+      device->vkCreateImage,
+      device->vkDestroyImage,
+      device->vkCmdCopyBuffer,
+      device->vkGetBufferMemoryRequirements2KHR ? device->vkGetBufferMemoryRequirements2KHR : device->vkGetBufferMemoryRequirements2,
+      device->vkGetImageMemoryRequirements2KHR ? device->vkGetImageMemoryRequirements2KHR : device->vkGetImageMemoryRequirements2,
+      device->vkBindBufferMemory2KHR ? device->vkBindBufferMemory2KHR : device->vkBindBufferMemory2,
+      device->vkBindImageMemory2KHR ? device->vkBindImageMemory2KHR : device->vkBindImageMemory2,
+      instance->vkGetPhysicalDeviceMemoryProperties2KHR ?
instance->vkGetPhysicalDeviceMemoryProperties2KHR : instance->vkGetPhysicalDeviceMemoryProperties2, + device->vkGetDeviceBufferMemoryRequirements, + device->vkGetDeviceImageMemoryRequirements + }; + } + + template + VULKAN_HPP_CONSTEXPR VulkanFunctions functionsFromDispatcher(Dispatch const & dispatch + VULKAN_HPP_DEFAULT_DISPATCHER_ASSIGNMENT) VULKAN_HPP_NOEXCEPT { + return functionsFromDispatcher(&dispatch, &dispatch); + } +} + +#endif diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_enums.hpp b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_enums.hpp new file mode 100644 index 00000000..e48490cc --- /dev/null +++ b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_enums.hpp @@ -0,0 +1,450 @@ +#ifndef VULKAN_MEMORY_ALLOCATOR_ENUMS_HPP +#define VULKAN_MEMORY_ALLOCATOR_ENUMS_HPP + +namespace VMA_HPP_NAMESPACE { + + enum class AllocatorCreateFlagBits : VmaAllocatorCreateFlags { + eExternallySynchronized = VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT, + eKhrDedicatedAllocation = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT, + eKhrBindMemory2 = VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT, + eExtMemoryBudget = VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT, + eAmdDeviceCoherentMemory = VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT, + eBufferDeviceAddress = VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT, + eExtMemoryPriority = VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT + }; + + VULKAN_HPP_INLINE std::string to_string(AllocatorCreateFlagBits value) { + if (value == AllocatorCreateFlagBits::eExternallySynchronized) return "ExternallySynchronized"; + if (value == AllocatorCreateFlagBits::eKhrDedicatedAllocation) return "KhrDedicatedAllocation"; + if (value == AllocatorCreateFlagBits::eKhrBindMemory2) return "KhrBindMemory2"; + if (value == AllocatorCreateFlagBits::eExtMemoryBudget) return "ExtMemoryBudget"; + if (value == AllocatorCreateFlagBits::eAmdDeviceCoherentMemory) return "AmdDeviceCoherentMemory"; + if (value == AllocatorCreateFlagBits::eBufferDeviceAddress) return "BufferDeviceAddress"; + if (value == AllocatorCreateFlagBits::eExtMemoryPriority) return "ExtMemoryPriority"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VULKAN_HPP_NAMESPACE { + template<> struct FlagTraits { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR Flags allFlags = + VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eExternallySynchronized + | VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eKhrDedicatedAllocation + | VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eKhrBindMemory2 + | VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eExtMemoryBudget + | VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eAmdDeviceCoherentMemory + | VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eBufferDeviceAddress + | VMA_HPP_NAMESPACE::AllocatorCreateFlagBits::eExtMemoryPriority; + }; +} + +namespace VMA_HPP_NAMESPACE { + + using AllocatorCreateFlags = VULKAN_HPP_NAMESPACE::Flags; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator|(AllocatorCreateFlagBits bit0, AllocatorCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return AllocatorCreateFlags(bit0) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator&(AllocatorCreateFlagBits bit0, AllocatorCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return AllocatorCreateFlags(bit0) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator^(AllocatorCreateFlagBits bit0, 
AllocatorCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return AllocatorCreateFlags(bit0) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocatorCreateFlags operator~(AllocatorCreateFlagBits bits) VULKAN_HPP_NOEXCEPT { + return ~(AllocatorCreateFlags(bits)); + } + + VULKAN_HPP_INLINE std::string to_string(AllocatorCreateFlags value) { + if (!value) return "{}"; + std::string result; + if (value & AllocatorCreateFlagBits::eExternallySynchronized) result += "ExternallySynchronized | "; + if (value & AllocatorCreateFlagBits::eKhrDedicatedAllocation) result += "KhrDedicatedAllocation | "; + if (value & AllocatorCreateFlagBits::eKhrBindMemory2) result += "KhrBindMemory2 | "; + if (value & AllocatorCreateFlagBits::eExtMemoryBudget) result += "ExtMemoryBudget | "; + if (value & AllocatorCreateFlagBits::eAmdDeviceCoherentMemory) result += "AmdDeviceCoherentMemory | "; + if (value & AllocatorCreateFlagBits::eBufferDeviceAddress) result += "BufferDeviceAddress | "; + if (value & AllocatorCreateFlagBits::eExtMemoryPriority) result += "ExtMemoryPriority | "; + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class MemoryUsage { + eUnknown = VMA_MEMORY_USAGE_UNKNOWN, + eGpuOnly = VMA_MEMORY_USAGE_GPU_ONLY, + eCpuOnly = VMA_MEMORY_USAGE_CPU_ONLY, + eCpuToGpu = VMA_MEMORY_USAGE_CPU_TO_GPU, + eGpuToCpu = VMA_MEMORY_USAGE_GPU_TO_CPU, + eCpuCopy = VMA_MEMORY_USAGE_CPU_COPY, + eGpuLazilyAllocated = VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED, + eAuto = VMA_MEMORY_USAGE_AUTO, + eAutoPreferDevice = VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE, + eAutoPreferHost = VMA_MEMORY_USAGE_AUTO_PREFER_HOST + }; + + VULKAN_HPP_INLINE std::string to_string(MemoryUsage value) { + if (value == MemoryUsage::eUnknown) return "Unknown"; + if (value == MemoryUsage::eGpuOnly) return "GpuOnly"; + if (value == MemoryUsage::eCpuOnly) return "CpuOnly"; + if (value == MemoryUsage::eCpuToGpu) return "CpuToGpu"; + if (value == MemoryUsage::eGpuToCpu) return "GpuToCpu"; + if (value == MemoryUsage::eCpuCopy) return "CpuCopy"; + if (value == MemoryUsage::eGpuLazilyAllocated) return "GpuLazilyAllocated"; + if (value == MemoryUsage::eAuto) return "Auto"; + if (value == MemoryUsage::eAutoPreferDevice) return "AutoPreferDevice"; + if (value == MemoryUsage::eAutoPreferHost) return "AutoPreferHost"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class AllocationCreateFlagBits : VmaAllocationCreateFlags { + eDedicatedMemory = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT, + eNeverAllocate = VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT, + eMapped = VMA_ALLOCATION_CREATE_MAPPED_BIT, + eUserDataCopyString = VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT, + eUpperAddress = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + eDontBind = VMA_ALLOCATION_CREATE_DONT_BIND_BIT, + eWithinBudget = VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT, + eCanAlias = VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT, + eHostAccessSequentialWrite = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT, + eHostAccessRandom = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + eHostAccessAllowTransferInstead = VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT, + eStrategyMinMemory = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + eStrategyMinTime = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + eStrategyMinOffset = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + eStrategyBestFit = VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT, + eStrategyFirstFit = 
VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT + }; + + VULKAN_HPP_INLINE std::string to_string(AllocationCreateFlagBits value) { + if (value == AllocationCreateFlagBits::eDedicatedMemory) return "DedicatedMemory"; + if (value == AllocationCreateFlagBits::eNeverAllocate) return "NeverAllocate"; + if (value == AllocationCreateFlagBits::eMapped) return "Mapped"; + if (value == AllocationCreateFlagBits::eUserDataCopyString) return "UserDataCopyString"; + if (value == AllocationCreateFlagBits::eUpperAddress) return "UpperAddress"; + if (value == AllocationCreateFlagBits::eDontBind) return "DontBind"; + if (value == AllocationCreateFlagBits::eWithinBudget) return "WithinBudget"; + if (value == AllocationCreateFlagBits::eCanAlias) return "CanAlias"; + if (value == AllocationCreateFlagBits::eHostAccessSequentialWrite) return "HostAccessSequentialWrite"; + if (value == AllocationCreateFlagBits::eHostAccessRandom) return "HostAccessRandom"; + if (value == AllocationCreateFlagBits::eHostAccessAllowTransferInstead) return "HostAccessAllowTransferInstead"; + if (value == AllocationCreateFlagBits::eStrategyMinMemory) return "StrategyMinMemory"; + if (value == AllocationCreateFlagBits::eStrategyMinTime) return "StrategyMinTime"; + if (value == AllocationCreateFlagBits::eStrategyMinOffset) return "StrategyMinOffset"; + if (value == AllocationCreateFlagBits::eStrategyBestFit) return "StrategyBestFit"; + if (value == AllocationCreateFlagBits::eStrategyFirstFit) return "StrategyFirstFit"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VULKAN_HPP_NAMESPACE { + template<> struct FlagTraits { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR Flags allFlags = + VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eDedicatedMemory + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eNeverAllocate + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eMapped + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eUserDataCopyString + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eUpperAddress + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eDontBind + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eWithinBudget + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eCanAlias + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eHostAccessSequentialWrite + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eHostAccessRandom + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eHostAccessAllowTransferInstead + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyMinMemory + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyMinTime + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyMinOffset + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyBestFit + | VMA_HPP_NAMESPACE::AllocationCreateFlagBits::eStrategyFirstFit; + }; +} + +namespace VMA_HPP_NAMESPACE { + + using AllocationCreateFlags = VULKAN_HPP_NAMESPACE::Flags; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator|(AllocationCreateFlagBits bit0, AllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return AllocationCreateFlags(bit0) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator&(AllocationCreateFlagBits bit0, AllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return AllocationCreateFlags(bit0) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator^(AllocationCreateFlagBits bit0, AllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return AllocationCreateFlags(bit0) ^ 
bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR AllocationCreateFlags operator~(AllocationCreateFlagBits bits) VULKAN_HPP_NOEXCEPT { + return ~(AllocationCreateFlags(bits)); + } + + VULKAN_HPP_INLINE std::string to_string(AllocationCreateFlags value) { + if (!value) return "{}"; + std::string result; + if (value & AllocationCreateFlagBits::eDedicatedMemory) result += "DedicatedMemory | "; + if (value & AllocationCreateFlagBits::eNeverAllocate) result += "NeverAllocate | "; + if (value & AllocationCreateFlagBits::eMapped) result += "Mapped | "; + if (value & AllocationCreateFlagBits::eUserDataCopyString) result += "UserDataCopyString | "; + if (value & AllocationCreateFlagBits::eUpperAddress) result += "UpperAddress | "; + if (value & AllocationCreateFlagBits::eDontBind) result += "DontBind | "; + if (value & AllocationCreateFlagBits::eWithinBudget) result += "WithinBudget | "; + if (value & AllocationCreateFlagBits::eCanAlias) result += "CanAlias | "; + if (value & AllocationCreateFlagBits::eHostAccessSequentialWrite) result += "HostAccessSequentialWrite | "; + if (value & AllocationCreateFlagBits::eHostAccessRandom) result += "HostAccessRandom | "; + if (value & AllocationCreateFlagBits::eHostAccessAllowTransferInstead) result += "HostAccessAllowTransferInstead | "; + if (value & AllocationCreateFlagBits::eStrategyMinMemory) result += "StrategyMinMemory | "; + if (value & AllocationCreateFlagBits::eStrategyMinTime) result += "StrategyMinTime | "; + if (value & AllocationCreateFlagBits::eStrategyMinOffset) result += "StrategyMinOffset | "; + if (value & AllocationCreateFlagBits::eStrategyBestFit) result += "StrategyBestFit | "; + if (value & AllocationCreateFlagBits::eStrategyFirstFit) result += "StrategyFirstFit | "; + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class PoolCreateFlagBits : VmaPoolCreateFlags { + eIgnoreBufferImageGranularity = VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT, + eLinearAlgorithm = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT + }; + + VULKAN_HPP_INLINE std::string to_string(PoolCreateFlagBits value) { + if (value == PoolCreateFlagBits::eIgnoreBufferImageGranularity) return "IgnoreBufferImageGranularity"; + if (value == PoolCreateFlagBits::eLinearAlgorithm) return "LinearAlgorithm"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VULKAN_HPP_NAMESPACE { + template<> struct FlagTraits { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR Flags allFlags = + VMA_HPP_NAMESPACE::PoolCreateFlagBits::eIgnoreBufferImageGranularity + | VMA_HPP_NAMESPACE::PoolCreateFlagBits::eLinearAlgorithm; + }; +} + +namespace VMA_HPP_NAMESPACE { + + using PoolCreateFlags = VULKAN_HPP_NAMESPACE::Flags; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator|(PoolCreateFlagBits bit0, PoolCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return PoolCreateFlags(bit0) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator&(PoolCreateFlagBits bit0, PoolCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return PoolCreateFlags(bit0) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator^(PoolCreateFlagBits bit0, PoolCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return PoolCreateFlags(bit0) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR PoolCreateFlags operator~(PoolCreateFlagBits bits) VULKAN_HPP_NOEXCEPT { + return ~(PoolCreateFlags(bits)); + } + + 
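+  // Note: each flag type in this header repeats the same boilerplate -
+  // bitwise operators over the Flags wrapper plus a to_string() overload.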
VULKAN_HPP_INLINE std::string to_string(PoolCreateFlags value) { + if (!value) return "{}"; + std::string result; + if (value & PoolCreateFlagBits::eIgnoreBufferImageGranularity) result += "IgnoreBufferImageGranularity | "; + if (value & PoolCreateFlagBits::eLinearAlgorithm) result += "LinearAlgorithm | "; + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class DefragmentationFlagBits : VmaDefragmentationFlags { + eFlagAlgorithmFast = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT, + eFlagAlgorithmBalanced = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT, + eFlagAlgorithmFull = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT, + eFlagAlgorithmExtensive = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT + }; + + VULKAN_HPP_INLINE std::string to_string(DefragmentationFlagBits value) { + if (value == DefragmentationFlagBits::eFlagAlgorithmFast) return "FlagAlgorithmFast"; + if (value == DefragmentationFlagBits::eFlagAlgorithmBalanced) return "FlagAlgorithmBalanced"; + if (value == DefragmentationFlagBits::eFlagAlgorithmFull) return "FlagAlgorithmFull"; + if (value == DefragmentationFlagBits::eFlagAlgorithmExtensive) return "FlagAlgorithmExtensive"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VULKAN_HPP_NAMESPACE { + template<> struct FlagTraits { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR Flags allFlags = + VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmFast + | VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmBalanced + | VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmFull + | VMA_HPP_NAMESPACE::DefragmentationFlagBits::eFlagAlgorithmExtensive; + }; +} + +namespace VMA_HPP_NAMESPACE { + + using DefragmentationFlags = VULKAN_HPP_NAMESPACE::Flags; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator|(DefragmentationFlagBits bit0, DefragmentationFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return DefragmentationFlags(bit0) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator&(DefragmentationFlagBits bit0, DefragmentationFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return DefragmentationFlags(bit0) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator^(DefragmentationFlagBits bit0, DefragmentationFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return DefragmentationFlags(bit0) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR DefragmentationFlags operator~(DefragmentationFlagBits bits) VULKAN_HPP_NOEXCEPT { + return ~(DefragmentationFlags(bits)); + } + + VULKAN_HPP_INLINE std::string to_string(DefragmentationFlags value) { + if (!value) return "{}"; + std::string result; + if (value & DefragmentationFlagBits::eFlagAlgorithmFast) result += "FlagAlgorithmFast | "; + if (value & DefragmentationFlagBits::eFlagAlgorithmBalanced) result += "FlagAlgorithmBalanced | "; + if (value & DefragmentationFlagBits::eFlagAlgorithmFull) result += "FlagAlgorithmFull | "; + if (value & DefragmentationFlagBits::eFlagAlgorithmExtensive) result += "FlagAlgorithmExtensive | "; + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class DefragmentationMoveOperation { + eCopy = VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY, + eIgnore = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE, + eDestroy = VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY + }; + + VULKAN_HPP_INLINE std::string 
to_string(DefragmentationMoveOperation value) { + if (value == DefragmentationMoveOperation::eCopy) return "Copy"; + if (value == DefragmentationMoveOperation::eIgnore) return "Ignore"; + if (value == DefragmentationMoveOperation::eDestroy) return "Destroy"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class VirtualBlockCreateFlagBits : VmaVirtualBlockCreateFlags { + eLinearAlgorithm = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT + }; + + VULKAN_HPP_INLINE std::string to_string(VirtualBlockCreateFlagBits value) { + if (value == VirtualBlockCreateFlagBits::eLinearAlgorithm) return "LinearAlgorithm"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VULKAN_HPP_NAMESPACE { + template<> struct FlagTraits { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR Flags allFlags = + VMA_HPP_NAMESPACE::VirtualBlockCreateFlagBits::eLinearAlgorithm; + }; +} + +namespace VMA_HPP_NAMESPACE { + + using VirtualBlockCreateFlags = VULKAN_HPP_NAMESPACE::Flags; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator|(VirtualBlockCreateFlagBits bit0, VirtualBlockCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return VirtualBlockCreateFlags(bit0) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator&(VirtualBlockCreateFlagBits bit0, VirtualBlockCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return VirtualBlockCreateFlags(bit0) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator^(VirtualBlockCreateFlagBits bit0, VirtualBlockCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return VirtualBlockCreateFlags(bit0) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualBlockCreateFlags operator~(VirtualBlockCreateFlagBits bits) VULKAN_HPP_NOEXCEPT { + return ~(VirtualBlockCreateFlags(bits)); + } + + VULKAN_HPP_INLINE std::string to_string(VirtualBlockCreateFlags value) { + if (!value) return "{}"; + std::string result; + if (value & VirtualBlockCreateFlagBits::eLinearAlgorithm) result += "LinearAlgorithm | "; + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } +} + +namespace VMA_HPP_NAMESPACE { + + enum class VirtualAllocationCreateFlagBits : VmaVirtualAllocationCreateFlags { + eUpperAddress = VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + eStrategyMinMemory = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + eStrategyMinTime = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + eStrategyMinOffset = VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT + }; + + VULKAN_HPP_INLINE std::string to_string(VirtualAllocationCreateFlagBits value) { + if (value == VirtualAllocationCreateFlagBits::eUpperAddress) return "UpperAddress"; + if (value == VirtualAllocationCreateFlagBits::eStrategyMinMemory) return "StrategyMinMemory"; + if (value == VirtualAllocationCreateFlagBits::eStrategyMinTime) return "StrategyMinTime"; + if (value == VirtualAllocationCreateFlagBits::eStrategyMinOffset) return "StrategyMinOffset"; + return "invalid ( " + VULKAN_HPP_NAMESPACE::toHexString(static_cast(value)) + " )"; + } +} + +namespace VULKAN_HPP_NAMESPACE { + template<> struct FlagTraits { + static VULKAN_HPP_CONST_OR_CONSTEXPR bool isBitmask = true; + static VULKAN_HPP_CONST_OR_CONSTEXPR Flags allFlags = + VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eUpperAddress + | 
VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eStrategyMinMemory + | VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eStrategyMinTime + | VMA_HPP_NAMESPACE::VirtualAllocationCreateFlagBits::eStrategyMinOffset; + }; +} + +namespace VMA_HPP_NAMESPACE { + + using VirtualAllocationCreateFlags = VULKAN_HPP_NAMESPACE::Flags; + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator|(VirtualAllocationCreateFlagBits bit0, VirtualAllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return VirtualAllocationCreateFlags(bit0) | bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator&(VirtualAllocationCreateFlagBits bit0, VirtualAllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return VirtualAllocationCreateFlags(bit0) & bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator^(VirtualAllocationCreateFlagBits bit0, VirtualAllocationCreateFlagBits bit1) VULKAN_HPP_NOEXCEPT { + return VirtualAllocationCreateFlags(bit0) ^ bit1; + } + + VULKAN_HPP_INLINE VULKAN_HPP_CONSTEXPR VirtualAllocationCreateFlags operator~(VirtualAllocationCreateFlagBits bits) VULKAN_HPP_NOEXCEPT { + return ~(VirtualAllocationCreateFlags(bits)); + } + + VULKAN_HPP_INLINE std::string to_string(VirtualAllocationCreateFlags value) { + if (!value) return "{}"; + std::string result; + if (value & VirtualAllocationCreateFlagBits::eUpperAddress) result += "UpperAddress | "; + if (value & VirtualAllocationCreateFlagBits::eStrategyMinMemory) result += "StrategyMinMemory | "; + if (value & VirtualAllocationCreateFlagBits::eStrategyMinTime) result += "StrategyMinTime | "; + if (value & VirtualAllocationCreateFlagBits::eStrategyMinOffset) result += "StrategyMinOffset | "; + return "{ " + result.substr( 0, result.size() - 3 ) + " }"; + } +} + +#endif diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_funcs.hpp b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_funcs.hpp new file mode 100644 index 00000000..e3f74f8c --- /dev/null +++ b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_funcs.hpp @@ -0,0 +1,1049 @@ +#ifndef VULKAN_MEMORY_ALLOCATOR_FUNCS_HPP +#define VULKAN_MEMORY_ALLOCATOR_FUNCS_HPP + +namespace VMA_HPP_NAMESPACE { + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::destroy() const { + vmaDestroyAllocator(m_allocator); + } +#else + VULKAN_HPP_INLINE void Allocator::destroy() const { + vmaDestroyAllocator(m_allocator); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE AllocatorInfo Allocator::getAllocatorInfo() const { + AllocatorInfo allocatorInfo; + vmaGetAllocatorInfo(m_allocator, reinterpret_cast(&allocatorInfo)); + return allocatorInfo; + } +#endif + VULKAN_HPP_INLINE void Allocator::getAllocatorInfo(AllocatorInfo* allocatorInfo) const { + vmaGetAllocatorInfo(m_allocator, reinterpret_cast(allocatorInfo)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* Allocator::getPhysicalDeviceProperties() const { + const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* physicalDeviceProperties; + vmaGetPhysicalDeviceProperties(m_allocator, reinterpret_cast(&physicalDeviceProperties)); + return physicalDeviceProperties; + } +#endif + VULKAN_HPP_INLINE void Allocator::getPhysicalDeviceProperties(const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties** physicalDeviceProperties) const { + vmaGetPhysicalDeviceProperties(m_allocator, reinterpret_cast(physicalDeviceProperties)); + } + 
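+  // The pattern above repeats for each wrapped VMA function in this file:
+  // an enhanced-mode overload that returns the result directly, plus a plain
+  // overload writing through an out-parameter, mirroring vulkan-hpp's dual API.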
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* Allocator::getMemoryProperties() const { + const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* physicalDeviceMemoryProperties; + vmaGetMemoryProperties(m_allocator, reinterpret_cast(&physicalDeviceMemoryProperties)); + return physicalDeviceMemoryProperties; + } +#endif + VULKAN_HPP_INLINE void Allocator::getMemoryProperties(const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties** physicalDeviceMemoryProperties) const { + vmaGetMemoryProperties(m_allocator, reinterpret_cast(physicalDeviceMemoryProperties)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryPropertyFlags Allocator::getMemoryTypeProperties(uint32_t memoryTypeIndex) const { + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags flags; + vmaGetMemoryTypeProperties(m_allocator, memoryTypeIndex, reinterpret_cast(&flags)); + return flags; + } +#endif + VULKAN_HPP_INLINE void Allocator::getMemoryTypeProperties(uint32_t memoryTypeIndex, + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags* flags) const { + vmaGetMemoryTypeProperties(m_allocator, memoryTypeIndex, reinterpret_cast(flags)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::setCurrentFrameIndex(uint32_t frameIndex) const { + vmaSetCurrentFrameIndex(m_allocator, frameIndex); + } +#else + VULKAN_HPP_INLINE void Allocator::setCurrentFrameIndex(uint32_t frameIndex) const { + vmaSetCurrentFrameIndex(m_allocator, frameIndex); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE TotalStatistics Allocator::calculateStatistics() const { + TotalStatistics stats; + vmaCalculateStatistics(m_allocator, reinterpret_cast(&stats)); + return stats; + } +#endif + VULKAN_HPP_INLINE void Allocator::calculateStatistics(TotalStatistics* stats) const { + vmaCalculateStatistics(m_allocator, reinterpret_cast(stats)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template::value, int>::type> + VULKAN_HPP_INLINE std::vector Allocator::getHeapBudgets(VectorAllocator& vectorAllocator) const { + std::vector budgets(getMemoryProperties()->memoryHeapCount, vectorAllocator); + vmaGetHeapBudgets(m_allocator, reinterpret_cast(budgets.data())); + return budgets; + } + + template + VULKAN_HPP_INLINE std::vector Allocator::getHeapBudgets() const { + std::vector budgets(getMemoryProperties()->memoryHeapCount); + vmaGetHeapBudgets(m_allocator, reinterpret_cast(budgets.data())); + return budgets; + } +#endif + VULKAN_HPP_INLINE void Allocator::getHeapBudgets(Budget* budgets) const { + vmaGetHeapBudgets(m_allocator, reinterpret_cast(budgets)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::findMemoryTypeIndex(uint32_t memoryTypeBits, + const AllocationCreateInfo& allocationCreateInfo) const { + uint32_t memoryTypeIndex; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFindMemoryTypeIndex(m_allocator, memoryTypeBits, reinterpret_cast(&allocationCreateInfo), &memoryTypeIndex) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::findMemoryTypeIndex"); + return createResultValueType(result, memoryTypeIndex); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::findMemoryTypeIndex(uint32_t memoryTypeBits, + const AllocationCreateInfo* allocationCreateInfo, + uint32_t* memoryTypeIndex) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFindMemoryTypeIndex(m_allocator, 
memoryTypeBits, reinterpret_cast(allocationCreateInfo), memoryTypeIndex) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::findMemoryTypeIndexForBufferInfo(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo) const { + uint32_t memoryTypeIndex; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFindMemoryTypeIndexForBufferInfo(m_allocator, reinterpret_cast(&bufferCreateInfo), reinterpret_cast(&allocationCreateInfo), &memoryTypeIndex) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::findMemoryTypeIndexForBufferInfo"); + return createResultValueType(result, memoryTypeIndex); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::findMemoryTypeIndexForBufferInfo(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + uint32_t* memoryTypeIndex) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFindMemoryTypeIndexForBufferInfo(m_allocator, reinterpret_cast(bufferCreateInfo), reinterpret_cast(allocationCreateInfo), memoryTypeIndex) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::findMemoryTypeIndexForImageInfo(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo, + const AllocationCreateInfo& allocationCreateInfo) const { + uint32_t memoryTypeIndex; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFindMemoryTypeIndexForImageInfo(m_allocator, reinterpret_cast(&imageCreateInfo), reinterpret_cast(&allocationCreateInfo), &memoryTypeIndex) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::findMemoryTypeIndexForImageInfo"); + return createResultValueType(result, memoryTypeIndex); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::findMemoryTypeIndexForImageInfo(const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + uint32_t* memoryTypeIndex) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFindMemoryTypeIndexForImageInfo(m_allocator, reinterpret_cast(imageCreateInfo), reinterpret_cast(allocationCreateInfo), memoryTypeIndex) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::createPool(const PoolCreateInfo& createInfo) const { + Pool pool; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreatePool(m_allocator, reinterpret_cast(&createInfo), reinterpret_cast(&pool)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createPool"); + return createResultValueType(result, pool); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::createPoolUnique(const PoolCreateInfo& createInfo) const { + Pool pool; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreatePool(m_allocator, reinterpret_cast(&createInfo), reinterpret_cast(&pool)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createPool"); + return createResultValueType(result, createUniqueHandle(pool, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::createPool(const PoolCreateInfo* createInfo, + Pool* pool) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreatePool(m_allocator, 
reinterpret_cast(createInfo), reinterpret_cast(pool)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::destroyPool(Pool pool) const { + vmaDestroyPool(m_allocator, static_cast(pool)); + } +#else + VULKAN_HPP_INLINE void Allocator::destroyPool(Pool pool) const { + vmaDestroyPool(m_allocator, static_cast(pool)); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE Statistics Allocator::getPoolStatistics(Pool pool) const { + Statistics poolStats; + vmaGetPoolStatistics(m_allocator, static_cast(pool), reinterpret_cast(&poolStats)); + return poolStats; + } +#endif + VULKAN_HPP_INLINE void Allocator::getPoolStatistics(Pool pool, + Statistics* poolStats) const { + vmaGetPoolStatistics(m_allocator, static_cast(pool), reinterpret_cast(poolStats)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE DetailedStatistics Allocator::calculatePoolStatistics(Pool pool) const { + DetailedStatistics poolStats; + vmaCalculatePoolStatistics(m_allocator, static_cast(pool), reinterpret_cast(&poolStats)); + return poolStats; + } +#endif + VULKAN_HPP_INLINE void Allocator::calculatePoolStatistics(Pool pool, + DetailedStatistics* poolStats) const { + vmaCalculatePoolStatistics(m_allocator, static_cast(pool), reinterpret_cast(poolStats)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::checkPoolCorruption(Pool pool) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCheckPoolCorruption(m_allocator, static_cast(pool)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::checkPoolCorruption"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::checkPoolCorruption(Pool pool) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCheckPoolCorruption(m_allocator, static_cast(pool)) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE const char* Allocator::getPoolName(Pool pool) const { + const char* name; + vmaGetPoolName(m_allocator, static_cast(pool), &name); + return name; + } +#endif + VULKAN_HPP_INLINE void Allocator::getPoolName(Pool pool, + const char** name) const { + vmaGetPoolName(m_allocator, static_cast(pool), name); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::setPoolName(Pool pool, + const char* name) const { + vmaSetPoolName(m_allocator, static_cast(pool), name); + } +#else + VULKAN_HPP_INLINE void Allocator::setPoolName(Pool pool, + const char* name) const { + vmaSetPoolName(m_allocator, static_cast(pool), name); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::allocateMemory(const VULKAN_HPP_NAMESPACE::MemoryRequirements& vkMemoryRequirements, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + Allocation allocation; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemory(m_allocator, reinterpret_cast(&vkMemoryRequirements), reinterpret_cast(&createInfo), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemory"); + return createResultValueType(result, allocation); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type 
Allocator::allocateMemoryUnique(const VULKAN_HPP_NAMESPACE::MemoryRequirements& vkMemoryRequirements, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + Allocation allocation; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemory(m_allocator, reinterpret_cast(&vkMemoryRequirements), reinterpret_cast(&createInfo), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemory"); + return createResultValueType(result, createUniqueHandle(allocation, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::allocateMemory(const VULKAN_HPP_NAMESPACE::MemoryRequirements* vkMemoryRequirements, + const AllocationCreateInfo* createInfo, + Allocation* allocation, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemory(m_allocator, reinterpret_cast(vkMemoryRequirements), reinterpret_cast(createInfo), reinterpret_cast(allocation), reinterpret_cast(allocationInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template::value, int>::type> + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::allocateMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo, + VectorAllocator& vectorAllocator) const { + size_t allocationCount = vkMemoryRequirements.size(); + std::vector allocations(allocationCount, vectorAllocator); + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryPages(m_allocator, reinterpret_cast(vkMemoryRequirements.data()), reinterpret_cast(createInfo.data()), allocationCount, reinterpret_cast(allocations.data()), reinterpret_cast(allocationInfo.data())) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryPages"); + return createResultValueType(result, allocations); + } + + template + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::allocateMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo) const { + size_t allocationCount = vkMemoryRequirements.size(); + std::vector allocations(allocationCount); + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryPages(m_allocator, reinterpret_cast(vkMemoryRequirements.data()), reinterpret_cast(createInfo.data()), allocationCount, reinterpret_cast(allocations.data()), reinterpret_cast(allocationInfo.data())) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryPages"); + return createResultValueType(result, allocations); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template::value, int>::type> + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::allocateMemoryPagesUnique(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo, + VectorAllocator& vectorAllocator) const { + size_t allocationCount = vkMemoryRequirements.size(); + std::vector allocations(allocationCount); + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryPages(m_allocator, reinterpret_cast(vkMemoryRequirements.data()), reinterpret_cast(createInfo.data()), allocationCount, 
reinterpret_cast(allocations.data()), reinterpret_cast(allocationInfo.data())) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryPages"); + return createResultValueType(result, createUniqueHandleVector(allocations, this, vectorAllocator)); + } + + template + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::allocateMemoryPagesUnique(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo) const { + size_t allocationCount = vkMemoryRequirements.size(); + std::vector allocations(allocationCount); + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryPages(m_allocator, reinterpret_cast(vkMemoryRequirements.data()), reinterpret_cast(createInfo.data()), allocationCount, reinterpret_cast(allocations.data()), reinterpret_cast(allocationInfo.data())) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryPages"); + return createResultValueType(result, createUniqueHandleVector(allocations, this, VectorAllocator())); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::allocateMemoryPages(const VULKAN_HPP_NAMESPACE::MemoryRequirements* vkMemoryRequirements, + const AllocationCreateInfo* createInfo, + size_t allocationCount, + Allocation* allocations, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryPages(m_allocator, reinterpret_cast(vkMemoryRequirements), reinterpret_cast(createInfo), allocationCount, reinterpret_cast(allocations), reinterpret_cast(allocationInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::allocateMemoryForBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + Allocation allocation; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryForBuffer(m_allocator, static_cast(buffer), reinterpret_cast(&createInfo), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryForBuffer"); + return createResultValueType(result, allocation); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::allocateMemoryForBufferUnique(VULKAN_HPP_NAMESPACE::Buffer buffer, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + Allocation allocation; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryForBuffer(m_allocator, static_cast(buffer), reinterpret_cast(&createInfo), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryForBuffer"); + return createResultValueType(result, createUniqueHandle(allocation, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::allocateMemoryForBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + const AllocationCreateInfo* createInfo, + Allocation* allocation, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryForBuffer(m_allocator, static_cast(buffer), reinterpret_cast(createInfo), reinterpret_cast(allocation), reinterpret_cast(allocationInfo)) ); + 
return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::allocateMemoryForImage(VULKAN_HPP_NAMESPACE::Image image, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + Allocation allocation; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryForImage(m_allocator, static_cast(image), reinterpret_cast(&createInfo), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryForImage"); + return createResultValueType(result, allocation); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::allocateMemoryForImageUnique(VULKAN_HPP_NAMESPACE::Image image, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + Allocation allocation; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryForImage(m_allocator, static_cast(image), reinterpret_cast(&createInfo), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::allocateMemoryForImage"); + return createResultValueType(result, createUniqueHandle(allocation, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::allocateMemoryForImage(VULKAN_HPP_NAMESPACE::Image image, + const AllocationCreateInfo* createInfo, + Allocation* allocation, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaAllocateMemoryForImage(m_allocator, static_cast(image), reinterpret_cast(createInfo), reinterpret_cast(allocation), reinterpret_cast(allocationInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::freeMemory(const Allocation allocation) const { + vmaFreeMemory(m_allocator, static_cast(allocation)); + } +#else + VULKAN_HPP_INLINE void Allocator::freeMemory(const Allocation allocation) const { + vmaFreeMemory(m_allocator, static_cast(allocation)); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::freeMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy allocations) const { + size_t allocationCount = allocations.size(); + vmaFreeMemoryPages(m_allocator, allocationCount, reinterpret_cast(allocations.data())); + } +#endif + VULKAN_HPP_INLINE void Allocator::freeMemoryPages(size_t allocationCount, + const Allocation* allocations) const { + vmaFreeMemoryPages(m_allocator, allocationCount, reinterpret_cast(allocations)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE AllocationInfo Allocator::getAllocationInfo(Allocation allocation) const { + AllocationInfo allocationInfo; + vmaGetAllocationInfo(m_allocator, static_cast(allocation), reinterpret_cast(&allocationInfo)); + return allocationInfo; + } +#endif + VULKAN_HPP_INLINE void Allocator::getAllocationInfo(Allocation allocation, + AllocationInfo* allocationInfo) const { + vmaGetAllocationInfo(m_allocator, static_cast(allocation), reinterpret_cast(allocationInfo)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::setAllocationUserData(Allocation allocation, + void* userData) const { + vmaSetAllocationUserData(m_allocator, static_cast(allocation), userData); + } +#else + VULKAN_HPP_INLINE void Allocator::setAllocationUserData(Allocation 
allocation, + void* userData) const { + vmaSetAllocationUserData(m_allocator, static_cast(allocation), userData); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::setAllocationName(Allocation allocation, + const char* name) const { + vmaSetAllocationName(m_allocator, static_cast(allocation), name); + } +#else + VULKAN_HPP_INLINE void Allocator::setAllocationName(Allocation allocation, + const char* name) const { + vmaSetAllocationName(m_allocator, static_cast(allocation), name); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::MemoryPropertyFlags Allocator::getAllocationMemoryProperties(Allocation allocation) const { + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags flags; + vmaGetAllocationMemoryProperties(m_allocator, static_cast(allocation), reinterpret_cast(&flags)); + return flags; + } +#endif + VULKAN_HPP_INLINE void Allocator::getAllocationMemoryProperties(Allocation allocation, + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags* flags) const { + vmaGetAllocationMemoryProperties(m_allocator, static_cast(allocation), reinterpret_cast(flags)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::mapMemory(Allocation allocation) const { + void* data; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaMapMemory(m_allocator, static_cast(allocation), &data) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::mapMemory"); + return createResultValueType(result, data); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::mapMemory(Allocation allocation, + void** data) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaMapMemory(m_allocator, static_cast(allocation), data) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::unmapMemory(Allocation allocation) const { + vmaUnmapMemory(m_allocator, static_cast(allocation)); + } +#else + VULKAN_HPP_INLINE void Allocator::unmapMemory(Allocation allocation) const { + vmaUnmapMemory(m_allocator, static_cast(allocation)); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::flushAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFlushAllocation(m_allocator, static_cast(allocation), static_cast(offset), static_cast(size)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::flushAllocation"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::flushAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFlushAllocation(m_allocator, static_cast(allocation), static_cast(offset), static_cast(size)) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::invalidateAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaInvalidateAllocation(m_allocator, static_cast(allocation), static_cast(offset), static_cast(size)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING 
"::Allocator::invalidateAllocation"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::invalidateAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaInvalidateAllocation(m_allocator, static_cast(allocation), static_cast(offset), static_cast(size)) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::flushAllocations(VULKAN_HPP_NAMESPACE::ArrayProxy allocations, + VULKAN_HPP_NAMESPACE::ArrayProxy offsets, + VULKAN_HPP_NAMESPACE::ArrayProxy sizes) const { + uint32_t allocationCount = allocations.size(); + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFlushAllocations(m_allocator, allocationCount, reinterpret_cast(allocations.data()), reinterpret_cast(offsets.data()), reinterpret_cast(sizes.data())) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::flushAllocations"); + return createResultValueType(result); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::flushAllocations(uint32_t allocationCount, + const Allocation* allocations, + const VULKAN_HPP_NAMESPACE::DeviceSize* offsets, + const VULKAN_HPP_NAMESPACE::DeviceSize* sizes) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaFlushAllocations(m_allocator, allocationCount, reinterpret_cast(allocations), reinterpret_cast(offsets), reinterpret_cast(sizes)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::invalidateAllocations(VULKAN_HPP_NAMESPACE::ArrayProxy allocations, + VULKAN_HPP_NAMESPACE::ArrayProxy offsets, + VULKAN_HPP_NAMESPACE::ArrayProxy sizes) const { + uint32_t allocationCount = allocations.size(); + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaInvalidateAllocations(m_allocator, allocationCount, reinterpret_cast(allocations.data()), reinterpret_cast(offsets.data()), reinterpret_cast(sizes.data())) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::invalidateAllocations"); + return createResultValueType(result); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::invalidateAllocations(uint32_t allocationCount, + const Allocation* allocations, + const VULKAN_HPP_NAMESPACE::DeviceSize* offsets, + const VULKAN_HPP_NAMESPACE::DeviceSize* sizes) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaInvalidateAllocations(m_allocator, allocationCount, reinterpret_cast(allocations), reinterpret_cast(offsets), reinterpret_cast(sizes)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::checkCorruption(uint32_t memoryTypeBits) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCheckCorruption(m_allocator, memoryTypeBits) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::checkCorruption"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::checkCorruption(uint32_t memoryTypeBits) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCheckCorruption(m_allocator, memoryTypeBits) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type 
Allocator::beginDefragmentation(const DefragmentationInfo& info) const { + DefragmentationContext context; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBeginDefragmentation(m_allocator, reinterpret_cast(&info), reinterpret_cast(&context)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::beginDefragmentation"); + return createResultValueType(result, context); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::beginDefragmentation(const DefragmentationInfo* info, + DefragmentationContext* context) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBeginDefragmentation(m_allocator, reinterpret_cast(info), reinterpret_cast(context)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::endDefragmentation(DefragmentationContext context, + VULKAN_HPP_NAMESPACE::Optional stats) const { + vmaEndDefragmentation(m_allocator, static_cast(context), reinterpret_cast(static_cast(stats))); + } +#endif + VULKAN_HPP_INLINE void Allocator::endDefragmentation(DefragmentationContext context, + DefragmentationStats* stats) const { + vmaEndDefragmentation(m_allocator, static_cast(context), reinterpret_cast(stats)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::beginDefragmentationPass(DefragmentationContext context) const { + DefragmentationPassMoveInfo passInfo; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBeginDefragmentationPass(m_allocator, static_cast(context), reinterpret_cast(&passInfo)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::beginDefragmentationPass"); + return createResultValueType(result, passInfo); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::beginDefragmentationPass(DefragmentationContext context, + DefragmentationPassMoveInfo* passInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBeginDefragmentationPass(m_allocator, static_cast(context), reinterpret_cast(passInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::endDefragmentationPass(DefragmentationContext context) const { + DefragmentationPassMoveInfo passInfo; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaEndDefragmentationPass(m_allocator, static_cast(context), reinterpret_cast(&passInfo)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::endDefragmentationPass"); + return createResultValueType(result, passInfo); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::endDefragmentationPass(DefragmentationContext context, + DefragmentationPassMoveInfo* passInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaEndDefragmentationPass(m_allocator, static_cast(context), reinterpret_cast(passInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::bindBufferMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Buffer buffer) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindBufferMemory(m_allocator, static_cast(allocation), static_cast(buffer)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::bindBufferMemory"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::bindBufferMemory(Allocation allocation, + 
VULKAN_HPP_NAMESPACE::Buffer buffer) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindBufferMemory(m_allocator, static_cast(allocation), static_cast(buffer)) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::bindBufferMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Buffer buffer, + const void* next) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindBufferMemory2(m_allocator, static_cast(allocation), static_cast(allocationLocalOffset), static_cast(buffer), next) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::bindBufferMemory2"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::bindBufferMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Buffer buffer, + const void* next) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindBufferMemory2(m_allocator, static_cast(allocation), static_cast(allocationLocalOffset), static_cast(buffer), next) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::bindImageMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Image image) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindImageMemory(m_allocator, static_cast(allocation), static_cast(image)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::bindImageMemory"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::bindImageMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Image image) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindImageMemory(m_allocator, static_cast(allocation), static_cast(image)) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::bindImageMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Image image, + const void* next) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindImageMemory2(m_allocator, static_cast(allocation), static_cast(allocationLocalOffset), static_cast(image), next) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::bindImageMemory2"); + return createResultValueType(result); + } +#else + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::bindImageMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Image image, + const void* next) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaBindImageMemory2(m_allocator, static_cast(allocation), static_cast(allocationLocalOffset), static_cast(image), next) ); + return result; + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::createBuffer(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + std::pair pair; + VULKAN_HPP_NAMESPACE::Buffer& buffer = pair.first; + Allocation& allocation = pair.second; + VULKAN_HPP_NAMESPACE::Result result = static_cast( 
vmaCreateBuffer(m_allocator, reinterpret_cast(&bufferCreateInfo), reinterpret_cast(&allocationCreateInfo), reinterpret_cast(&buffer), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createBuffer"); + return createResultValueType(result, pair); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::createBufferUnique(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + std::pair pair; + VULKAN_HPP_NAMESPACE::Buffer& buffer = pair.first; + Allocation& allocation = pair.second; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateBuffer(m_allocator, reinterpret_cast(&bufferCreateInfo), reinterpret_cast(&allocationCreateInfo), reinterpret_cast(&buffer), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createBuffer"); + return createResultValueType(result, createUniqueHandle(pair, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::createBuffer(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Buffer* buffer, + Allocation* allocation, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateBuffer(m_allocator, reinterpret_cast(bufferCreateInfo), reinterpret_cast(allocationCreateInfo), reinterpret_cast(buffer), reinterpret_cast(allocation), reinterpret_cast(allocationInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::createBufferWithAlignment(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::DeviceSize minAlignment, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + std::pair pair; + VULKAN_HPP_NAMESPACE::Buffer& buffer = pair.first; + Allocation& allocation = pair.second; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateBufferWithAlignment(m_allocator, reinterpret_cast(&bufferCreateInfo), reinterpret_cast(&allocationCreateInfo), static_cast(minAlignment), reinterpret_cast(&buffer), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createBufferWithAlignment"); + return createResultValueType(result, pair); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::createBufferWithAlignmentUnique(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::DeviceSize minAlignment, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + std::pair pair; + VULKAN_HPP_NAMESPACE::Buffer& buffer = pair.first; + Allocation& allocation = pair.second; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateBufferWithAlignment(m_allocator, reinterpret_cast(&bufferCreateInfo), reinterpret_cast(&allocationCreateInfo), static_cast(minAlignment), reinterpret_cast(&buffer), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING 
"::Allocator::createBufferWithAlignment"); + return createResultValueType(result, createUniqueHandle(pair, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::createBufferWithAlignment(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + VULKAN_HPP_NAMESPACE::DeviceSize minAlignment, + VULKAN_HPP_NAMESPACE::Buffer* buffer, + Allocation* allocation, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateBufferWithAlignment(m_allocator, reinterpret_cast(bufferCreateInfo), reinterpret_cast(allocationCreateInfo), static_cast(minAlignment), reinterpret_cast(buffer), reinterpret_cast(allocation), reinterpret_cast(allocationInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::createAliasingBuffer(Allocation allocation, + const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo) const { + VULKAN_HPP_NAMESPACE::Buffer buffer; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAliasingBuffer(m_allocator, static_cast(allocation), reinterpret_cast(&bufferCreateInfo), reinterpret_cast(&buffer)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createAliasingBuffer"); + return createResultValueType(result, buffer); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::createAliasingBuffer(Allocation allocation, + const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + VULKAN_HPP_NAMESPACE::Buffer* buffer) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAliasingBuffer(m_allocator, static_cast(allocation), reinterpret_cast(bufferCreateInfo), reinterpret_cast(buffer)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::destroyBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + Allocation allocation) const { + vmaDestroyBuffer(m_allocator, static_cast(buffer), static_cast(allocation)); + } +#else + VULKAN_HPP_INLINE void Allocator::destroyBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + Allocation allocation) const { + vmaDestroyBuffer(m_allocator, static_cast(buffer), static_cast(allocation)); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::createImage(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + std::pair pair; + VULKAN_HPP_NAMESPACE::Image& image = pair.first; + Allocation& allocation = pair.second; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateImage(m_allocator, reinterpret_cast(&imageCreateInfo), reinterpret_cast(&allocationCreateInfo), reinterpret_cast(&image), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createImage"); + return createResultValueType(result, pair); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType>::type Allocator::createImageUnique(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo) const { + std::pair pair; + VULKAN_HPP_NAMESPACE::Image& image = pair.first; + Allocation& allocation = pair.second; + 
VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateImage(m_allocator, reinterpret_cast(&imageCreateInfo), reinterpret_cast(&allocationCreateInfo), reinterpret_cast(&image), reinterpret_cast(&allocation), reinterpret_cast(static_cast(allocationInfo))) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createImage"); + return createResultValueType(result, createUniqueHandle(pair, this)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::createImage(const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Image* image, + Allocation* allocation, + AllocationInfo* allocationInfo) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateImage(m_allocator, reinterpret_cast(imageCreateInfo), reinterpret_cast(allocationCreateInfo), reinterpret_cast(image), reinterpret_cast(allocation), reinterpret_cast(allocationInfo)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type Allocator::createAliasingImage(Allocation allocation, + const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo) const { + VULKAN_HPP_NAMESPACE::Image image; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAliasingImage(m_allocator, static_cast(allocation), reinterpret_cast(&imageCreateInfo), reinterpret_cast(&image)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::Allocator::createAliasingImage"); + return createResultValueType(result, image); + } +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result Allocator::createAliasingImage(Allocation allocation, + const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo, + VULKAN_HPP_NAMESPACE::Image* image) const { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAliasingImage(m_allocator, static_cast(allocation), reinterpret_cast(imageCreateInfo), reinterpret_cast(image)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::destroyImage(VULKAN_HPP_NAMESPACE::Image image, + Allocation allocation) const { + vmaDestroyImage(m_allocator, static_cast(image), static_cast(allocation)); + } +#else + VULKAN_HPP_INLINE void Allocator::destroyImage(VULKAN_HPP_NAMESPACE::Image image, + Allocation allocation) const { + vmaDestroyImage(m_allocator, static_cast(image), static_cast(allocation)); + } +#endif + +#if VMA_STATS_STRING_ENABLED +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE char* Allocator::buildStatsString(VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const { + char* statsString; + vmaBuildStatsString(m_allocator, &statsString, static_cast(detailedMap)); + return statsString; + } +#endif + VULKAN_HPP_INLINE void Allocator::buildStatsString(char** statsString, + VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const { + vmaBuildStatsString(m_allocator, statsString, static_cast(detailedMap)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void Allocator::freeStatsString(char* statsString) const { + vmaFreeStatsString(m_allocator, statsString); + } +#else + VULKAN_HPP_INLINE void Allocator::freeStatsString(char* statsString) const { + vmaFreeStatsString(m_allocator, statsString); + } +#endif + +#endif +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void VirtualBlock::destroy() const { + vmaDestroyVirtualBlock(m_virtualBlock); + } +#else + VULKAN_HPP_INLINE void VirtualBlock::destroy() const { + vmaDestroyVirtualBlock(m_virtualBlock); 
+  }
+#endif
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 VirtualBlock::isVirtualBlockEmpty() const {
+    VULKAN_HPP_NAMESPACE::Bool32 result = static_cast<VULKAN_HPP_NAMESPACE::Bool32>( vmaIsVirtualBlockEmpty(m_virtualBlock) );
+    return result;
+  }
+#else
+  VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Bool32 VirtualBlock::isVirtualBlockEmpty() const {
+    VULKAN_HPP_NAMESPACE::Bool32 result = static_cast<VULKAN_HPP_NAMESPACE::Bool32>( vmaIsVirtualBlockEmpty(m_virtualBlock) );
+    return result;
+  }
+#endif
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_INLINE VirtualAllocationInfo VirtualBlock::getVirtualAllocationInfo(VirtualAllocation allocation) const {
+    VirtualAllocationInfo virtualAllocInfo;
+    vmaGetVirtualAllocationInfo(m_virtualBlock, static_cast<VmaVirtualAllocation>(allocation), reinterpret_cast<VmaVirtualAllocationInfo*>(&virtualAllocInfo));
+    return virtualAllocInfo;
+  }
+#endif
+  VULKAN_HPP_INLINE void VirtualBlock::getVirtualAllocationInfo(VirtualAllocation allocation,
+                                                                VirtualAllocationInfo* virtualAllocInfo) const {
+    vmaGetVirtualAllocationInfo(m_virtualBlock, static_cast<VmaVirtualAllocation>(allocation), reinterpret_cast<VmaVirtualAllocationInfo*>(virtualAllocInfo));
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType<VirtualAllocation>::type VirtualBlock::virtualAllocate(const VirtualAllocationCreateInfo& createInfo,
+                                                                                                                          VULKAN_HPP_NAMESPACE::Optional<VULKAN_HPP_NAMESPACE::DeviceSize> offset) const {
+    VirtualAllocation allocation;
+    VULKAN_HPP_NAMESPACE::Result result = static_cast<VULKAN_HPP_NAMESPACE::Result>( vmaVirtualAllocate(m_virtualBlock, reinterpret_cast<const VmaVirtualAllocationCreateInfo*>(&createInfo), reinterpret_cast<VmaVirtualAllocation*>(&allocation), reinterpret_cast<VkDeviceSize*>(static_cast<VULKAN_HPP_NAMESPACE::DeviceSize*>(offset))) );
+    resultCheck(result, VMA_HPP_NAMESPACE_STRING "::VirtualBlock::virtualAllocate");
+    return createResultValueType(result, allocation);
+  }
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+  VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueVirtualAllocation>::type VirtualBlock::virtualAllocateUnique(const VirtualAllocationCreateInfo& createInfo,
+                                                                                                                                      VULKAN_HPP_NAMESPACE::Optional<VULKAN_HPP_NAMESPACE::DeviceSize> offset) const {
+    VirtualAllocation allocation;
+    VULKAN_HPP_NAMESPACE::Result result = static_cast<VULKAN_HPP_NAMESPACE::Result>( vmaVirtualAllocate(m_virtualBlock, reinterpret_cast<const VmaVirtualAllocationCreateInfo*>(&createInfo), reinterpret_cast<VmaVirtualAllocation*>(&allocation), reinterpret_cast<VkDeviceSize*>(static_cast<VULKAN_HPP_NAMESPACE::DeviceSize*>(offset))) );
+    resultCheck(result, VMA_HPP_NAMESPACE_STRING "::VirtualBlock::virtualAllocate");
+    return createResultValueType(result, createUniqueHandle(allocation, this));
+  }
+#endif
+#endif
+  VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result VirtualBlock::virtualAllocate(const VirtualAllocationCreateInfo* createInfo,
+                                                                               VirtualAllocation* allocation,
+                                                                               VULKAN_HPP_NAMESPACE::DeviceSize* offset) const {
+    VULKAN_HPP_NAMESPACE::Result result = static_cast<VULKAN_HPP_NAMESPACE::Result>( vmaVirtualAllocate(m_virtualBlock, reinterpret_cast<const VmaVirtualAllocationCreateInfo*>(createInfo), reinterpret_cast<VmaVirtualAllocation*>(allocation), reinterpret_cast<VkDeviceSize*>(offset)) );
+    return result;
+  }
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_INLINE void VirtualBlock::virtualFree(VirtualAllocation allocation) const {
+    vmaVirtualFree(m_virtualBlock, static_cast<VmaVirtualAllocation>(allocation));
+  }
+#else
+  VULKAN_HPP_INLINE void VirtualBlock::virtualFree(VirtualAllocation allocation) const {
+    vmaVirtualFree(m_virtualBlock, static_cast<VmaVirtualAllocation>(allocation));
+  }
+#endif
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_INLINE void VirtualBlock::clearVirtualBlock() const {
+    vmaClearVirtualBlock(m_virtualBlock);
+  }
+#else
+  VULKAN_HPP_INLINE void VirtualBlock::clearVirtualBlock() const {
+    vmaClearVirtualBlock(m_virtualBlock);
+  }
+#endif
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_INLINE void VirtualBlock::setVirtualAllocationUserData(VirtualAllocation allocation,
+                                                                    void*
userData) const { + vmaSetVirtualAllocationUserData(m_virtualBlock, static_cast(allocation), userData); + } +#else + VULKAN_HPP_INLINE void VirtualBlock::setVirtualAllocationUserData(VirtualAllocation allocation, + void* userData) const { + vmaSetVirtualAllocationUserData(m_virtualBlock, static_cast(allocation), userData); + } +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE Statistics VirtualBlock::getVirtualBlockStatistics() const { + Statistics stats; + vmaGetVirtualBlockStatistics(m_virtualBlock, reinterpret_cast(&stats)); + return stats; + } +#endif + VULKAN_HPP_INLINE void VirtualBlock::getVirtualBlockStatistics(Statistics* stats) const { + vmaGetVirtualBlockStatistics(m_virtualBlock, reinterpret_cast(stats)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE DetailedStatistics VirtualBlock::calculateVirtualBlockStatistics() const { + DetailedStatistics stats; + vmaCalculateVirtualBlockStatistics(m_virtualBlock, reinterpret_cast(&stats)); + return stats; + } +#endif + VULKAN_HPP_INLINE void VirtualBlock::calculateVirtualBlockStatistics(DetailedStatistics* stats) const { + vmaCalculateVirtualBlockStatistics(m_virtualBlock, reinterpret_cast(stats)); + } + +#if VMA_STATS_STRING_ENABLED +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE char* VirtualBlock::buildVirtualBlockStatsString(VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const { + char* statsString; + vmaBuildVirtualBlockStatsString(m_virtualBlock, &statsString, static_cast(detailedMap)); + return statsString; + } +#endif + VULKAN_HPP_INLINE void VirtualBlock::buildVirtualBlockStatsString(char** statsString, + VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const { + vmaBuildVirtualBlockStatsString(m_virtualBlock, statsString, static_cast(detailedMap)); + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE void VirtualBlock::freeVirtualBlockStatsString(char* statsString) const { + vmaFreeVirtualBlockStatsString(m_virtualBlock, statsString); + } +#else + VULKAN_HPP_INLINE void VirtualBlock::freeVirtualBlockStatsString(char* statsString) const { + vmaFreeVirtualBlockStatsString(m_virtualBlock, statsString); + } +#endif + +#endif +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type createAllocator(const AllocatorCreateInfo& createInfo) { + Allocator allocator; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAllocator(reinterpret_cast(&createInfo), reinterpret_cast(&allocator)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::createAllocator"); + return createResultValueType(result, allocator); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type createAllocatorUnique(const AllocatorCreateInfo& createInfo) { + Allocator allocator; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAllocator(reinterpret_cast(&createInfo), reinterpret_cast(&allocator)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::createAllocator"); + return createResultValueType(result, createUniqueHandle(allocator)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result createAllocator(const AllocatorCreateInfo* createInfo, + Allocator* allocator) { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateAllocator(reinterpret_cast(createInfo), reinterpret_cast(allocator)) ); + return result; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type createVirtualBlock(const 
VirtualBlockCreateInfo& createInfo) { + VirtualBlock virtualBlock; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateVirtualBlock(reinterpret_cast(&createInfo), reinterpret_cast(&virtualBlock)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::createVirtualBlock"); + return createResultValueType(result, virtualBlock); + } +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_INLINE typename VULKAN_HPP_NAMESPACE::ResultValueType::type createVirtualBlockUnique(const VirtualBlockCreateInfo& createInfo) { + VirtualBlock virtualBlock; + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateVirtualBlock(reinterpret_cast(&createInfo), reinterpret_cast(&virtualBlock)) ); + resultCheck(result, VMA_HPP_NAMESPACE_STRING "::createVirtualBlock"); + return createResultValueType(result, createUniqueHandle(virtualBlock)); + } +#endif +#endif + VULKAN_HPP_INLINE VULKAN_HPP_NAMESPACE::Result createVirtualBlock(const VirtualBlockCreateInfo* createInfo, + VirtualBlock* virtualBlock) { + VULKAN_HPP_NAMESPACE::Result result = static_cast( vmaCreateVirtualBlock(reinterpret_cast(createInfo), reinterpret_cast(virtualBlock)) ); + return result; + } +} +#endif diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_handles.hpp b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_handles.hpp new file mode 100644 index 00000000..27668984 --- /dev/null +++ b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_handles.hpp @@ -0,0 +1,929 @@ +#ifndef VULKAN_MEMORY_ALLOCATOR_HANDLES_HPP +#define VULKAN_MEMORY_ALLOCATOR_HANDLES_HPP + +namespace VMA_HPP_NAMESPACE { + + struct DeviceMemoryCallbacks; + struct VulkanFunctions; + struct AllocatorCreateInfo; + struct AllocatorInfo; + struct Statistics; + struct DetailedStatistics; + struct TotalStatistics; + struct Budget; + struct AllocationCreateInfo; + struct PoolCreateInfo; + struct AllocationInfo; + struct DefragmentationInfo; + struct DefragmentationMove; + struct DefragmentationPassMoveInfo; + struct DefragmentationStats; + struct VirtualBlockCreateInfo; + struct VirtualAllocationCreateInfo; + struct VirtualAllocationInfo; + + class Pool; + class Allocation; + class DefragmentationContext; + class VirtualAllocation; + class Allocator; + class VirtualBlock; +} + + +namespace VMA_HPP_NAMESPACE { + class Pool { + public: + using CType = VmaPool; + using NativeType = VmaPool; + public: + VULKAN_HPP_CONSTEXPR Pool() = default; + VULKAN_HPP_CONSTEXPR Pool(std::nullptr_t) VULKAN_HPP_NOEXCEPT {} + VULKAN_HPP_TYPESAFE_EXPLICIT Pool(VmaPool pool) VULKAN_HPP_NOEXCEPT : m_pool(pool) {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Pool& operator=(VmaPool pool) VULKAN_HPP_NOEXCEPT { + m_pool = pool; + return *this; + } +#endif + + Pool& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT { + m_pool = {}; + return *this; + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>(Pool const &) const = default; +#else + bool operator==(Pool const & rhs) const VULKAN_HPP_NOEXCEPT { + return m_pool == rhs.m_pool; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaPool() const VULKAN_HPP_NOEXCEPT { + return m_pool; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT { + return m_pool != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT { + return m_pool == VK_NULL_HANDLE; + } + + private: + VmaPool m_pool = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(Pool) == sizeof(VmaPool), + "handle and wrapper have different size!"); +} +#ifndef VULKAN_HPP_NO_SMART_HANDLE +namespace VULKAN_HPP_NAMESPACE { + template<> struct 
UniqueHandleTraits<VMA_HPP_NAMESPACE::Pool, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> {
+    using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Pool, VMA_HPP_NAMESPACE::Allocator>;
+  };
+}
+namespace VMA_HPP_NAMESPACE { using UniquePool = VULKAN_HPP_NAMESPACE::UniqueHandle<Pool, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; }
+#endif
+
+namespace VMA_HPP_NAMESPACE {
+  class Allocation {
+  public:
+    using CType = VmaAllocation;
+    using NativeType = VmaAllocation;
+  public:
+    VULKAN_HPP_CONSTEXPR Allocation() = default;
+    VULKAN_HPP_CONSTEXPR Allocation(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
+    VULKAN_HPP_TYPESAFE_EXPLICIT Allocation(VmaAllocation allocation) VULKAN_HPP_NOEXCEPT : m_allocation(allocation) {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+    Allocation& operator=(VmaAllocation allocation) VULKAN_HPP_NOEXCEPT {
+      m_allocation = allocation;
+      return *this;
+    }
+#endif
+
+    Allocation& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
+      m_allocation = {};
+      return *this;
+    }
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>(Allocation const &) const = default;
+#else
+    bool operator==(Allocation const & rhs) const VULKAN_HPP_NOEXCEPT {
+      return m_allocation == rhs.m_allocation;
+    }
+#endif
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaAllocation() const VULKAN_HPP_NOEXCEPT {
+      return m_allocation;
+    }
+
+    explicit operator bool() const VULKAN_HPP_NOEXCEPT {
+      return m_allocation != VK_NULL_HANDLE;
+    }
+
+    bool operator!() const VULKAN_HPP_NOEXCEPT {
+      return m_allocation == VK_NULL_HANDLE;
+    }
+
+  private:
+    VmaAllocation m_allocation = {};
+  };
+  VULKAN_HPP_STATIC_ASSERT(sizeof(Allocation) == sizeof(VmaAllocation),
+                           "handle and wrapper have different size!");
+}
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+namespace VULKAN_HPP_NAMESPACE {
+  template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::Allocation, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> {
+    using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::Allocation, VMA_HPP_NAMESPACE::Allocator>;
+  };
+}
+namespace VMA_HPP_NAMESPACE { using UniqueAllocation = VULKAN_HPP_NAMESPACE::UniqueHandle<Allocation, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; }
+#endif
+
+namespace VMA_HPP_NAMESPACE {
+  class DefragmentationContext {
+  public:
+    using CType = VmaDefragmentationContext;
+    using NativeType = VmaDefragmentationContext;
+  public:
+    VULKAN_HPP_CONSTEXPR DefragmentationContext() = default;
+    VULKAN_HPP_CONSTEXPR DefragmentationContext(std::nullptr_t) VULKAN_HPP_NOEXCEPT {}
+    VULKAN_HPP_TYPESAFE_EXPLICIT DefragmentationContext(VmaDefragmentationContext defragmentationContext) VULKAN_HPP_NOEXCEPT : m_defragmentationContext(defragmentationContext) {}
+
+#if defined(VULKAN_HPP_TYPESAFE_CONVERSION)
+    DefragmentationContext& operator=(VmaDefragmentationContext defragmentationContext) VULKAN_HPP_NOEXCEPT {
+      m_defragmentationContext = defragmentationContext;
+      return *this;
+    }
+#endif
+
+    DefragmentationContext& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT {
+      m_defragmentationContext = {};
+      return *this;
+    }
+
+#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR )
+    auto operator<=>(DefragmentationContext const &) const = default;
+#else
+    bool operator==(DefragmentationContext const & rhs) const VULKAN_HPP_NOEXCEPT {
+      return m_defragmentationContext == rhs.m_defragmentationContext;
+    }
+#endif
+
+    VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaDefragmentationContext() const VULKAN_HPP_NOEXCEPT {
+      return m_defragmentationContext;
+    }
+
+    explicit operator bool() const VULKAN_HPP_NOEXCEPT {
+      return m_defragmentationContext != VK_NULL_HANDLE;
+    }
+
+    bool operator!() const VULKAN_HPP_NOEXCEPT {
+      return m_defragmentationContext == VK_NULL_HANDLE;
+    }
+
+  private:
+    VmaDefragmentationContext m_defragmentationContext = {};
+  };
+  VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationContext) == sizeof(VmaDefragmentationContext),
+                           "handle and wrapper have different size!");
+}
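The handle classes in this new header are transparent, layout-compatible views of the corresponding `Vma*` C handles (the `VULKAN_HPP_STATIC_ASSERT` lines guarantee identical size), so they can be mixed freely with code that still calls the C API. A minimal interop sketch; the function and variable names are hypothetical, and `vma` is the default `VMA_HPP_NAMESPACE`:

```cpp
#include "vk_mem_alloc.hpp"

// Hypothetical helper: wrap a raw C handle, test it, and unwrap it again.
void handleInterop(vma::Allocator allocator, VmaAllocation rawAllocation)
{
    // Wrapping is explicit; the wrapper stores the VmaAllocation verbatim.
    vma::Allocation allocation(rawAllocation);

    if (allocation) // nullable and boolean-testable, like vk::* handles
    {
        // Unwrap explicitly when calling the C API directly.
        VmaAllocationInfo info;
        vmaGetAllocationInfo(static_cast<VmaAllocator>(allocator),
                             static_cast<VmaAllocation>(allocation), &info);
    }

    allocation = nullptr; // reset to the empty handle
}
```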
+#ifndef VULKAN_HPP_NO_SMART_HANDLE +namespace VULKAN_HPP_NAMESPACE { + template<> struct UniqueHandleTraits { + using deleter = VMA_HPP_NAMESPACE::Deleter; + }; +} +namespace VMA_HPP_NAMESPACE { using UniqueDefragmentationContext = VULKAN_HPP_NAMESPACE::UniqueHandle; } +#endif + +namespace VMA_HPP_NAMESPACE { + class Allocator { + public: + using CType = VmaAllocator; + using NativeType = VmaAllocator; + public: + VULKAN_HPP_CONSTEXPR Allocator() = default; + VULKAN_HPP_CONSTEXPR Allocator(std::nullptr_t) VULKAN_HPP_NOEXCEPT {} + VULKAN_HPP_TYPESAFE_EXPLICIT Allocator(VmaAllocator allocator) VULKAN_HPP_NOEXCEPT : m_allocator(allocator) {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + Allocator& operator=(VmaAllocator allocator) VULKAN_HPP_NOEXCEPT { + m_allocator = allocator; + return *this; + } +#endif + + Allocator& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT { + m_allocator = {}; + return *this; + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>(Allocator const &) const = default; +#else + bool operator==(Allocator const & rhs) const VULKAN_HPP_NOEXCEPT { + return m_allocator == rhs.m_allocator; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaAllocator() const VULKAN_HPP_NOEXCEPT { + return m_allocator; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT { + return m_allocator != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT { + return m_allocator == VK_NULL_HANDLE; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void destroy() const; +#else + void destroy() const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS AllocatorInfo getAllocatorInfo() const; +#endif + void getAllocatorInfo(AllocatorInfo* allocatorInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties* getPhysicalDeviceProperties() const; +#endif + void getPhysicalDeviceProperties(const VULKAN_HPP_NAMESPACE::PhysicalDeviceProperties** physicalDeviceProperties) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties* getMemoryProperties() const; +#endif + void getMemoryProperties(const VULKAN_HPP_NAMESPACE::PhysicalDeviceMemoryProperties** physicalDeviceMemoryProperties) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_NAMESPACE::MemoryPropertyFlags getMemoryTypeProperties(uint32_t memoryTypeIndex) const; +#endif + void getMemoryTypeProperties(uint32_t memoryTypeIndex, + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags* flags) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void setCurrentFrameIndex(uint32_t frameIndex) const; +#else + void setCurrentFrameIndex(uint32_t frameIndex) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS TotalStatistics calculateStatistics() const; +#endif + void calculateStatistics(TotalStatistics* stats) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template, + typename B = VectorAllocator, + typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS std::vector getHeapBudgets(VectorAllocator& vectorAllocator) const; + + template> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS std::vector getHeapBudgets() const; +#endif + void getHeapBudgets(Budget* budgets) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
VULKAN_HPP_NAMESPACE::ResultValueType::type findMemoryTypeIndex(uint32_t memoryTypeBits, + const AllocationCreateInfo& allocationCreateInfo) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result findMemoryTypeIndex(uint32_t memoryTypeBits, + const AllocationCreateInfo* allocationCreateInfo, + uint32_t* memoryTypeIndex) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type findMemoryTypeIndexForBufferInfo(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result findMemoryTypeIndexForBufferInfo(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + uint32_t* memoryTypeIndex) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type findMemoryTypeIndexForImageInfo(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo, + const AllocationCreateInfo& allocationCreateInfo) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result findMemoryTypeIndexForImageInfo(const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + uint32_t* memoryTypeIndex) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type createPool(const PoolCreateInfo& createInfo) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type createPoolUnique(const PoolCreateInfo& createInfo) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createPool(const PoolCreateInfo* createInfo, + Pool* pool) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void destroyPool(Pool pool) const; +#else + void destroyPool(Pool pool) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS Statistics getPoolStatistics(Pool pool) const; +#endif + void getPoolStatistics(Pool pool, + Statistics* poolStats) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS DetailedStatistics calculatePoolStatistics(Pool pool) const; +#endif + void calculatePoolStatistics(Pool pool, + DetailedStatistics* poolStats) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type checkPoolCorruption(Pool pool) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result checkPoolCorruption(Pool pool) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS const char* getPoolName(Pool pool) const; +#endif + void getPoolName(Pool pool, + const char** name) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void setPoolName(Pool pool, + const char* name) const; +#else + void setPoolName(Pool pool, + const char* name) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type allocateMemory(const VULKAN_HPP_NAMESPACE::MemoryRequirements& vkMemoryRequirements, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename 
VULKAN_HPP_NAMESPACE::ResultValueType::type allocateMemoryUnique(const VULKAN_HPP_NAMESPACE::MemoryRequirements& vkMemoryRequirements, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemory(const VULKAN_HPP_NAMESPACE::MemoryRequirements* vkMemoryRequirements, + const AllocationCreateInfo* createInfo, + Allocation* allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + template, + typename B = VectorAllocator, + typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type allocateMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo, + VectorAllocator& vectorAllocator) const; + + template> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type allocateMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + template, + typename B = VectorAllocator, + typename std::enable_if::value, int>::type = 0> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type allocateMemoryPagesUnique(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo, + VectorAllocator& vectorAllocator) const; + + template> + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type allocateMemoryPagesUnique(VULKAN_HPP_NAMESPACE::ArrayProxy vkMemoryRequirements, + VULKAN_HPP_NAMESPACE::ArrayProxy createInfo, + VULKAN_HPP_NAMESPACE::ArrayProxyNoTemporaries allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemoryPages(const VULKAN_HPP_NAMESPACE::MemoryRequirements* vkMemoryRequirements, + const AllocationCreateInfo* createInfo, + size_t allocationCount, + Allocation* allocations, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type allocateMemoryForBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type allocateMemoryForBufferUnique(VULKAN_HPP_NAMESPACE::Buffer buffer, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemoryForBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + const AllocationCreateInfo* createInfo, + Allocation* allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type allocateMemoryForImage(VULKAN_HPP_NAMESPACE::Image image, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + 
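The separate allocate-then-bind path mirrors raw Vulkan; a sketch, assuming a `device` and `bufInfo` as in the previous example (hypothetical names, not part of the header):

// Create the buffer with vulkan.hpp, then let VMA find and allocate its memory.
vk::Buffer rawBuffer = device.createBuffer(bufInfo);
vma::AllocationCreateInfo createInfo;
createInfo.setUsage(vma::MemoryUsage::eAuto);

vma::AllocationInfo info;                              // filled with memory type, offset, size
vma::Allocation allocation = allocator.allocateMemoryForBuffer(rawBuffer, createInfo, info);
allocator.bindBufferMemory(allocation, rawBuffer);     // declared a little further down
// ...
allocator.freeMemory(allocation);
device.destroyBuffer(rawBuffer);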
VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type allocateMemoryForImageUnique(VULKAN_HPP_NAMESPACE::Image image, + const AllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result allocateMemoryForImage(VULKAN_HPP_NAMESPACE::Image image, + const AllocationCreateInfo* createInfo, + Allocation* allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void freeMemory(const Allocation allocation) const; +#else + void freeMemory(const Allocation allocation) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void freeMemoryPages(VULKAN_HPP_NAMESPACE::ArrayProxy allocations) const; +#endif + void freeMemoryPages(size_t allocationCount, + const Allocation* allocations) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS AllocationInfo getAllocationInfo(Allocation allocation) const; +#endif + void getAllocationInfo(Allocation allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void setAllocationUserData(Allocation allocation, + void* userData) const; +#else + void setAllocationUserData(Allocation allocation, + void* userData) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void setAllocationName(Allocation allocation, + const char* name) const; +#else + void setAllocationName(Allocation allocation, + const char* name) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_NAMESPACE::MemoryPropertyFlags getAllocationMemoryProperties(Allocation allocation) const; +#endif + void getAllocationMemoryProperties(Allocation allocation, + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags* flags) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type mapMemory(Allocation allocation) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result mapMemory(Allocation allocation, + void** data) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void unmapMemory(Allocation allocation) const; +#else + void unmapMemory(Allocation allocation) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type flushAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result flushAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type invalidateAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result invalidateAllocation(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize offset, + VULKAN_HPP_NAMESPACE::DeviceSize size) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type flushAllocations(VULKAN_HPP_NAMESPACE::ArrayProxy allocations, + VULKAN_HPP_NAMESPACE::ArrayProxy offsets, + VULKAN_HPP_NAMESPACE::ArrayProxy sizes) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result flushAllocations(uint32_t allocationCount, + const 
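A typical host-upload sequence over the mapping and cache-control members above; `allocation`, `srcData` and `srcSize` are placeholders:

// needs <cstring> for std::memcpy
void* mapped = allocator.mapMemory(allocation);            // throws on failure
std::memcpy(mapped, srcData, srcSize);
allocator.flushAllocation(allocation, 0, VK_WHOLE_SIZE);   // a no-op on HOST_COHERENT memory types
allocator.unmapMemory(allocation);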
Allocation* allocations, + const VULKAN_HPP_NAMESPACE::DeviceSize* offsets, + const VULKAN_HPP_NAMESPACE::DeviceSize* sizes) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type invalidateAllocations(VULKAN_HPP_NAMESPACE::ArrayProxy allocations, + VULKAN_HPP_NAMESPACE::ArrayProxy offsets, + VULKAN_HPP_NAMESPACE::ArrayProxy sizes) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result invalidateAllocations(uint32_t allocationCount, + const Allocation* allocations, + const VULKAN_HPP_NAMESPACE::DeviceSize* offsets, + const VULKAN_HPP_NAMESPACE::DeviceSize* sizes) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type checkCorruption(uint32_t memoryTypeBits) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result checkCorruption(uint32_t memoryTypeBits) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type beginDefragmentation(const DefragmentationInfo& info) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result beginDefragmentation(const DefragmentationInfo* info, + DefragmentationContext* context) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void endDefragmentation(DefragmentationContext context, + VULKAN_HPP_NAMESPACE::Optional stats = nullptr) const; +#endif + void endDefragmentation(DefragmentationContext context, + DefragmentationStats* stats) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type beginDefragmentationPass(DefragmentationContext context) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result beginDefragmentationPass(DefragmentationContext context, + DefragmentationPassMoveInfo* passInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type endDefragmentationPass(DefragmentationContext context) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result endDefragmentationPass(DefragmentationContext context, + DefragmentationPassMoveInfo* passInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type bindBufferMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Buffer buffer) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindBufferMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Buffer buffer) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type bindBufferMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Buffer buffer, + const void* next) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindBufferMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Buffer buffer, + const void* next) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type bindImageMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Image image) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindImageMemory(Allocation allocation, + VULKAN_HPP_NAMESPACE::Image image) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + typename VULKAN_HPP_NAMESPACE::ResultValueType::type bindImageMemory2(Allocation 
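The defragmentation members follow the result-code protocol of the underlying vmaBeginDefragmentationPass/vmaEndDefragmentationPass calls; a sketch of the driving loop using the pointer overloads declared above (data copying elided):

vma::DefragmentationInfo defragInfo;
vma::DefragmentationContext ctx;
vk::Result res = allocator.beginDefragmentation(&defragInfo, &ctx);
if (res != vk::Result::eSuccess) { /* handle error */ }

vma::DefragmentationPassMoveInfo pass;
for (;;) {
    if (allocator.beginDefragmentationPass(ctx, &pass) == vk::Result::eSuccess)
        break;                                   // nothing left to move
    // ... copy each move's contents to its new allocation ...
    if (allocator.endDefragmentationPass(ctx, &pass) == vk::Result::eSuccess)
        break;                                   // defragmentation finished
}
vma::DefragmentationStats stats;
allocator.endDefragmentation(ctx, &stats);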
allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Image image, + const void* next) const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result bindImageMemory2(Allocation allocation, + VULKAN_HPP_NAMESPACE::DeviceSize allocationLocalOffset, + VULKAN_HPP_NAMESPACE::Image image, + const void* next) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type createBuffer(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type createBufferUnique(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createBuffer(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Buffer* buffer, + Allocation* allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type createBufferWithAlignment(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::DeviceSize minAlignment, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type createBufferWithAlignmentUnique(const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::DeviceSize minAlignment, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createBufferWithAlignment(const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + VULKAN_HPP_NAMESPACE::DeviceSize minAlignment, + VULKAN_HPP_NAMESPACE::Buffer* buffer, + Allocation* allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type createAliasingBuffer(Allocation allocation, + const VULKAN_HPP_NAMESPACE::BufferCreateInfo& bufferCreateInfo) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createAliasingBuffer(Allocation allocation, + const VULKAN_HPP_NAMESPACE::BufferCreateInfo* bufferCreateInfo, + VULKAN_HPP_NAMESPACE::Buffer* buffer) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void destroyBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + Allocation allocation) const; +#else + void destroyBuffer(VULKAN_HPP_NAMESPACE::Buffer buffer, + Allocation allocation) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type createImage(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#ifndef 
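The one-call creation path is usually all a port needs; in enhanced mode the buffer/allocation pair comes back directly. A sketch with illustrative sizes:

vk::BufferCreateInfo bci({}, 1u << 20, vk::BufferUsageFlagBits::eStorageBuffer);
vma::AllocationCreateInfo aci;
aci.setUsage(vma::MemoryUsage::eAuto);

auto [buffer, allocation] = allocator.createBuffer(bci, aci);  // std::pair<vk::Buffer, vma::Allocation>
// ... createBufferUnique(...) instead yields self-destroying handles ...
allocator.destroyBuffer(buffer, allocation);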
VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType>::type createImageUnique(const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo, + const AllocationCreateInfo& allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Optional allocationInfo = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createImage(const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo, + const AllocationCreateInfo* allocationCreateInfo, + VULKAN_HPP_NAMESPACE::Image* image, + Allocation* allocation, + AllocationInfo* allocationInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type createAliasingImage(Allocation allocation, + const VULKAN_HPP_NAMESPACE::ImageCreateInfo& imageCreateInfo) const; +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createAliasingImage(Allocation allocation, + const VULKAN_HPP_NAMESPACE::ImageCreateInfo* imageCreateInfo, + VULKAN_HPP_NAMESPACE::Image* image) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void destroyImage(VULKAN_HPP_NAMESPACE::Image image, + Allocation allocation) const; +#else + void destroyImage(VULKAN_HPP_NAMESPACE::Image image, + Allocation allocation) const; +#endif + +#if VMA_STATS_STRING_ENABLED +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS char* buildStatsString(VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const; +#endif + void buildStatsString(char** statsString, + VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void freeStatsString(char* statsString) const; +#else + void freeStatsString(char* statsString) const; +#endif + +#endif + private: + VmaAllocator m_allocator = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(Allocator) == sizeof(VmaAllocator), + "handle and wrapper have different size!"); +} +#ifndef VULKAN_HPP_NO_SMART_HANDLE +namespace VULKAN_HPP_NAMESPACE { + template<> struct UniqueHandleTraits { + using deleter = VMA_HPP_NAMESPACE::Deleter; + }; +} +namespace VMA_HPP_NAMESPACE { using UniqueAllocator = VULKAN_HPP_NAMESPACE::UniqueHandle; } +#endif + +namespace VMA_HPP_NAMESPACE { + class VirtualAllocation { + public: + using CType = VmaVirtualAllocation; + using NativeType = VmaVirtualAllocation; + public: + VULKAN_HPP_CONSTEXPR VirtualAllocation() = default; + VULKAN_HPP_CONSTEXPR VirtualAllocation(std::nullptr_t) VULKAN_HPP_NOEXCEPT {} + VULKAN_HPP_TYPESAFE_EXPLICIT VirtualAllocation(VmaVirtualAllocation virtualAllocation) VULKAN_HPP_NOEXCEPT : m_virtualAllocation(virtualAllocation) {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + VirtualAllocation& operator=(VmaVirtualAllocation virtualAllocation) VULKAN_HPP_NOEXCEPT { + m_virtualAllocation = virtualAllocation; + return *this; + } +#endif + + VirtualAllocation& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT { + m_virtualAllocation = {}; + return *this; + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>(VirtualAllocation const &) const = default; +#else + bool operator==(VirtualAllocation const & rhs) const VULKAN_HPP_NOEXCEPT { + return m_virtualAllocation == rhs.m_virtualAllocation; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaVirtualAllocation() const VULKAN_HPP_NOEXCEPT { + return m_virtualAllocation; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT { + return m_virtualAllocation != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT { + return 
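The stats string is a JSON document owned by the allocator, so every build needs a matching free; a sketch (file name illustrative):

// needs <fstream>
char* json = allocator.buildStatsString(VK_TRUE);   // VK_TRUE requests the detailed map
std::ofstream("vma_stats.json") << json;
allocator.freeStatsString(json);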
m_virtualAllocation == VK_NULL_HANDLE; + } + + private: + VmaVirtualAllocation m_virtualAllocation = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualAllocation) == sizeof(VmaVirtualAllocation), + "handle and wrapper have different size!"); +} +#ifndef VULKAN_HPP_NO_SMART_HANDLE +namespace VULKAN_HPP_NAMESPACE { + template<> struct UniqueHandleTraits { + using deleter = VMA_HPP_NAMESPACE::Deleter; + }; +} +namespace VMA_HPP_NAMESPACE { using UniqueVirtualAllocation = VULKAN_HPP_NAMESPACE::UniqueHandle; } +#endif + +namespace VMA_HPP_NAMESPACE { + class VirtualBlock { + public: + using CType = VmaVirtualBlock; + using NativeType = VmaVirtualBlock; + public: + VULKAN_HPP_CONSTEXPR VirtualBlock() = default; + VULKAN_HPP_CONSTEXPR VirtualBlock(std::nullptr_t) VULKAN_HPP_NOEXCEPT {} + VULKAN_HPP_TYPESAFE_EXPLICIT VirtualBlock(VmaVirtualBlock virtualBlock) VULKAN_HPP_NOEXCEPT : m_virtualBlock(virtualBlock) {} + +#if defined(VULKAN_HPP_TYPESAFE_CONVERSION) + VirtualBlock& operator=(VmaVirtualBlock virtualBlock) VULKAN_HPP_NOEXCEPT { + m_virtualBlock = virtualBlock; + return *this; + } +#endif + + VirtualBlock& operator=(std::nullptr_t) VULKAN_HPP_NOEXCEPT { + m_virtualBlock = {}; + return *this; + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + auto operator<=>(VirtualBlock const &) const = default; +#else + bool operator==(VirtualBlock const & rhs) const VULKAN_HPP_NOEXCEPT { + return m_virtualBlock == rhs.m_virtualBlock; + } +#endif + + VULKAN_HPP_TYPESAFE_EXPLICIT operator VmaVirtualBlock() const VULKAN_HPP_NOEXCEPT { + return m_virtualBlock; + } + + explicit operator bool() const VULKAN_HPP_NOEXCEPT { + return m_virtualBlock != VK_NULL_HANDLE; + } + + bool operator!() const VULKAN_HPP_NOEXCEPT { + return m_virtualBlock == VK_NULL_HANDLE; + } + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void destroy() const; +#else + void destroy() const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VULKAN_HPP_NAMESPACE::Bool32 isVirtualBlockEmpty() const; +#else + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Bool32 isVirtualBlockEmpty() const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS VirtualAllocationInfo getVirtualAllocationInfo(VirtualAllocation allocation) const; +#endif + void getVirtualAllocationInfo(VirtualAllocation allocation, + VirtualAllocationInfo* virtualAllocInfo) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type virtualAllocate(const VirtualAllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional offset = nullptr) const; +#ifndef VULKAN_HPP_NO_SMART_HANDLE + VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType::type virtualAllocateUnique(const VirtualAllocationCreateInfo& createInfo, + VULKAN_HPP_NAMESPACE::Optional offset = nullptr) const; +#endif +#endif + VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result virtualAllocate(const VirtualAllocationCreateInfo* createInfo, + VirtualAllocation* allocation, + VULKAN_HPP_NAMESPACE::DeviceSize* offset) const; + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void virtualFree(VirtualAllocation allocation) const; +#else + void virtualFree(VirtualAllocation allocation) const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void clearVirtualBlock() const; +#else + void clearVirtualBlock() const; +#endif + +#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE + void setVirtualAllocationUserData(VirtualAllocation 
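VirtualBlock sub-allocates a caller-owned range without touching GPU memory; a sketch, assuming VirtualBlockCreateInfo and VirtualAllocationCreateInfo expose the same generated setSize/setAlignment setters as the structs later in this diff:

vma::VirtualBlockCreateInfo blockInfo;
blockInfo.setSize(1u << 20);                              // manage a 1 MiB range
vma::UniqueVirtualBlock block = vma::createVirtualBlockUnique(blockInfo);

vma::VirtualAllocationCreateInfo vaci;
vaci.setSize(4096).setAlignment(256);
vk::DeviceSize offset;
vma::VirtualAllocation va = block->virtualAllocate(vaci, offset);
// ... use [offset, offset + 4096) within the externally owned storage ...
block->virtualFree(va);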
allocation,
+                                      void* userData) const;
+#else
+    void setVirtualAllocationUserData(VirtualAllocation allocation,
+                                      void* userData) const;
+#endif
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS Statistics getVirtualBlockStatistics() const;
+#endif
+    void getVirtualBlockStatistics(Statistics* stats) const;
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS DetailedStatistics calculateVirtualBlockStatistics() const;
+#endif
+    void calculateVirtualBlockStatistics(DetailedStatistics* stats) const;
+
+#if VMA_STATS_STRING_ENABLED
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS char* buildVirtualBlockStatsString(VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const;
+#endif
+    void buildVirtualBlockStatsString(char** statsString,
+                                      VULKAN_HPP_NAMESPACE::Bool32 detailedMap) const;
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+    void freeVirtualBlockStatsString(char* statsString) const;
+#else
+    void freeVirtualBlockStatsString(char* statsString) const;
+#endif
+
+#endif
+  private:
+    VmaVirtualBlock m_virtualBlock = {};
+  };
+  VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualBlock) == sizeof(VmaVirtualBlock),
+                           "handle and wrapper have different size!");
+}
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+namespace VULKAN_HPP_NAMESPACE {
+  template<> struct UniqueHandleTraits<VMA_HPP_NAMESPACE::VirtualBlock, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE> {
+    using deleter = VMA_HPP_NAMESPACE::Deleter<VMA_HPP_NAMESPACE::VirtualBlock, void>;
+  };
+}
+namespace VMA_HPP_NAMESPACE { using UniqueVirtualBlock = VULKAN_HPP_NAMESPACE::UniqueHandle<VirtualBlock, VULKAN_HPP_DEFAULT_DISPATCHER_TYPE>; }
+#endif
+
+namespace VMA_HPP_NAMESPACE {
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<Allocator>::type createAllocator(const AllocatorCreateInfo& createInfo);
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueAllocator>::type createAllocatorUnique(const AllocatorCreateInfo& createInfo);
+#endif
+#endif
+  VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createAllocator(const AllocatorCreateInfo* createInfo,
+                                                                    Allocator* allocator);
+
+#ifndef VULKAN_HPP_DISABLE_ENHANCED_MODE
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<VirtualBlock>::type createVirtualBlock(const VirtualBlockCreateInfo& createInfo);
+#ifndef VULKAN_HPP_NO_SMART_HANDLE
+  VULKAN_HPP_NODISCARD_WHEN_NO_EXCEPTIONS typename VULKAN_HPP_NAMESPACE::ResultValueType<UniqueVirtualBlock>::type createVirtualBlockUnique(const VirtualBlockCreateInfo& createInfo);
+#endif
+#endif
+  VULKAN_HPP_NODISCARD VULKAN_HPP_NAMESPACE::Result createVirtualBlock(const VirtualBlockCreateInfo* createInfo,
+                                                                       VirtualBlock* virtualBlock);
+}
+#endif
diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_raii.hpp b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_raii.hpp
new file mode 100644
index 00000000..e69de29b
diff --git a/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_structs.hpp b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_structs.hpp
new file mode 100644
index 00000000..0cc4397f
--- /dev/null
+++ b/external/VulkanMemoryAllocator-Hpp/include/vk_mem_alloc_structs.hpp
@@ -0,0 +1,1829 @@
+#ifndef VULKAN_MEMORY_ALLOCATOR_STRUCTS_HPP
+#define VULKAN_MEMORY_ALLOCATOR_STRUCTS_HPP
+
+namespace VMA_HPP_NAMESPACE {
+
+  struct DeviceMemoryCallbacks {
+    using NativeType = VmaDeviceMemoryCallbacks;
+
+#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS )
+    VULKAN_HPP_CONSTEXPR DeviceMemoryCallbacks(
+        PFN_vmaAllocateDeviceMemoryFunction pfnAllocate_ = {}
+      , PFN_vmaFreeDeviceMemoryFunction
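Bring-up through the createAllocator free functions above takes only a few lines; `instance`, `physicalDevice` and `device` are assumed to come from ordinary Vulkan-Hpp initialization:

vma::AllocatorCreateInfo ci;
ci.setInstance(instance)
  .setPhysicalDevice(physicalDevice)
  .setDevice(device)
  .setVulkanApiVersion(VK_API_VERSION_1_1);

vma::UniqueAllocator allocator = vma::createAllocatorUnique(ci);
// allocator->createBuffer(...), allocator->mapMemory(...), etc.;
// vmaDestroyAllocator runs automatically when the unique handle leaves scope.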
pfnFree_ = {} + , void* pUserData_ = {} + ) VULKAN_HPP_NOEXCEPT + : pfnAllocate(pfnAllocate_) + , pfnFree(pfnFree_) + , pUserData(pUserData_) + {} + + VULKAN_HPP_CONSTEXPR DeviceMemoryCallbacks(DeviceMemoryCallbacks const &) VULKAN_HPP_NOEXCEPT = default; + DeviceMemoryCallbacks(VmaDeviceMemoryCallbacks const & rhs) VULKAN_HPP_NOEXCEPT : DeviceMemoryCallbacks(*reinterpret_cast(&rhs)) {} +#endif + + DeviceMemoryCallbacks& operator=(DeviceMemoryCallbacks const &) VULKAN_HPP_NOEXCEPT = default; + DeviceMemoryCallbacks& operator=(VmaDeviceMemoryCallbacks const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaDeviceMemoryCallbacks const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaDeviceMemoryCallbacks&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(DeviceMemoryCallbacks const &) const = default; +#else + bool operator==(DeviceMemoryCallbacks const & rhs) const VULKAN_HPP_NOEXCEPT { + return pfnAllocate == rhs.pfnAllocate + && pfnFree == rhs.pfnFree + && pUserData == rhs.pUserData + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 DeviceMemoryCallbacks& setPfnAllocate(PFN_vmaAllocateDeviceMemoryFunction pfnAllocate_) VULKAN_HPP_NOEXCEPT { + pfnAllocate = pfnAllocate_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DeviceMemoryCallbacks& setPfnFree(PFN_vmaFreeDeviceMemoryFunction pfnFree_) VULKAN_HPP_NOEXCEPT { + pfnFree = pfnFree_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DeviceMemoryCallbacks& setPUserData(void* pUserData_) VULKAN_HPP_NOEXCEPT { + pUserData = pUserData_; + return *this; + } +#endif + + public: + PFN_vmaAllocateDeviceMemoryFunction pfnAllocate = {}; + PFN_vmaFreeDeviceMemoryFunction pfnFree = {}; + void* pUserData = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(DeviceMemoryCallbacks) == sizeof(VmaDeviceMemoryCallbacks), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "DeviceMemoryCallbacks is not nothrow_move_constructible!"); + + struct VulkanFunctions { + using NativeType = VmaVulkanFunctions; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR VulkanFunctions( + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr_ = {} + , PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr_ = {} + , PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties_ = {} + , PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties_ = {} + , PFN_vkAllocateMemory vkAllocateMemory_ = {} + , PFN_vkFreeMemory vkFreeMemory_ = {} + , PFN_vkMapMemory vkMapMemory_ = {} + , PFN_vkUnmapMemory vkUnmapMemory_ = {} + , PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges_ = {} + , PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges_ = {} + , PFN_vkBindBufferMemory vkBindBufferMemory_ = {} + , PFN_vkBindImageMemory vkBindImageMemory_ = {} + , PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements_ = {} + , PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements_ = {} + , PFN_vkCreateBuffer vkCreateBuffer_ = {} + , PFN_vkDestroyBuffer vkDestroyBuffer_ = {} + , PFN_vkCreateImage vkCreateImage_ = {} + , PFN_vkDestroyImage vkDestroyImage_ = {} + , PFN_vkCmdCopyBuffer vkCmdCopyBuffer_ = {} +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION 
>= 1001000 + , PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR_ = {} + , PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR_ = {} +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + , PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR_ = {} + , PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR_ = {} +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + , PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR_ = {} +#endif +#if VMA_VULKAN_VERSION >= 1003000 + , PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements_ = {} + , PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements_ = {} +#endif + ) VULKAN_HPP_NOEXCEPT + : vkGetInstanceProcAddr(vkGetInstanceProcAddr_) + , vkGetDeviceProcAddr(vkGetDeviceProcAddr_) + , vkGetPhysicalDeviceProperties(vkGetPhysicalDeviceProperties_) + , vkGetPhysicalDeviceMemoryProperties(vkGetPhysicalDeviceMemoryProperties_) + , vkAllocateMemory(vkAllocateMemory_) + , vkFreeMemory(vkFreeMemory_) + , vkMapMemory(vkMapMemory_) + , vkUnmapMemory(vkUnmapMemory_) + , vkFlushMappedMemoryRanges(vkFlushMappedMemoryRanges_) + , vkInvalidateMappedMemoryRanges(vkInvalidateMappedMemoryRanges_) + , vkBindBufferMemory(vkBindBufferMemory_) + , vkBindImageMemory(vkBindImageMemory_) + , vkGetBufferMemoryRequirements(vkGetBufferMemoryRequirements_) + , vkGetImageMemoryRequirements(vkGetImageMemoryRequirements_) + , vkCreateBuffer(vkCreateBuffer_) + , vkDestroyBuffer(vkDestroyBuffer_) + , vkCreateImage(vkCreateImage_) + , vkDestroyImage(vkDestroyImage_) + , vkCmdCopyBuffer(vkCmdCopyBuffer_) +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + , vkGetBufferMemoryRequirements2KHR(vkGetBufferMemoryRequirements2KHR_) + , vkGetImageMemoryRequirements2KHR(vkGetImageMemoryRequirements2KHR_) +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + , vkBindBufferMemory2KHR(vkBindBufferMemory2KHR_) + , vkBindImageMemory2KHR(vkBindImageMemory2KHR_) +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + , vkGetPhysicalDeviceMemoryProperties2KHR(vkGetPhysicalDeviceMemoryProperties2KHR_) +#endif +#if VMA_VULKAN_VERSION >= 1003000 + , vkGetDeviceBufferMemoryRequirements(vkGetDeviceBufferMemoryRequirements_) + , vkGetDeviceImageMemoryRequirements(vkGetDeviceImageMemoryRequirements_) +#endif + {} + + VULKAN_HPP_CONSTEXPR VulkanFunctions(VulkanFunctions const &) VULKAN_HPP_NOEXCEPT = default; + VulkanFunctions(VmaVulkanFunctions const & rhs) VULKAN_HPP_NOEXCEPT : VulkanFunctions(*reinterpret_cast(&rhs)) {} +#endif + + VulkanFunctions& operator=(VulkanFunctions const &) VULKAN_HPP_NOEXCEPT = default; + VulkanFunctions& operator=(VmaVulkanFunctions const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaVulkanFunctions const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaVulkanFunctions&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(VulkanFunctions const &) const = default; +#else + bool operator==(VulkanFunctions const & rhs) const VULKAN_HPP_NOEXCEPT { + return vkGetInstanceProcAddr == rhs.vkGetInstanceProcAddr + && vkGetDeviceProcAddr == rhs.vkGetDeviceProcAddr + && vkGetPhysicalDeviceProperties == rhs.vkGetPhysicalDeviceProperties + && vkGetPhysicalDeviceMemoryProperties == rhs.vkGetPhysicalDeviceMemoryProperties + && vkAllocateMemory == 
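When the underlying library is compiled with VMA_DYNAMIC_VULKAN_FUNCTIONS, only the two loader entry points need to be supplied and the remaining members of this struct may stay null; a sketch, assuming the application links the Vulkan loader statically so the global symbols exist:

vma::VulkanFunctions fns;
fns.setVkGetInstanceProcAddr(vkGetInstanceProcAddr)
   .setVkGetDeviceProcAddr(vkGetDeviceProcAddr);

vma::AllocatorCreateInfo ci;
ci.setPVulkanFunctions(&fns);   // VMA imports the rest of the pointers itself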
rhs.vkAllocateMemory + && vkFreeMemory == rhs.vkFreeMemory + && vkMapMemory == rhs.vkMapMemory + && vkUnmapMemory == rhs.vkUnmapMemory + && vkFlushMappedMemoryRanges == rhs.vkFlushMappedMemoryRanges + && vkInvalidateMappedMemoryRanges == rhs.vkInvalidateMappedMemoryRanges + && vkBindBufferMemory == rhs.vkBindBufferMemory + && vkBindImageMemory == rhs.vkBindImageMemory + && vkGetBufferMemoryRequirements == rhs.vkGetBufferMemoryRequirements + && vkGetImageMemoryRequirements == rhs.vkGetImageMemoryRequirements + && vkCreateBuffer == rhs.vkCreateBuffer + && vkDestroyBuffer == rhs.vkDestroyBuffer + && vkCreateImage == rhs.vkCreateImage + && vkDestroyImage == rhs.vkDestroyImage + && vkCmdCopyBuffer == rhs.vkCmdCopyBuffer +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + && vkGetBufferMemoryRequirements2KHR == rhs.vkGetBufferMemoryRequirements2KHR + && vkGetImageMemoryRequirements2KHR == rhs.vkGetImageMemoryRequirements2KHR +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + && vkBindBufferMemory2KHR == rhs.vkBindBufferMemory2KHR + && vkBindImageMemory2KHR == rhs.vkBindImageMemory2KHR +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + && vkGetPhysicalDeviceMemoryProperties2KHR == rhs.vkGetPhysicalDeviceMemoryProperties2KHR +#endif +#if VMA_VULKAN_VERSION >= 1003000 + && vkGetDeviceBufferMemoryRequirements == rhs.vkGetDeviceBufferMemoryRequirements + && vkGetDeviceImageMemoryRequirements == rhs.vkGetDeviceImageMemoryRequirements +#endif + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetInstanceProcAddr(PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr_) VULKAN_HPP_NOEXCEPT { + vkGetInstanceProcAddr = vkGetInstanceProcAddr_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetDeviceProcAddr(PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr_) VULKAN_HPP_NOEXCEPT { + vkGetDeviceProcAddr = vkGetDeviceProcAddr_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetPhysicalDeviceProperties(PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties_) VULKAN_HPP_NOEXCEPT { + vkGetPhysicalDeviceProperties = vkGetPhysicalDeviceProperties_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetPhysicalDeviceMemoryProperties(PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties_) VULKAN_HPP_NOEXCEPT { + vkGetPhysicalDeviceMemoryProperties = vkGetPhysicalDeviceMemoryProperties_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkAllocateMemory(PFN_vkAllocateMemory vkAllocateMemory_) VULKAN_HPP_NOEXCEPT { + vkAllocateMemory = vkAllocateMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkFreeMemory(PFN_vkFreeMemory vkFreeMemory_) VULKAN_HPP_NOEXCEPT { + vkFreeMemory = vkFreeMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkMapMemory(PFN_vkMapMemory vkMapMemory_) VULKAN_HPP_NOEXCEPT { + vkMapMemory = vkMapMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkUnmapMemory(PFN_vkUnmapMemory vkUnmapMemory_) VULKAN_HPP_NOEXCEPT { + vkUnmapMemory = vkUnmapMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkFlushMappedMemoryRanges(PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges_) VULKAN_HPP_NOEXCEPT { + vkFlushMappedMemoryRanges = vkFlushMappedMemoryRanges_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkInvalidateMappedMemoryRanges(PFN_vkInvalidateMappedMemoryRanges 
vkInvalidateMappedMemoryRanges_) VULKAN_HPP_NOEXCEPT { + vkInvalidateMappedMemoryRanges = vkInvalidateMappedMemoryRanges_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkBindBufferMemory(PFN_vkBindBufferMemory vkBindBufferMemory_) VULKAN_HPP_NOEXCEPT { + vkBindBufferMemory = vkBindBufferMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkBindImageMemory(PFN_vkBindImageMemory vkBindImageMemory_) VULKAN_HPP_NOEXCEPT { + vkBindImageMemory = vkBindImageMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetBufferMemoryRequirements(PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements_) VULKAN_HPP_NOEXCEPT { + vkGetBufferMemoryRequirements = vkGetBufferMemoryRequirements_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetImageMemoryRequirements(PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements_) VULKAN_HPP_NOEXCEPT { + vkGetImageMemoryRequirements = vkGetImageMemoryRequirements_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkCreateBuffer(PFN_vkCreateBuffer vkCreateBuffer_) VULKAN_HPP_NOEXCEPT { + vkCreateBuffer = vkCreateBuffer_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkDestroyBuffer(PFN_vkDestroyBuffer vkDestroyBuffer_) VULKAN_HPP_NOEXCEPT { + vkDestroyBuffer = vkDestroyBuffer_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkCreateImage(PFN_vkCreateImage vkCreateImage_) VULKAN_HPP_NOEXCEPT { + vkCreateImage = vkCreateImage_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkDestroyImage(PFN_vkDestroyImage vkDestroyImage_) VULKAN_HPP_NOEXCEPT { + vkDestroyImage = vkDestroyImage_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkCmdCopyBuffer(PFN_vkCmdCopyBuffer vkCmdCopyBuffer_) VULKAN_HPP_NOEXCEPT { + vkCmdCopyBuffer = vkCmdCopyBuffer_; + return *this; + } +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetBufferMemoryRequirements2KHR(PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR_) VULKAN_HPP_NOEXCEPT { + vkGetBufferMemoryRequirements2KHR = vkGetBufferMemoryRequirements2KHR_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetImageMemoryRequirements2KHR(PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR_) VULKAN_HPP_NOEXCEPT { + vkGetImageMemoryRequirements2KHR = vkGetImageMemoryRequirements2KHR_; + return *this; + } +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkBindBufferMemory2KHR(PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR_) VULKAN_HPP_NOEXCEPT { + vkBindBufferMemory2KHR = vkBindBufferMemory2KHR_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkBindImageMemory2KHR(PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR_) VULKAN_HPP_NOEXCEPT { + vkBindImageMemory2KHR = vkBindImageMemory2KHR_; + return *this; + } +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetPhysicalDeviceMemoryProperties2KHR(PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR_) VULKAN_HPP_NOEXCEPT { + vkGetPhysicalDeviceMemoryProperties2KHR = vkGetPhysicalDeviceMemoryProperties2KHR_; + return *this; + } +#endif +#if VMA_VULKAN_VERSION >= 1003000 + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetDeviceBufferMemoryRequirements(PFN_vkGetDeviceBufferMemoryRequirements 
vkGetDeviceBufferMemoryRequirements_) VULKAN_HPP_NOEXCEPT { + vkGetDeviceBufferMemoryRequirements = vkGetDeviceBufferMemoryRequirements_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VulkanFunctions& setVkGetDeviceImageMemoryRequirements(PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements_) VULKAN_HPP_NOEXCEPT { + vkGetDeviceImageMemoryRequirements = vkGetDeviceImageMemoryRequirements_; + return *this; + } +#endif +#endif + + public: + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = {}; + PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr = {}; + PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties = {}; + PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties = {}; + PFN_vkAllocateMemory vkAllocateMemory = {}; + PFN_vkFreeMemory vkFreeMemory = {}; + PFN_vkMapMemory vkMapMemory = {}; + PFN_vkUnmapMemory vkUnmapMemory = {}; + PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges = {}; + PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges = {}; + PFN_vkBindBufferMemory vkBindBufferMemory = {}; + PFN_vkBindImageMemory vkBindImageMemory = {}; + PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements = {}; + PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements = {}; + PFN_vkCreateBuffer vkCreateBuffer = {}; + PFN_vkDestroyBuffer vkDestroyBuffer = {}; + PFN_vkCreateImage vkCreateImage = {}; + PFN_vkDestroyImage vkDestroyImage = {}; + PFN_vkCmdCopyBuffer vkCmdCopyBuffer = {}; +#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 + PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR = {}; + PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR = {}; +#endif +#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 + PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR = {}; + PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR = {}; +#endif +#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 + PFN_vkGetPhysicalDeviceMemoryProperties2KHR vkGetPhysicalDeviceMemoryProperties2KHR = {}; +#endif +#if VMA_VULKAN_VERSION >= 1003000 + PFN_vkGetDeviceBufferMemoryRequirements vkGetDeviceBufferMemoryRequirements = {}; + PFN_vkGetDeviceImageMemoryRequirements vkGetDeviceImageMemoryRequirements = {}; +#endif + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(VulkanFunctions) == sizeof(VmaVulkanFunctions), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "VulkanFunctions is not nothrow_move_constructible!"); + + struct AllocatorCreateInfo { + using NativeType = VmaAllocatorCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AllocatorCreateInfo( + AllocatorCreateFlags flags_ = {} + , VULKAN_HPP_NAMESPACE::PhysicalDevice physicalDevice_ = {} + , VULKAN_HPP_NAMESPACE::Device device_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize preferredLargeHeapBlockSize_ = {} + , const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocationCallbacks_ = {} + , const DeviceMemoryCallbacks* pDeviceMemoryCallbacks_ = {} + , const VULKAN_HPP_NAMESPACE::DeviceSize* pHeapSizeLimit_ = {} + , const VulkanFunctions* pVulkanFunctions_ = {} + , VULKAN_HPP_NAMESPACE::Instance instance_ = {} + , uint32_t vulkanApiVersion_ = {} +#if VMA_EXTERNAL_MEMORY + , const VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsKHR* pTypeExternalMemoryHandleTypes_ = {} +#endif + ) VULKAN_HPP_NOEXCEPT + : flags(flags_) + , 
physicalDevice(physicalDevice_) + , device(device_) + , preferredLargeHeapBlockSize(preferredLargeHeapBlockSize_) + , pAllocationCallbacks(pAllocationCallbacks_) + , pDeviceMemoryCallbacks(pDeviceMemoryCallbacks_) + , pHeapSizeLimit(pHeapSizeLimit_) + , pVulkanFunctions(pVulkanFunctions_) + , instance(instance_) + , vulkanApiVersion(vulkanApiVersion_) +#if VMA_EXTERNAL_MEMORY + , pTypeExternalMemoryHandleTypes(pTypeExternalMemoryHandleTypes_) +#endif + {} + + VULKAN_HPP_CONSTEXPR AllocatorCreateInfo(AllocatorCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocatorCreateInfo(VmaAllocatorCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT : AllocatorCreateInfo(*reinterpret_cast(&rhs)) {} +#endif + + AllocatorCreateInfo& operator=(AllocatorCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocatorCreateInfo& operator=(VmaAllocatorCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaAllocatorCreateInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaAllocatorCreateInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(AllocatorCreateInfo const &) const = default; +#else + bool operator==(AllocatorCreateInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return flags == rhs.flags + && physicalDevice == rhs.physicalDevice + && device == rhs.device + && preferredLargeHeapBlockSize == rhs.preferredLargeHeapBlockSize + && pAllocationCallbacks == rhs.pAllocationCallbacks + && pDeviceMemoryCallbacks == rhs.pDeviceMemoryCallbacks + && pHeapSizeLimit == rhs.pHeapSizeLimit + && pVulkanFunctions == rhs.pVulkanFunctions + && instance == rhs.instance + && vulkanApiVersion == rhs.vulkanApiVersion +#if VMA_EXTERNAL_MEMORY + && pTypeExternalMemoryHandleTypes == rhs.pTypeExternalMemoryHandleTypes +#endif + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setFlags(AllocatorCreateFlags flags_) VULKAN_HPP_NOEXCEPT { + flags = flags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPhysicalDevice(VULKAN_HPP_NAMESPACE::PhysicalDevice physicalDevice_) VULKAN_HPP_NOEXCEPT { + physicalDevice = physicalDevice_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setDevice(VULKAN_HPP_NAMESPACE::Device device_) VULKAN_HPP_NOEXCEPT { + device = device_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPreferredLargeHeapBlockSize(VULKAN_HPP_NAMESPACE::DeviceSize preferredLargeHeapBlockSize_) VULKAN_HPP_NOEXCEPT { + preferredLargeHeapBlockSize = preferredLargeHeapBlockSize_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPAllocationCallbacks(const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocationCallbacks_) VULKAN_HPP_NOEXCEPT { + pAllocationCallbacks = pAllocationCallbacks_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPDeviceMemoryCallbacks(const DeviceMemoryCallbacks* pDeviceMemoryCallbacks_) VULKAN_HPP_NOEXCEPT { + pDeviceMemoryCallbacks = pDeviceMemoryCallbacks_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPHeapSizeLimit(const VULKAN_HPP_NAMESPACE::DeviceSize* pHeapSizeLimit_) VULKAN_HPP_NOEXCEPT { + pHeapSizeLimit = pHeapSizeLimit_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPVulkanFunctions(const VulkanFunctions* pVulkanFunctions_) VULKAN_HPP_NOEXCEPT { + pVulkanFunctions = 
pVulkanFunctions_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setInstance(VULKAN_HPP_NAMESPACE::Instance instance_) VULKAN_HPP_NOEXCEPT { + instance = instance_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setVulkanApiVersion(uint32_t vulkanApiVersion_) VULKAN_HPP_NOEXCEPT { + vulkanApiVersion = vulkanApiVersion_; + return *this; + } +#if VMA_EXTERNAL_MEMORY + + VULKAN_HPP_CONSTEXPR_14 AllocatorCreateInfo& setPTypeExternalMemoryHandleTypes(const VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsKHR* pTypeExternalMemoryHandleTypes_) VULKAN_HPP_NOEXCEPT { + pTypeExternalMemoryHandleTypes = pTypeExternalMemoryHandleTypes_; + return *this; + } +#endif +#endif + + public: + AllocatorCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::PhysicalDevice physicalDevice = {}; + VULKAN_HPP_NAMESPACE::Device device = {}; + VULKAN_HPP_NAMESPACE::DeviceSize preferredLargeHeapBlockSize = {}; + const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocationCallbacks = {}; + const DeviceMemoryCallbacks* pDeviceMemoryCallbacks = {}; + const VULKAN_HPP_NAMESPACE::DeviceSize* pHeapSizeLimit = {}; + const VulkanFunctions* pVulkanFunctions = {}; + VULKAN_HPP_NAMESPACE::Instance instance = {}; + uint32_t vulkanApiVersion = {}; +#if VMA_EXTERNAL_MEMORY + const VULKAN_HPP_NAMESPACE::ExternalMemoryHandleTypeFlagsKHR* pTypeExternalMemoryHandleTypes = {}; +#endif + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(AllocatorCreateInfo) == sizeof(VmaAllocatorCreateInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "AllocatorCreateInfo is not nothrow_move_constructible!"); + + struct AllocatorInfo { + using NativeType = VmaAllocatorInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AllocatorInfo( + VULKAN_HPP_NAMESPACE::Instance instance_ = {} + , VULKAN_HPP_NAMESPACE::PhysicalDevice physicalDevice_ = {} + , VULKAN_HPP_NAMESPACE::Device device_ = {} + ) VULKAN_HPP_NOEXCEPT + : instance(instance_) + , physicalDevice(physicalDevice_) + , device(device_) + {} + + VULKAN_HPP_CONSTEXPR AllocatorInfo(AllocatorInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocatorInfo(VmaAllocatorInfo const & rhs) VULKAN_HPP_NOEXCEPT : AllocatorInfo(*reinterpret_cast(&rhs)) {} +#endif + + AllocatorInfo& operator=(AllocatorInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocatorInfo& operator=(VmaAllocatorInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaAllocatorInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaAllocatorInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(AllocatorInfo const &) const = default; +#else + bool operator==(AllocatorInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return instance == rhs.instance + && physicalDevice == rhs.physicalDevice + && device == rhs.device + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 AllocatorInfo& setInstance(VULKAN_HPP_NAMESPACE::Instance instance_) VULKAN_HPP_NOEXCEPT { + instance = instance_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocatorInfo& setPhysicalDevice(VULKAN_HPP_NAMESPACE::PhysicalDevice physicalDevice_) VULKAN_HPP_NOEXCEPT { + physicalDevice = physicalDevice_; + return *this; 
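Because the wrapper structs share layout with their Vma counterparts and convert implicitly, querying back the handles used at creation is direct; a small sketch with the same hypothetical `allocator`, `device` and `instance` as earlier:

// needs <cassert>
vma::AllocatorInfo info = allocator.getAllocatorInfo();
assert(info.device == device && info.instance == instance);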
+ } + + VULKAN_HPP_CONSTEXPR_14 AllocatorInfo& setDevice(VULKAN_HPP_NAMESPACE::Device device_) VULKAN_HPP_NOEXCEPT { + device = device_; + return *this; + } +#endif + + public: + VULKAN_HPP_NAMESPACE::Instance instance = {}; + VULKAN_HPP_NAMESPACE::PhysicalDevice physicalDevice = {}; + VULKAN_HPP_NAMESPACE::Device device = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(AllocatorInfo) == sizeof(VmaAllocatorInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "AllocatorInfo is not nothrow_move_constructible!"); + + struct Statistics { + using NativeType = VmaStatistics; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Statistics( + uint32_t blockCount_ = {} + , uint32_t allocationCount_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize blockBytes_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize allocationBytes_ = {} + ) VULKAN_HPP_NOEXCEPT + : blockCount(blockCount_) + , allocationCount(allocationCount_) + , blockBytes(blockBytes_) + , allocationBytes(allocationBytes_) + {} + + VULKAN_HPP_CONSTEXPR Statistics(Statistics const &) VULKAN_HPP_NOEXCEPT = default; + Statistics(VmaStatistics const & rhs) VULKAN_HPP_NOEXCEPT : Statistics(*reinterpret_cast(&rhs)) {} +#endif + + Statistics& operator=(Statistics const &) VULKAN_HPP_NOEXCEPT = default; + Statistics& operator=(VmaStatistics const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaStatistics const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaStatistics&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(Statistics const &) const = default; +#else + bool operator==(Statistics const & rhs) const VULKAN_HPP_NOEXCEPT { + return blockCount == rhs.blockCount + && allocationCount == rhs.allocationCount + && blockBytes == rhs.blockBytes + && allocationBytes == rhs.allocationBytes + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 Statistics& setBlockCount(uint32_t blockCount_) VULKAN_HPP_NOEXCEPT { + blockCount = blockCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Statistics& setAllocationCount(uint32_t allocationCount_) VULKAN_HPP_NOEXCEPT { + allocationCount = allocationCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Statistics& setBlockBytes(VULKAN_HPP_NAMESPACE::DeviceSize blockBytes_) VULKAN_HPP_NOEXCEPT { + blockBytes = blockBytes_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Statistics& setAllocationBytes(VULKAN_HPP_NAMESPACE::DeviceSize allocationBytes_) VULKAN_HPP_NOEXCEPT { + allocationBytes = allocationBytes_; + return *this; + } +#endif + + public: + uint32_t blockCount = {}; + uint32_t allocationCount = {}; + VULKAN_HPP_NAMESPACE::DeviceSize blockBytes = {}; + VULKAN_HPP_NAMESPACE::DeviceSize allocationBytes = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(Statistics) == sizeof(VmaStatistics), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "Statistics is not nothrow_move_constructible!"); + + struct DetailedStatistics { + using NativeType = VmaDetailedStatistics; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + 
VULKAN_HPP_CONSTEXPR DetailedStatistics( + Statistics statistics_ = {} + , uint32_t unusedRangeCount_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize allocationSizeMin_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize allocationSizeMax_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize unusedRangeSizeMin_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize unusedRangeSizeMax_ = {} + ) VULKAN_HPP_NOEXCEPT + : statistics(statistics_) + , unusedRangeCount(unusedRangeCount_) + , allocationSizeMin(allocationSizeMin_) + , allocationSizeMax(allocationSizeMax_) + , unusedRangeSizeMin(unusedRangeSizeMin_) + , unusedRangeSizeMax(unusedRangeSizeMax_) + {} + + VULKAN_HPP_CONSTEXPR DetailedStatistics(DetailedStatistics const &) VULKAN_HPP_NOEXCEPT = default; + DetailedStatistics(VmaDetailedStatistics const & rhs) VULKAN_HPP_NOEXCEPT : DetailedStatistics(*reinterpret_cast(&rhs)) {} +#endif + + DetailedStatistics& operator=(DetailedStatistics const &) VULKAN_HPP_NOEXCEPT = default; + DetailedStatistics& operator=(VmaDetailedStatistics const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaDetailedStatistics const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaDetailedStatistics&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(DetailedStatistics const &) const = default; +#else + bool operator==(DetailedStatistics const & rhs) const VULKAN_HPP_NOEXCEPT { + return statistics == rhs.statistics + && unusedRangeCount == rhs.unusedRangeCount + && allocationSizeMin == rhs.allocationSizeMin + && allocationSizeMax == rhs.allocationSizeMax + && unusedRangeSizeMin == rhs.unusedRangeSizeMin + && unusedRangeSizeMax == rhs.unusedRangeSizeMax + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 DetailedStatistics& setStatistics(Statistics statistics_) VULKAN_HPP_NOEXCEPT { + statistics = statistics_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DetailedStatistics& setUnusedRangeCount(uint32_t unusedRangeCount_) VULKAN_HPP_NOEXCEPT { + unusedRangeCount = unusedRangeCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DetailedStatistics& setAllocationSizeMin(VULKAN_HPP_NAMESPACE::DeviceSize allocationSizeMin_) VULKAN_HPP_NOEXCEPT { + allocationSizeMin = allocationSizeMin_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DetailedStatistics& setAllocationSizeMax(VULKAN_HPP_NAMESPACE::DeviceSize allocationSizeMax_) VULKAN_HPP_NOEXCEPT { + allocationSizeMax = allocationSizeMax_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DetailedStatistics& setUnusedRangeSizeMin(VULKAN_HPP_NAMESPACE::DeviceSize unusedRangeSizeMin_) VULKAN_HPP_NOEXCEPT { + unusedRangeSizeMin = unusedRangeSizeMin_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DetailedStatistics& setUnusedRangeSizeMax(VULKAN_HPP_NAMESPACE::DeviceSize unusedRangeSizeMax_) VULKAN_HPP_NOEXCEPT { + unusedRangeSizeMax = unusedRangeSizeMax_; + return *this; + } +#endif + + public: + Statistics statistics = {}; + uint32_t unusedRangeCount = {}; + VULKAN_HPP_NAMESPACE::DeviceSize allocationSizeMin = {}; + VULKAN_HPP_NAMESPACE::DeviceSize allocationSizeMax = {}; + VULKAN_HPP_NAMESPACE::DeviceSize unusedRangeSizeMin = {}; + VULKAN_HPP_NAMESPACE::DeviceSize unusedRangeSizeMax = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(DetailedStatistics) == sizeof(VmaDetailedStatistics), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + 
"struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "DetailedStatistics is not nothrow_move_constructible!"); + + struct TotalStatistics { + using NativeType = VmaTotalStatistics; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR TotalStatistics( + std::array memoryType_ = {} + , std::array memoryHeap_ = {} + , DetailedStatistics total_ = {} + ) VULKAN_HPP_NOEXCEPT + : memoryType(memoryType_) + , memoryHeap(memoryHeap_) + , total(total_) + {} + + VULKAN_HPP_CONSTEXPR TotalStatistics(TotalStatistics const &) VULKAN_HPP_NOEXCEPT = default; + TotalStatistics(VmaTotalStatistics const & rhs) VULKAN_HPP_NOEXCEPT : TotalStatistics(*reinterpret_cast(&rhs)) {} +#endif + + TotalStatistics& operator=(TotalStatistics const &) VULKAN_HPP_NOEXCEPT = default; + TotalStatistics& operator=(VmaTotalStatistics const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaTotalStatistics const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaTotalStatistics&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(TotalStatistics const &) const = default; +#else + bool operator==(TotalStatistics const & rhs) const VULKAN_HPP_NOEXCEPT { + return memoryType == rhs.memoryType + && memoryHeap == rhs.memoryHeap + && total == rhs.total + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 TotalStatistics& setMemoryType(std::array memoryType_) VULKAN_HPP_NOEXCEPT { + memoryType = memoryType_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 TotalStatistics& setMemoryHeap(std::array memoryHeap_) VULKAN_HPP_NOEXCEPT { + memoryHeap = memoryHeap_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 TotalStatistics& setTotal(DetailedStatistics total_) VULKAN_HPP_NOEXCEPT { + total = total_; + return *this; + } +#endif + + public: + std::array memoryType = {}; + std::array memoryHeap = {}; + DetailedStatistics total = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(TotalStatistics) == sizeof(VmaTotalStatistics), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "TotalStatistics is not nothrow_move_constructible!"); + + struct Budget { + using NativeType = VmaBudget; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR Budget( + Statistics statistics_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize usage_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize budget_ = {} + ) VULKAN_HPP_NOEXCEPT + : statistics(statistics_) + , usage(usage_) + , budget(budget_) + {} + + VULKAN_HPP_CONSTEXPR Budget(Budget const &) VULKAN_HPP_NOEXCEPT = default; + Budget(VmaBudget const & rhs) VULKAN_HPP_NOEXCEPT : Budget(*reinterpret_cast(&rhs)) {} +#endif + + Budget& operator=(Budget const &) VULKAN_HPP_NOEXCEPT = default; + Budget& operator=(VmaBudget const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaBudget const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaBudget&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(Budget const &) const = default; +#else + bool operator==(Budget const 
& rhs) const VULKAN_HPP_NOEXCEPT { + return statistics == rhs.statistics + && usage == rhs.usage + && budget == rhs.budget + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 Budget& setStatistics(Statistics statistics_) VULKAN_HPP_NOEXCEPT { + statistics = statistics_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Budget& setUsage(VULKAN_HPP_NAMESPACE::DeviceSize usage_) VULKAN_HPP_NOEXCEPT { + usage = usage_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 Budget& setBudget(VULKAN_HPP_NAMESPACE::DeviceSize budget_) VULKAN_HPP_NOEXCEPT { + budget = budget_; + return *this; + } +#endif + + public: + Statistics statistics = {}; + VULKAN_HPP_NAMESPACE::DeviceSize usage = {}; + VULKAN_HPP_NAMESPACE::DeviceSize budget = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(Budget) == sizeof(VmaBudget), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "Budget is not nothrow_move_constructible!"); + + struct AllocationCreateInfo { + using NativeType = VmaAllocationCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AllocationCreateInfo( + AllocationCreateFlags flags_ = {} + , MemoryUsage usage_ = {} + , VULKAN_HPP_NAMESPACE::MemoryPropertyFlags requiredFlags_ = {} + , VULKAN_HPP_NAMESPACE::MemoryPropertyFlags preferredFlags_ = {} + , uint32_t memoryTypeBits_ = {} + , Pool pool_ = {} + , void* pUserData_ = {} + , float priority_ = {} + ) VULKAN_HPP_NOEXCEPT + : flags(flags_) + , usage(usage_) + , requiredFlags(requiredFlags_) + , preferredFlags(preferredFlags_) + , memoryTypeBits(memoryTypeBits_) + , pool(pool_) + , pUserData(pUserData_) + , priority(priority_) + {} + + VULKAN_HPP_CONSTEXPR AllocationCreateInfo(AllocationCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocationCreateInfo(VmaAllocationCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT : AllocationCreateInfo(*reinterpret_cast(&rhs)) {} +#endif + + AllocationCreateInfo& operator=(AllocationCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocationCreateInfo& operator=(VmaAllocationCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaAllocationCreateInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaAllocationCreateInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(AllocationCreateInfo const &) const = default; +#else + bool operator==(AllocationCreateInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return flags == rhs.flags + && usage == rhs.usage + && requiredFlags == rhs.requiredFlags + && preferredFlags == rhs.preferredFlags + && memoryTypeBits == rhs.memoryTypeBits + && pool == rhs.pool + && pUserData == rhs.pUserData + && priority == rhs.priority + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setFlags(AllocationCreateFlags flags_) VULKAN_HPP_NOEXCEPT { + flags = flags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setUsage(MemoryUsage usage_) VULKAN_HPP_NOEXCEPT { + usage = usage_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setRequiredFlags(VULKAN_HPP_NAMESPACE::MemoryPropertyFlags requiredFlags_) VULKAN_HPP_NOEXCEPT { + requiredFlags = requiredFlags_; + 
return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setPreferredFlags(VULKAN_HPP_NAMESPACE::MemoryPropertyFlags preferredFlags_) VULKAN_HPP_NOEXCEPT { + preferredFlags = preferredFlags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setMemoryTypeBits(uint32_t memoryTypeBits_) VULKAN_HPP_NOEXCEPT { + memoryTypeBits = memoryTypeBits_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setPool(Pool pool_) VULKAN_HPP_NOEXCEPT { + pool = pool_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setPUserData(void* pUserData_) VULKAN_HPP_NOEXCEPT { + pUserData = pUserData_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationCreateInfo& setPriority(float priority_) VULKAN_HPP_NOEXCEPT { + priority = priority_; + return *this; + } +#endif + + public: + AllocationCreateFlags flags = {}; + MemoryUsage usage = {}; + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags requiredFlags = {}; + VULKAN_HPP_NAMESPACE::MemoryPropertyFlags preferredFlags = {}; + uint32_t memoryTypeBits = {}; + Pool pool = {}; + void* pUserData = {}; + float priority = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(AllocationCreateInfo) == sizeof(VmaAllocationCreateInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "AllocationCreateInfo is not nothrow_move_constructible!"); + + struct PoolCreateInfo { + using NativeType = VmaPoolCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR PoolCreateInfo( + uint32_t memoryTypeIndex_ = {} + , PoolCreateFlags flags_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize blockSize_ = {} + , size_t minBlockCount_ = {} + , size_t maxBlockCount_ = {} + , float priority_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize minAllocationAlignment_ = {} + , void* pMemoryAllocateNext_ = {} + ) VULKAN_HPP_NOEXCEPT + : memoryTypeIndex(memoryTypeIndex_) + , flags(flags_) + , blockSize(blockSize_) + , minBlockCount(minBlockCount_) + , maxBlockCount(maxBlockCount_) + , priority(priority_) + , minAllocationAlignment(minAllocationAlignment_) + , pMemoryAllocateNext(pMemoryAllocateNext_) + {} + + VULKAN_HPP_CONSTEXPR PoolCreateInfo(PoolCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + PoolCreateInfo(VmaPoolCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT : PoolCreateInfo(*reinterpret_cast(&rhs)) {} +#endif + + PoolCreateInfo& operator=(PoolCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + PoolCreateInfo& operator=(VmaPoolCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaPoolCreateInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaPoolCreateInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(PoolCreateInfo const &) const = default; +#else + bool operator==(PoolCreateInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return memoryTypeIndex == rhs.memoryTypeIndex + && flags == rhs.flags + && blockSize == rhs.blockSize + && minBlockCount == rhs.minBlockCount + && maxBlockCount == rhs.maxBlockCount + && priority == rhs.priority + && minAllocationAlignment == rhs.minAllocationAlignment + && pMemoryAllocateNext == rhs.pMemoryAllocateNext + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& 
setMemoryTypeIndex(uint32_t memoryTypeIndex_) VULKAN_HPP_NOEXCEPT { + memoryTypeIndex = memoryTypeIndex_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setFlags(PoolCreateFlags flags_) VULKAN_HPP_NOEXCEPT { + flags = flags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setBlockSize(VULKAN_HPP_NAMESPACE::DeviceSize blockSize_) VULKAN_HPP_NOEXCEPT { + blockSize = blockSize_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setMinBlockCount(size_t minBlockCount_) VULKAN_HPP_NOEXCEPT { + minBlockCount = minBlockCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setMaxBlockCount(size_t maxBlockCount_) VULKAN_HPP_NOEXCEPT { + maxBlockCount = maxBlockCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setPriority(float priority_) VULKAN_HPP_NOEXCEPT { + priority = priority_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setMinAllocationAlignment(VULKAN_HPP_NAMESPACE::DeviceSize minAllocationAlignment_) VULKAN_HPP_NOEXCEPT { + minAllocationAlignment = minAllocationAlignment_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 PoolCreateInfo& setPMemoryAllocateNext(void* pMemoryAllocateNext_) VULKAN_HPP_NOEXCEPT { + pMemoryAllocateNext = pMemoryAllocateNext_; + return *this; + } +#endif + + public: + uint32_t memoryTypeIndex = {}; + PoolCreateFlags flags = {}; + VULKAN_HPP_NAMESPACE::DeviceSize blockSize = {}; + size_t minBlockCount = {}; + size_t maxBlockCount = {}; + float priority = {}; + VULKAN_HPP_NAMESPACE::DeviceSize minAllocationAlignment = {}; + void* pMemoryAllocateNext = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(PoolCreateInfo) == sizeof(VmaPoolCreateInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "PoolCreateInfo is not nothrow_move_constructible!"); + + struct AllocationInfo { + using NativeType = VmaAllocationInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR AllocationInfo( + uint32_t memoryType_ = {} + , VULKAN_HPP_NAMESPACE::DeviceMemory deviceMemory_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} + , void* pMappedData_ = {} + , void* pUserData_ = {} + , const char* pName_ = {} + ) VULKAN_HPP_NOEXCEPT + : memoryType(memoryType_) + , deviceMemory(deviceMemory_) + , offset(offset_) + , size(size_) + , pMappedData(pMappedData_) + , pUserData(pUserData_) + , pName(pName_) + {} + + VULKAN_HPP_CONSTEXPR AllocationInfo(AllocationInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocationInfo(VmaAllocationInfo const & rhs) VULKAN_HPP_NOEXCEPT : AllocationInfo(*reinterpret_cast(&rhs)) {} +#endif + + AllocationInfo& operator=(AllocationInfo const &) VULKAN_HPP_NOEXCEPT = default; + AllocationInfo& operator=(VmaAllocationInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaAllocationInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaAllocationInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(AllocationInfo const &) const = default; +#else + bool operator==(AllocationInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return memoryType == rhs.memoryType + && deviceMemory == rhs.deviceMemory + && offset == rhs.offset + && 
size == rhs.size + && pMappedData == rhs.pMappedData + && pUserData == rhs.pUserData + && pName == rhs.pName + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setMemoryType(uint32_t memoryType_) VULKAN_HPP_NOEXCEPT { + memoryType = memoryType_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setDeviceMemory(VULKAN_HPP_NAMESPACE::DeviceMemory deviceMemory_) VULKAN_HPP_NOEXCEPT { + deviceMemory = deviceMemory_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setOffset(VULKAN_HPP_NAMESPACE::DeviceSize offset_) VULKAN_HPP_NOEXCEPT { + offset = offset_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setSize(VULKAN_HPP_NAMESPACE::DeviceSize size_) VULKAN_HPP_NOEXCEPT { + size = size_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setPMappedData(void* pMappedData_) VULKAN_HPP_NOEXCEPT { + pMappedData = pMappedData_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setPUserData(void* pUserData_) VULKAN_HPP_NOEXCEPT { + pUserData = pUserData_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 AllocationInfo& setPName(const char* pName_) VULKAN_HPP_NOEXCEPT { + pName = pName_; + return *this; + } +#endif + + public: + uint32_t memoryType = {}; + VULKAN_HPP_NAMESPACE::DeviceMemory deviceMemory = {}; + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + void* pMappedData = {}; + void* pUserData = {}; + const char* pName = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(AllocationInfo) == sizeof(VmaAllocationInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "AllocationInfo is not nothrow_move_constructible!"); + + struct DefragmentationInfo { + using NativeType = VmaDefragmentationInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DefragmentationInfo( + DefragmentationFlags flags_ = {} + , Pool pool_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize maxBytesPerPass_ = {} + , uint32_t maxAllocationsPerPass_ = {} + ) VULKAN_HPP_NOEXCEPT + : flags(flags_) + , pool(pool_) + , maxBytesPerPass(maxBytesPerPass_) + , maxAllocationsPerPass(maxAllocationsPerPass_) + {} + + VULKAN_HPP_CONSTEXPR DefragmentationInfo(DefragmentationInfo const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationInfo(VmaDefragmentationInfo const & rhs) VULKAN_HPP_NOEXCEPT : DefragmentationInfo(*reinterpret_cast(&rhs)) {} +#endif + + DefragmentationInfo& operator=(DefragmentationInfo const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationInfo& operator=(VmaDefragmentationInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaDefragmentationInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaDefragmentationInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(DefragmentationInfo const &) const = default; +#else + bool operator==(DefragmentationInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return flags == rhs.flags + && pool == rhs.pool + && maxBytesPerPass == rhs.maxBytesPerPass + && maxAllocationsPerPass == rhs.maxAllocationsPerPass + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 DefragmentationInfo& 
setFlags(DefragmentationFlags flags_) VULKAN_HPP_NOEXCEPT { + flags = flags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationInfo& setPool(Pool pool_) VULKAN_HPP_NOEXCEPT { + pool = pool_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationInfo& setMaxBytesPerPass(VULKAN_HPP_NAMESPACE::DeviceSize maxBytesPerPass_) VULKAN_HPP_NOEXCEPT { + maxBytesPerPass = maxBytesPerPass_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationInfo& setMaxAllocationsPerPass(uint32_t maxAllocationsPerPass_) VULKAN_HPP_NOEXCEPT { + maxAllocationsPerPass = maxAllocationsPerPass_; + return *this; + } +#endif + + public: + DefragmentationFlags flags = {}; + Pool pool = {}; + VULKAN_HPP_NAMESPACE::DeviceSize maxBytesPerPass = {}; + uint32_t maxAllocationsPerPass = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationInfo) == sizeof(VmaDefragmentationInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "DefragmentationInfo is not nothrow_move_constructible!"); + + struct DefragmentationMove { + using NativeType = VmaDefragmentationMove; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DefragmentationMove( + DefragmentationMoveOperation operation_ = {} + , Allocation srcAllocation_ = {} + , Allocation dstTmpAllocation_ = {} + ) VULKAN_HPP_NOEXCEPT + : operation(operation_) + , srcAllocation(srcAllocation_) + , dstTmpAllocation(dstTmpAllocation_) + {} + + VULKAN_HPP_CONSTEXPR DefragmentationMove(DefragmentationMove const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationMove(VmaDefragmentationMove const & rhs) VULKAN_HPP_NOEXCEPT : DefragmentationMove(*reinterpret_cast(&rhs)) {} +#endif + + DefragmentationMove& operator=(DefragmentationMove const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationMove& operator=(VmaDefragmentationMove const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaDefragmentationMove const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaDefragmentationMove&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(DefragmentationMove const &) const = default; +#else + bool operator==(DefragmentationMove const & rhs) const VULKAN_HPP_NOEXCEPT { + return operation == rhs.operation + && srcAllocation == rhs.srcAllocation + && dstTmpAllocation == rhs.dstTmpAllocation + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 DefragmentationMove& setOperation(DefragmentationMoveOperation operation_) VULKAN_HPP_NOEXCEPT { + operation = operation_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationMove& setSrcAllocation(Allocation srcAllocation_) VULKAN_HPP_NOEXCEPT { + srcAllocation = srcAllocation_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationMove& setDstTmpAllocation(Allocation dstTmpAllocation_) VULKAN_HPP_NOEXCEPT { + dstTmpAllocation = dstTmpAllocation_; + return *this; + } +#endif + + public: + DefragmentationMoveOperation operation = {}; + Allocation srcAllocation = {}; + Allocation dstTmpAllocation = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationMove) == sizeof(VmaDefragmentationMove), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + 
"struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "DefragmentationMove is not nothrow_move_constructible!"); + + struct DefragmentationPassMoveInfo { + using NativeType = VmaDefragmentationPassMoveInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DefragmentationPassMoveInfo( + uint32_t moveCount_ = {} + , DefragmentationMove* pMoves_ = {} + ) VULKAN_HPP_NOEXCEPT + : moveCount(moveCount_) + , pMoves(pMoves_) + {} + + VULKAN_HPP_CONSTEXPR DefragmentationPassMoveInfo(DefragmentationPassMoveInfo const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationPassMoveInfo(VmaDefragmentationPassMoveInfo const & rhs) VULKAN_HPP_NOEXCEPT : DefragmentationPassMoveInfo(*reinterpret_cast(&rhs)) {} +#endif + + DefragmentationPassMoveInfo& operator=(DefragmentationPassMoveInfo const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationPassMoveInfo& operator=(VmaDefragmentationPassMoveInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaDefragmentationPassMoveInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaDefragmentationPassMoveInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(DefragmentationPassMoveInfo const &) const = default; +#else + bool operator==(DefragmentationPassMoveInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return moveCount == rhs.moveCount + && pMoves == rhs.pMoves + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 DefragmentationPassMoveInfo& setMoveCount(uint32_t moveCount_) VULKAN_HPP_NOEXCEPT { + moveCount = moveCount_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationPassMoveInfo& setPMoves(DefragmentationMove* pMoves_) VULKAN_HPP_NOEXCEPT { + pMoves = pMoves_; + return *this; + } +#endif + + public: + uint32_t moveCount = {}; + DefragmentationMove* pMoves = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationPassMoveInfo) == sizeof(VmaDefragmentationPassMoveInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "DefragmentationPassMoveInfo is not nothrow_move_constructible!"); + + struct DefragmentationStats { + using NativeType = VmaDefragmentationStats; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR DefragmentationStats( + VULKAN_HPP_NAMESPACE::DeviceSize bytesMoved_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize bytesFreed_ = {} + , uint32_t allocationsMoved_ = {} + , uint32_t deviceMemoryBlocksFreed_ = {} + ) VULKAN_HPP_NOEXCEPT + : bytesMoved(bytesMoved_) + , bytesFreed(bytesFreed_) + , allocationsMoved(allocationsMoved_) + , deviceMemoryBlocksFreed(deviceMemoryBlocksFreed_) + {} + + VULKAN_HPP_CONSTEXPR DefragmentationStats(DefragmentationStats const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationStats(VmaDefragmentationStats const & rhs) VULKAN_HPP_NOEXCEPT : DefragmentationStats(*reinterpret_cast(&rhs)) {} +#endif + + DefragmentationStats& operator=(DefragmentationStats const &) VULKAN_HPP_NOEXCEPT = default; + DefragmentationStats& operator=(VmaDefragmentationStats const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaDefragmentationStats const &() 
const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaDefragmentationStats&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(DefragmentationStats const &) const = default; +#else + bool operator==(DefragmentationStats const & rhs) const VULKAN_HPP_NOEXCEPT { + return bytesMoved == rhs.bytesMoved + && bytesFreed == rhs.bytesFreed + && allocationsMoved == rhs.allocationsMoved + && deviceMemoryBlocksFreed == rhs.deviceMemoryBlocksFreed + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 DefragmentationStats& setBytesMoved(VULKAN_HPP_NAMESPACE::DeviceSize bytesMoved_) VULKAN_HPP_NOEXCEPT { + bytesMoved = bytesMoved_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationStats& setBytesFreed(VULKAN_HPP_NAMESPACE::DeviceSize bytesFreed_) VULKAN_HPP_NOEXCEPT { + bytesFreed = bytesFreed_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationStats& setAllocationsMoved(uint32_t allocationsMoved_) VULKAN_HPP_NOEXCEPT { + allocationsMoved = allocationsMoved_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 DefragmentationStats& setDeviceMemoryBlocksFreed(uint32_t deviceMemoryBlocksFreed_) VULKAN_HPP_NOEXCEPT { + deviceMemoryBlocksFreed = deviceMemoryBlocksFreed_; + return *this; + } +#endif + + public: + VULKAN_HPP_NAMESPACE::DeviceSize bytesMoved = {}; + VULKAN_HPP_NAMESPACE::DeviceSize bytesFreed = {}; + uint32_t allocationsMoved = {}; + uint32_t deviceMemoryBlocksFreed = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(DefragmentationStats) == sizeof(VmaDefragmentationStats), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "DefragmentationStats is not nothrow_move_constructible!"); + + struct VirtualBlockCreateInfo { + using NativeType = VmaVirtualBlockCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR VirtualBlockCreateInfo( + VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} + , VirtualBlockCreateFlags flags_ = {} + , const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocationCallbacks_ = {} + ) VULKAN_HPP_NOEXCEPT + : size(size_) + , flags(flags_) + , pAllocationCallbacks(pAllocationCallbacks_) + {} + + VULKAN_HPP_CONSTEXPR VirtualBlockCreateInfo(VirtualBlockCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + VirtualBlockCreateInfo(VmaVirtualBlockCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT : VirtualBlockCreateInfo(*reinterpret_cast(&rhs)) {} +#endif + + VirtualBlockCreateInfo& operator=(VirtualBlockCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + VirtualBlockCreateInfo& operator=(VmaVirtualBlockCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaVirtualBlockCreateInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaVirtualBlockCreateInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(VirtualBlockCreateInfo const &) const = default; +#else + bool operator==(VirtualBlockCreateInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return size == rhs.size + && flags == rhs.flags + && pAllocationCallbacks == rhs.pAllocationCallbacks + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + 
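// Illustrative sketch (hypothetical values), using the chained setters below:
+    //   auto info = VirtualBlockCreateInfo{}.setSize(1 << 20).setFlags({});
+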
VULKAN_HPP_CONSTEXPR_14 VirtualBlockCreateInfo& setSize(VULKAN_HPP_NAMESPACE::DeviceSize size_) VULKAN_HPP_NOEXCEPT { + size = size_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualBlockCreateInfo& setFlags(VirtualBlockCreateFlags flags_) VULKAN_HPP_NOEXCEPT { + flags = flags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualBlockCreateInfo& setPAllocationCallbacks(const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocationCallbacks_) VULKAN_HPP_NOEXCEPT { + pAllocationCallbacks = pAllocationCallbacks_; + return *this; + } +#endif + + public: + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + VirtualBlockCreateFlags flags = {}; + const VULKAN_HPP_NAMESPACE::AllocationCallbacks* pAllocationCallbacks = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualBlockCreateInfo) == sizeof(VmaVirtualBlockCreateInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "VirtualBlockCreateInfo is not nothrow_move_constructible!"); + + struct VirtualAllocationCreateInfo { + using NativeType = VmaVirtualAllocationCreateInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR VirtualAllocationCreateInfo( + VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize alignment_ = {} + , VirtualAllocationCreateFlags flags_ = {} + , void* pUserData_ = {} + ) VULKAN_HPP_NOEXCEPT + : size(size_) + , alignment(alignment_) + , flags(flags_) + , pUserData(pUserData_) + {} + + VULKAN_HPP_CONSTEXPR VirtualAllocationCreateInfo(VirtualAllocationCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + VirtualAllocationCreateInfo(VmaVirtualAllocationCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT : VirtualAllocationCreateInfo(*reinterpret_cast(&rhs)) {} +#endif + + VirtualAllocationCreateInfo& operator=(VirtualAllocationCreateInfo const &) VULKAN_HPP_NOEXCEPT = default; + VirtualAllocationCreateInfo& operator=(VmaVirtualAllocationCreateInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaVirtualAllocationCreateInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaVirtualAllocationCreateInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(VirtualAllocationCreateInfo const &) const = default; +#else + bool operator==(VirtualAllocationCreateInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return size == rhs.size + && alignment == rhs.alignment + && flags == rhs.flags + && pUserData == rhs.pUserData + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationCreateInfo& setSize(VULKAN_HPP_NAMESPACE::DeviceSize size_) VULKAN_HPP_NOEXCEPT { + size = size_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationCreateInfo& setAlignment(VULKAN_HPP_NAMESPACE::DeviceSize alignment_) VULKAN_HPP_NOEXCEPT { + alignment = alignment_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationCreateInfo& setFlags(VirtualAllocationCreateFlags flags_) VULKAN_HPP_NOEXCEPT { + flags = flags_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationCreateInfo& setPUserData(void* pUserData_) VULKAN_HPP_NOEXCEPT { + pUserData = pUserData_; + return *this; + } +#endif + + public: + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + 
VULKAN_HPP_NAMESPACE::DeviceSize alignment = {}; + VirtualAllocationCreateFlags flags = {}; + void* pUserData = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualAllocationCreateInfo) == sizeof(VmaVirtualAllocationCreateInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "VirtualAllocationCreateInfo is not nothrow_move_constructible!"); + + struct VirtualAllocationInfo { + using NativeType = VmaVirtualAllocationInfo; + +#if !defined( VULKAN_HPP_NO_STRUCT_CONSTRUCTORS ) + VULKAN_HPP_CONSTEXPR VirtualAllocationInfo( + VULKAN_HPP_NAMESPACE::DeviceSize offset_ = {} + , VULKAN_HPP_NAMESPACE::DeviceSize size_ = {} + , void* pUserData_ = {} + ) VULKAN_HPP_NOEXCEPT + : offset(offset_) + , size(size_) + , pUserData(pUserData_) + {} + + VULKAN_HPP_CONSTEXPR VirtualAllocationInfo(VirtualAllocationInfo const &) VULKAN_HPP_NOEXCEPT = default; + VirtualAllocationInfo(VmaVirtualAllocationInfo const & rhs) VULKAN_HPP_NOEXCEPT : VirtualAllocationInfo(*reinterpret_cast(&rhs)) {} +#endif + + VirtualAllocationInfo& operator=(VirtualAllocationInfo const &) VULKAN_HPP_NOEXCEPT = default; + VirtualAllocationInfo& operator=(VmaVirtualAllocationInfo const & rhs) VULKAN_HPP_NOEXCEPT { + *this = *reinterpret_cast(&rhs); + return *this; + } + + explicit operator VmaVirtualAllocationInfo const &() const VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + + explicit operator VmaVirtualAllocationInfo&() VULKAN_HPP_NOEXCEPT { + return *reinterpret_cast(this); + } + +#if defined( VULKAN_HPP_HAS_SPACESHIP_OPERATOR ) + bool operator==(VirtualAllocationInfo const &) const = default; +#else + bool operator==(VirtualAllocationInfo const & rhs) const VULKAN_HPP_NOEXCEPT { + return offset == rhs.offset + && size == rhs.size + && pUserData == rhs.pUserData + ; + } +#endif + +#if !defined( VULKAN_HPP_NO_STRUCT_SETTERS ) + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationInfo& setOffset(VULKAN_HPP_NAMESPACE::DeviceSize offset_) VULKAN_HPP_NOEXCEPT { + offset = offset_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationInfo& setSize(VULKAN_HPP_NAMESPACE::DeviceSize size_) VULKAN_HPP_NOEXCEPT { + size = size_; + return *this; + } + + VULKAN_HPP_CONSTEXPR_14 VirtualAllocationInfo& setPUserData(void* pUserData_) VULKAN_HPP_NOEXCEPT { + pUserData = pUserData_; + return *this; + } +#endif + + public: + VULKAN_HPP_NAMESPACE::DeviceSize offset = {}; + VULKAN_HPP_NAMESPACE::DeviceSize size = {}; + void* pUserData = {}; + }; + VULKAN_HPP_STATIC_ASSERT(sizeof(VirtualAllocationInfo) == sizeof(VmaVirtualAllocationInfo), + "struct and wrapper have different size!"); + VULKAN_HPP_STATIC_ASSERT(std::is_standard_layout::value, + "struct wrapper is not a standard layout!"); + VULKAN_HPP_STATIC_ASSERT(std::is_nothrow_move_constructible::value, + "VirtualAllocationInfo is not nothrow_move_constructible!"); +} +#endif diff --git a/external/fmt/.clang-format b/external/fmt/.clang-format new file mode 100644 index 00000000..df55d340 --- /dev/null +++ b/external/fmt/.clang-format @@ -0,0 +1,8 @@ +# Run manually to reformat a file: +# clang-format -i --style=file +Language: Cpp +BasedOnStyle: Google +IndentPPDirectives: AfterHash +IndentCaseLabels: false +AlwaysBreakTemplateDeclarations: false +DerivePointerAlignment: false diff --git a/external/fmt/LICENSE.rst b/external/fmt/LICENSE.rst new file mode 100644 index 00000000..f0ec3db4 --- /dev/null +++ 
b/external/fmt/LICENSE.rst @@ -0,0 +1,27 @@ +Copyright (c) 2012 - present, Victor Zverovich + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--- Optional exception to the license --- + +As an exception, if, as a result of your compiling your source code, portions +of this Software are embedded into a machine-executable object form of such +source code, you may redistribute such embedded portions in such object form +without including the above copyright and permission notices. diff --git a/external/fmt/include/fmt/args.h b/external/fmt/include/fmt/args.h new file mode 100644 index 00000000..a3966d14 --- /dev/null +++ b/external/fmt/include/fmt/args.h @@ -0,0 +1,234 @@ +// Formatting library for C++ - dynamic format arguments +// +// Copyright (c) 2012 - present, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_ARGS_H_ +#define FMT_ARGS_H_ + +#include // std::reference_wrapper +#include // std::unique_ptr +#include + +#include "core.h" + +FMT_BEGIN_NAMESPACE + +namespace detail { + +template struct is_reference_wrapper : std::false_type {}; +template +struct is_reference_wrapper> : std::true_type {}; + +template const T& unwrap(const T& v) { return v; } +template const T& unwrap(const std::reference_wrapper& v) { + return static_cast(v); +} + +class dynamic_arg_list { + // Workaround for clang's -Wweak-vtables. Unlike for regular classes, for + // templates it doesn't complain about inability to deduce single translation + // unit for placing vtable. So storage_node_base is made a fake template. + template struct node { + virtual ~node() = default; + std::unique_ptr> next; + }; + + template struct typed_node : node<> { + T value; + + template + FMT_CONSTEXPR typed_node(const Arg& arg) : value(arg) {} + + template + FMT_CONSTEXPR typed_node(const basic_string_view& arg) + : value(arg.data(), arg.size()) {} + }; + + std::unique_ptr> head_; + + public: + template const T& push(const Arg& arg) { + auto new_node = std::unique_ptr>(new typed_node(arg)); + auto& value = new_node->value; + new_node->next = std::move(head_); + head_ = std::move(new_node); + return value; + } +}; +} // namespace detail + +/** + \rst + A dynamic version of `fmt::format_arg_store`. + It's equipped with a storage to potentially temporary objects which lifetimes + could be shorter than the format arguments object. + + It can be implicitly converted into `~fmt::basic_format_args` for passing + into type-erased formatting functions such as `~fmt::vformat`. 
+ \endrst + */ +template +class dynamic_format_arg_store +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 + // Workaround a GCC template argument substitution bug. + : public basic_format_args +#endif +{ + private: + using char_type = typename Context::char_type; + + template struct need_copy { + static constexpr detail::type mapped_type = + detail::mapped_type_constant::value; + + enum { + value = !(detail::is_reference_wrapper::value || + std::is_same>::value || + std::is_same>::value || + (mapped_type != detail::type::cstring_type && + mapped_type != detail::type::string_type && + mapped_type != detail::type::custom_type)) + }; + }; + + template + using stored_type = conditional_t< + std::is_convertible>::value && + !detail::is_reference_wrapper::value, + std::basic_string, T>; + + // Storage of basic_format_arg must be contiguous. + std::vector> data_; + std::vector> named_info_; + + // Storage of arguments not fitting into basic_format_arg must grow + // without relocation because items in data_ refer to it. + detail::dynamic_arg_list dynamic_args_; + + friend class basic_format_args; + + unsigned long long get_types() const { + return detail::is_unpacked_bit | data_.size() | + (named_info_.empty() + ? 0ULL + : static_cast(detail::has_named_args_bit)); + } + + const basic_format_arg* data() const { + return named_info_.empty() ? data_.data() : data_.data() + 1; + } + + template void emplace_arg(const T& arg) { + data_.emplace_back(detail::make_arg(arg)); + } + + template + void emplace_arg(const detail::named_arg& arg) { + if (named_info_.empty()) { + constexpr const detail::named_arg_info* zero_ptr{nullptr}; + data_.insert(data_.begin(), {zero_ptr, 0}); + } + data_.emplace_back(detail::make_arg(detail::unwrap(arg.value))); + auto pop_one = [](std::vector>* data) { + data->pop_back(); + }; + std::unique_ptr>, decltype(pop_one)> + guard{&data_, pop_one}; + named_info_.push_back({arg.name, static_cast(data_.size() - 2u)}); + data_[0].value_.named_args = {named_info_.data(), named_info_.size()}; + guard.release(); + } + + public: + constexpr dynamic_format_arg_store() = default; + + /** + \rst + Adds an argument into the dynamic store for later passing to a formatting + function. + + Note that custom types and string types (but not string views) are copied + into the store dynamically allocating memory if necessary. + + **Example**:: + + fmt::dynamic_format_arg_store store; + store.push_back(42); + store.push_back("abc"); + store.push_back(1.5f); + std::string result = fmt::vformat("{} and {} and {}", store); + \endrst + */ + template void push_back(const T& arg) { + if (detail::const_check(need_copy::value)) + emplace_arg(dynamic_args_.push>(arg)); + else + emplace_arg(detail::unwrap(arg)); + } + + /** + \rst + Adds a reference to the argument into the dynamic store for later passing to + a formatting function. + + **Example**:: + + fmt::dynamic_format_arg_store store; + char band[] = "Rolling Stones"; + store.push_back(std::cref(band)); + band[9] = 'c'; // Changing str affects the output. + std::string result = fmt::vformat("{}", store); + // result == "Rolling Scones" + \endrst + */ + template void push_back(std::reference_wrapper arg) { + static_assert( + need_copy::value, + "objects of built-in types and string views are always copied"); + emplace_arg(arg.get()); + } + + /** + Adds named argument into the dynamic store for later passing to a formatting + function. ``std::reference_wrapper`` is supported to avoid copying of the + argument. The name is always copied into the store. 
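+
+     **Example** (illustrative sketch; the placeholder name is hypothetical)::
+
+       fmt::dynamic_format_arg_store<fmt::format_context> store;
+       store.push_back(fmt::arg("answer", 42));
+       std::string result = fmt::vformat("the answer is {answer}", store);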
+ */ + template + void push_back(const detail::named_arg& arg) { + const char_type* arg_name = + dynamic_args_.push>(arg.name).c_str(); + if (detail::const_check(need_copy::value)) { + emplace_arg( + fmt::arg(arg_name, dynamic_args_.push>(arg.value))); + } else { + emplace_arg(fmt::arg(arg_name, arg.value)); + } + } + + /** Erase all elements from the store */ + void clear() { + data_.clear(); + named_info_.clear(); + dynamic_args_ = detail::dynamic_arg_list(); + } + + /** + \rst + Reserves space to store at least *new_cap* arguments including + *new_cap_named* named arguments. + \endrst + */ + void reserve(size_t new_cap, size_t new_cap_named) { + FMT_ASSERT(new_cap >= new_cap_named, + "Set of arguments includes set of named arguments"); + data_.reserve(new_cap); + named_info_.reserve(new_cap_named); + } +}; + +FMT_END_NAMESPACE + +#endif // FMT_ARGS_H_ diff --git a/external/fmt/include/fmt/chrono.h b/external/fmt/include/fmt/chrono.h new file mode 100644 index 00000000..7872fb4b --- /dev/null +++ b/external/fmt/include/fmt/chrono.h @@ -0,0 +1,2081 @@ +// Formatting library for C++ - chrono support +// +// Copyright (c) 2012 - present, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_CHRONO_H_ +#define FMT_CHRONO_H_ + +#include +#include +#include // std::isfinite +#include // std::memcpy +#include +#include +#include +#include +#include + +#include "format.h" + +FMT_BEGIN_NAMESPACE + +// Enable tzset. +#ifndef FMT_USE_TZSET +// UWP doesn't provide _tzset. +# if FMT_HAS_INCLUDE("winapifamily.h") +# include +# endif +# if defined(_WIN32) && (!defined(WINAPI_FAMILY) || \ + (WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP)) +# define FMT_USE_TZSET 1 +# else +# define FMT_USE_TZSET 0 +# endif +#endif + +// Enable safe chrono durations, unless explicitly disabled. +#ifndef FMT_SAFE_DURATION_CAST +# define FMT_SAFE_DURATION_CAST 1 +#endif +#if FMT_SAFE_DURATION_CAST + +// For conversion between std::chrono::durations without undefined +// behaviour or erroneous results. +// This is a stripped down version of duration_cast, for inclusion in fmt. +// See https://github.com/pauldreik/safe_duration_cast +// +// Copyright Paul Dreik 2019 +namespace safe_duration_cast { + +template ::value && + std::numeric_limits::is_signed == + std::numeric_limits::is_signed)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { + ec = 0; + using F = std::numeric_limits; + using T = std::numeric_limits; + static_assert(F::is_integer, "From must be integral"); + static_assert(T::is_integer, "To must be integral"); + + // A and B are both signed, or both unsigned. + if (detail::const_check(F::digits <= T::digits)) { + // From fits in To without any problem. + } else { + // From does not always fit in To, resort to a dynamic check. + if (from < (T::min)() || from > (T::max)()) { + // outside range. + ec = 1; + return {}; + } + } + return static_cast(from); +} + +/** + * converts From to To, without loss. If the dynamic value of from + * can't be converted to To without loss, ec is set. + */ +template ::value && + std::numeric_limits::is_signed != + std::numeric_limits::is_signed)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { + ec = 0; + using F = std::numeric_limits; + using T = std::numeric_limits; + static_assert(F::is_integer, "From must be integral"); + static_assert(T::is_integer, "To must be integral"); + + if (detail::const_check(F::is_signed && !T::is_signed)) { + // From may be negative, not allowed! 
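+      // e.g. From = int, To = unsigned and from = -1: the branch below sets
+      // ec = 1 instead of letting the cast wrap to a huge positive value.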
+ if (fmt::detail::is_negative(from)) { + ec = 1; + return {}; + } + // From is positive. Can it always fit in To? + if (detail::const_check(F::digits > T::digits) && + from > static_cast(detail::max_value())) { + ec = 1; + return {}; + } + } + + if (detail::const_check(!F::is_signed && T::is_signed && + F::digits >= T::digits) && + from > static_cast(detail::max_value())) { + ec = 1; + return {}; + } + return static_cast(from); // Lossless conversion. +} + +template ::value)> +FMT_CONSTEXPR To lossless_integral_conversion(const From from, int& ec) { + ec = 0; + return from; +} // function + +// clang-format off +/** + * converts From to To if possible, otherwise ec is set. + * + * input | output + * ---------------------------------|--------------- + * NaN | NaN + * Inf | Inf + * normal, fits in output | converted (possibly lossy) + * normal, does not fit in output | ec is set + * subnormal | best effort + * -Inf | -Inf + */ +// clang-format on +template ::value)> +FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { + ec = 0; + using T = std::numeric_limits; + static_assert(std::is_floating_point::value, "From must be floating"); + static_assert(std::is_floating_point::value, "To must be floating"); + + // catch the only happy case + if (std::isfinite(from)) { + if (from >= T::lowest() && from <= (T::max)()) { + return static_cast(from); + } + // not within range. + ec = 1; + return {}; + } + + // nan and inf will be preserved + return static_cast(from); +} // function + +template ::value)> +FMT_CONSTEXPR To safe_float_conversion(const From from, int& ec) { + ec = 0; + static_assert(std::is_floating_point::value, "From must be floating"); + return from; +} + +/** + * safe duration cast between integral durations + */ +template ::value), + FMT_ENABLE_IF(std::is_integral::value)> +To safe_duration_cast(std::chrono::duration from, + int& ec) { + using From = std::chrono::duration; + ec = 0; + // the basic idea is that we need to convert from count() in the from type + // to count() in the To type, by multiplying it with this: + struct Factor + : std::ratio_divide {}; + + static_assert(Factor::num > 0, "num must be positive"); + static_assert(Factor::den > 0, "den must be positive"); + + // the conversion is like this: multiply from.count() with Factor::num + // /Factor::den and convert it to To::rep, all this without + // overflow/underflow. let's start by finding a suitable type that can hold + // both To, From and Factor::num + using IntermediateRep = + typename std::common_type::type; + + // safe conversion to IntermediateRep + IntermediateRep count = + lossless_integral_conversion(from.count(), ec); + if (ec) return {}; + // multiply with Factor::num without overflow or underflow + if (detail::const_check(Factor::num != 1)) { + const auto max1 = detail::max_value() / Factor::num; + if (count > max1) { + ec = 1; + return {}; + } + const auto min1 = + (std::numeric_limits::min)() / Factor::num; + if (count < min1) { + ec = 1; + return {}; + } + count *= Factor::num; + } + + if (detail::const_check(Factor::den != 1)) count /= Factor::den; + auto tocount = lossless_integral_conversion(count, ec); + return ec ? To() : To(tocount); +} + +/** + * safe duration_cast between floating point durations + */ +template ::value), + FMT_ENABLE_IF(std::is_floating_point::value)> +To safe_duration_cast(std::chrono::duration from, + int& ec) { + using From = std::chrono::duration; + ec = 0; + if (std::isnan(from.count())) { + // nan in, gives nan out. easy. 
+ return To{std::numeric_limits::quiet_NaN()}; + } + // maybe we should also check if from is denormal, and decide what to do about + // it. + + // +-inf should be preserved. + if (std::isinf(from.count())) { + return To{from.count()}; + } + + // the basic idea is that we need to convert from count() in the from type + // to count() in the To type, by multiplying it with this: + struct Factor + : std::ratio_divide {}; + + static_assert(Factor::num > 0, "num must be positive"); + static_assert(Factor::den > 0, "den must be positive"); + + // the conversion is like this: multiply from.count() with Factor::num + // /Factor::den and convert it to To::rep, all this without + // overflow/underflow. let's start by finding a suitable type that can hold + // both To, From and Factor::num + using IntermediateRep = + typename std::common_type::type; + + // force conversion of From::rep -> IntermediateRep to be safe, + // even if it will never happen be narrowing in this context. + IntermediateRep count = + safe_float_conversion(from.count(), ec); + if (ec) { + return {}; + } + + // multiply with Factor::num without overflow or underflow + if (detail::const_check(Factor::num != 1)) { + constexpr auto max1 = detail::max_value() / + static_cast(Factor::num); + if (count > max1) { + ec = 1; + return {}; + } + constexpr auto min1 = std::numeric_limits::lowest() / + static_cast(Factor::num); + if (count < min1) { + ec = 1; + return {}; + } + count *= static_cast(Factor::num); + } + + // this can't go wrong, right? den>0 is checked earlier. + if (detail::const_check(Factor::den != 1)) { + using common_t = typename std::common_type::type; + count /= static_cast(Factor::den); + } + + // convert to the to type, safely + using ToRep = typename To::rep; + + const ToRep tocount = safe_float_conversion(count, ec); + if (ec) { + return {}; + } + return To{tocount}; +} +} // namespace safe_duration_cast +#endif + +// Prevents expansion of a preceding token as a function-style macro. +// Usage: f FMT_NOMACRO() +#define FMT_NOMACRO + +namespace detail { +template struct null {}; +inline null<> localtime_r FMT_NOMACRO(...) { return null<>(); } +inline null<> localtime_s(...) { return null<>(); } +inline null<> gmtime_r(...) { return null<>(); } +inline null<> gmtime_s(...) { return null<>(); } + +inline const std::locale& get_classic_locale() { + static const auto& locale = std::locale::classic(); + return locale; +} + +template struct codecvt_result { + static constexpr const size_t max_size = 32; + CodeUnit buf[max_size]; + CodeUnit* end; +}; +template +constexpr const size_t codecvt_result::max_size; + +template +void write_codecvt(codecvt_result& out, string_view in_buf, + const std::locale& loc) { +#if FMT_CLANG_VERSION +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wdeprecated" + auto& f = std::use_facet>(loc); +# pragma clang diagnostic pop +#else + auto& f = std::use_facet>(loc); +#endif + auto mb = std::mbstate_t(); + const char* from_next = nullptr; + auto result = f.in(mb, in_buf.begin(), in_buf.end(), from_next, + std::begin(out.buf), std::end(out.buf), out.end); + if (result != std::codecvt_base::ok) + FMT_THROW(format_error("failed to format time")); +} + +template +auto write_encoded_tm_str(OutputIt out, string_view in, const std::locale& loc) + -> OutputIt { + if (detail::is_utf8() && loc != get_classic_locale()) { + // char16_t and char32_t codecvts are broken in MSVC (linkage errors) and + // gcc-4. 
+#if FMT_MSC_VER != 0 || \ + (defined(__GLIBCXX__) && !defined(_GLIBCXX_USE_DUAL_ABI)) + // The _GLIBCXX_USE_DUAL_ABI macro is always defined in libstdc++ from gcc-5 + // and newer. + using code_unit = wchar_t; +#else + using code_unit = char32_t; +#endif + + using unit_t = codecvt_result; + unit_t unit; + write_codecvt(unit, in, loc); + // In UTF-8 is used one to four one-byte code units. + auto&& buf = basic_memory_buffer(); + for (code_unit* p = unit.buf; p != unit.end; ++p) { + uint32_t c = static_cast(*p); + if (sizeof(code_unit) == 2 && c >= 0xd800 && c <= 0xdfff) { + // surrogate pair + ++p; + if (p == unit.end || (c & 0xfc00) != 0xd800 || + (*p & 0xfc00) != 0xdc00) { + FMT_THROW(format_error("failed to format time")); + } + c = (c << 10) + static_cast(*p) - 0x35fdc00; + } + if (c < 0x80) { + buf.push_back(static_cast(c)); + } else if (c < 0x800) { + buf.push_back(static_cast(0xc0 | (c >> 6))); + buf.push_back(static_cast(0x80 | (c & 0x3f))); + } else if ((c >= 0x800 && c <= 0xd7ff) || (c >= 0xe000 && c <= 0xffff)) { + buf.push_back(static_cast(0xe0 | (c >> 12))); + buf.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); + buf.push_back(static_cast(0x80 | (c & 0x3f))); + } else if (c >= 0x10000 && c <= 0x10ffff) { + buf.push_back(static_cast(0xf0 | (c >> 18))); + buf.push_back(static_cast(0x80 | ((c & 0x3ffff) >> 12))); + buf.push_back(static_cast(0x80 | ((c & 0xfff) >> 6))); + buf.push_back(static_cast(0x80 | (c & 0x3f))); + } else { + FMT_THROW(format_error("failed to format time")); + } + } + return copy_str(buf.data(), buf.data() + buf.size(), out); + } + return copy_str(in.data(), in.data() + in.size(), out); +} + +template ::value)> +auto write_tm_str(OutputIt out, string_view sv, const std::locale& loc) + -> OutputIt { + codecvt_result unit; + write_codecvt(unit, sv, loc); + return copy_str(unit.buf, unit.end, out); +} + +template ::value)> +auto write_tm_str(OutputIt out, string_view sv, const std::locale& loc) + -> OutputIt { + return write_encoded_tm_str(out, sv, loc); +} + +template +inline void do_write(buffer& buf, const std::tm& time, + const std::locale& loc, char format, char modifier) { + auto&& format_buf = formatbuf>(buf); + auto&& os = std::basic_ostream(&format_buf); + os.imbue(loc); + using iterator = std::ostreambuf_iterator; + const auto& facet = std::use_facet>(loc); + auto end = facet.put(os, os, Char(' '), &time, format, modifier); + if (end.failed()) FMT_THROW(format_error("failed to format time")); +} + +template ::value)> +auto write(OutputIt out, const std::tm& time, const std::locale& loc, + char format, char modifier = 0) -> OutputIt { + auto&& buf = get_buffer(out); + do_write(buf, time, loc, format, modifier); + return buf.out(); +} + +template ::value)> +auto write(OutputIt out, const std::tm& time, const std::locale& loc, + char format, char modifier = 0) -> OutputIt { + auto&& buf = basic_memory_buffer(); + do_write(buf, time, loc, format, modifier); + return write_encoded_tm_str(out, string_view(buf.data(), buf.size()), loc); +} + +} // namespace detail + +FMT_MODULE_EXPORT_BEGIN + +/** + Converts given time since epoch as ``std::time_t`` value into calendar time, + expressed in local time. Unlike ``std::localtime``, this function is + thread-safe on most platforms. 
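+
+   **Example** (illustrative)::
+
+     std::tm tm = fmt::localtime(std::time(nullptr));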
+ */ +inline std::tm localtime(std::time_t time) { + struct dispatcher { + std::time_t time_; + std::tm tm_; + + dispatcher(std::time_t t) : time_(t) {} + + bool run() { + using namespace fmt::detail; + return handle(localtime_r(&time_, &tm_)); + } + + bool handle(std::tm* tm) { return tm != nullptr; } + + bool handle(detail::null<>) { + using namespace fmt::detail; + return fallback(localtime_s(&tm_, &time_)); + } + + bool fallback(int res) { return res == 0; } + +#if !FMT_MSC_VER + bool fallback(detail::null<>) { + using namespace fmt::detail; + std::tm* tm = std::localtime(&time_); + if (tm) tm_ = *tm; + return tm != nullptr; + } +#endif + }; + dispatcher lt(time); + // Too big time values may be unsupported. + if (!lt.run()) FMT_THROW(format_error("time_t value out of range")); + return lt.tm_; +} + +inline std::tm localtime( + std::chrono::time_point time_point) { + return localtime(std::chrono::system_clock::to_time_t(time_point)); +} + +/** + Converts given time since epoch as ``std::time_t`` value into calendar time, + expressed in Coordinated Universal Time (UTC). Unlike ``std::gmtime``, this + function is thread-safe on most platforms. + */ +inline std::tm gmtime(std::time_t time) { + struct dispatcher { + std::time_t time_; + std::tm tm_; + + dispatcher(std::time_t t) : time_(t) {} + + bool run() { + using namespace fmt::detail; + return handle(gmtime_r(&time_, &tm_)); + } + + bool handle(std::tm* tm) { return tm != nullptr; } + + bool handle(detail::null<>) { + using namespace fmt::detail; + return fallback(gmtime_s(&tm_, &time_)); + } + + bool fallback(int res) { return res == 0; } + +#if !FMT_MSC_VER + bool fallback(detail::null<>) { + std::tm* tm = std::gmtime(&time_); + if (tm) tm_ = *tm; + return tm != nullptr; + } +#endif + }; + dispatcher gt(time); + // Too big time values may be unsupported. + if (!gt.run()) FMT_THROW(format_error("time_t value out of range")); + return gt.tm_; +} + +inline std::tm gmtime( + std::chrono::time_point time_point) { + return gmtime(std::chrono::system_clock::to_time_t(time_point)); +} + +FMT_BEGIN_DETAIL_NAMESPACE + +// Writes two-digit numbers a, b and c separated by sep to buf. +// The method by Pavel Novikov based on +// https://johnnylee-sde.github.io/Fast-unsigned-integer-to-time-string/. +inline void write_digit2_separated(char* buf, unsigned a, unsigned b, + unsigned c, char sep) { + unsigned long long digits = + a | (b << 24) | (static_cast(c) << 48); + // Convert each value to BCD. + // We have x = a * 10 + b and we want to convert it to BCD y = a * 16 + b. + // The difference is + // y - x = a * 6 + // a can be found from x: + // a = floor(x / 10) + // then + // y = x + a * 6 = x + floor(x / 10) * 6 + // floor(x / 10) is (x * 205) >> 11 (needs 16 bits). + digits += (((digits * 205) >> 11) & 0x000f00000f00000f) * 6; + // Put low nibbles to high bytes and high nibbles to low bytes. + digits = ((digits & 0x00f00000f00000f0) >> 4) | + ((digits & 0x000f00000f00000f) << 8); + auto usep = static_cast(sep); + // Add ASCII '0' to each digit byte and insert separators. 
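+  // Worked example (illustrative): a = 12, b = 34, c = 56, sep = ':' writes
+  // the eight bytes "12:34:56" into buf.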
+ digits |= 0x3030003030003030 | (usep << 16) | (usep << 40); + + constexpr const size_t len = 8; + if (const_check(is_big_endian())) { + char tmp[len]; + std::memcpy(tmp, &digits, len); + std::reverse_copy(tmp, tmp + len, buf); + } else { + std::memcpy(buf, &digits, len); + } +} + +template FMT_CONSTEXPR inline const char* get_units() { + if (std::is_same::value) return "as"; + if (std::is_same::value) return "fs"; + if (std::is_same::value) return "ps"; + if (std::is_same::value) return "ns"; + if (std::is_same::value) return "µs"; + if (std::is_same::value) return "ms"; + if (std::is_same::value) return "cs"; + if (std::is_same::value) return "ds"; + if (std::is_same>::value) return "s"; + if (std::is_same::value) return "das"; + if (std::is_same::value) return "hs"; + if (std::is_same::value) return "ks"; + if (std::is_same::value) return "Ms"; + if (std::is_same::value) return "Gs"; + if (std::is_same::value) return "Ts"; + if (std::is_same::value) return "Ps"; + if (std::is_same::value) return "Es"; + if (std::is_same>::value) return "m"; + if (std::is_same>::value) return "h"; + return nullptr; +} + +enum class numeric_system { + standard, + // Alternative numeric system, e.g. 十二 instead of 12 in ja_JP locale. + alternative +}; + +// Parses a put_time-like format string and invokes handler actions. +template +FMT_CONSTEXPR const Char* parse_chrono_format(const Char* begin, + const Char* end, + Handler&& handler) { + auto ptr = begin; + while (ptr != end) { + auto c = *ptr; + if (c == '}') break; + if (c != '%') { + ++ptr; + continue; + } + if (begin != ptr) handler.on_text(begin, ptr); + ++ptr; // consume '%' + if (ptr == end) FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case '%': + handler.on_text(ptr - 1, ptr); + break; + case 'n': { + const Char newline[] = {'\n'}; + handler.on_text(newline, newline + 1); + break; + } + case 't': { + const Char tab[] = {'\t'}; + handler.on_text(tab, tab + 1); + break; + } + // Year: + case 'Y': + handler.on_year(numeric_system::standard); + break; + case 'y': + handler.on_short_year(numeric_system::standard); + break; + case 'C': + handler.on_century(numeric_system::standard); + break; + case 'G': + handler.on_iso_week_based_year(); + break; + case 'g': + handler.on_iso_week_based_short_year(); + break; + // Day of the week: + case 'a': + handler.on_abbr_weekday(); + break; + case 'A': + handler.on_full_weekday(); + break; + case 'w': + handler.on_dec0_weekday(numeric_system::standard); + break; + case 'u': + handler.on_dec1_weekday(numeric_system::standard); + break; + // Month: + case 'b': + case 'h': + handler.on_abbr_month(); + break; + case 'B': + handler.on_full_month(); + break; + case 'm': + handler.on_dec_month(numeric_system::standard); + break; + // Day of the year/month: + case 'U': + handler.on_dec0_week_of_year(numeric_system::standard); + break; + case 'W': + handler.on_dec1_week_of_year(numeric_system::standard); + break; + case 'V': + handler.on_iso_week_of_year(numeric_system::standard); + break; + case 'j': + handler.on_day_of_year(); + break; + case 'd': + handler.on_day_of_month(numeric_system::standard); + break; + case 'e': + handler.on_day_of_month_space(numeric_system::standard); + break; + // Hour, minute, second: + case 'H': + handler.on_24_hour(numeric_system::standard); + break; + case 'I': + handler.on_12_hour(numeric_system::standard); + break; + case 'M': + handler.on_minute(numeric_system::standard); + break; + case 'S': + handler.on_second(numeric_system::standard); + break; + // 
Other: + case 'c': + handler.on_datetime(numeric_system::standard); + break; + case 'x': + handler.on_loc_date(numeric_system::standard); + break; + case 'X': + handler.on_loc_time(numeric_system::standard); + break; + case 'D': + handler.on_us_date(); + break; + case 'F': + handler.on_iso_date(); + break; + case 'r': + handler.on_12_hour_time(); + break; + case 'R': + handler.on_24_hour_time(); + break; + case 'T': + handler.on_iso_time(); + break; + case 'p': + handler.on_am_pm(); + break; + case 'Q': + handler.on_duration_value(); + break; + case 'q': + handler.on_duration_unit(); + break; + case 'z': + handler.on_utc_offset(); + break; + case 'Z': + handler.on_tz_name(); + break; + // Alternative representation: + case 'E': { + if (ptr == end) FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case 'Y': + handler.on_year(numeric_system::alternative); + break; + case 'y': + handler.on_offset_year(); + break; + case 'C': + handler.on_century(numeric_system::alternative); + break; + case 'c': + handler.on_datetime(numeric_system::alternative); + break; + case 'x': + handler.on_loc_date(numeric_system::alternative); + break; + case 'X': + handler.on_loc_time(numeric_system::alternative); + break; + default: + FMT_THROW(format_error("invalid format")); + } + break; + } + case 'O': + if (ptr == end) FMT_THROW(format_error("invalid format")); + c = *ptr++; + switch (c) { + case 'y': + handler.on_short_year(numeric_system::alternative); + break; + case 'm': + handler.on_dec_month(numeric_system::alternative); + break; + case 'U': + handler.on_dec0_week_of_year(numeric_system::alternative); + break; + case 'W': + handler.on_dec1_week_of_year(numeric_system::alternative); + break; + case 'V': + handler.on_iso_week_of_year(numeric_system::alternative); + break; + case 'd': + handler.on_day_of_month(numeric_system::alternative); + break; + case 'e': + handler.on_day_of_month_space(numeric_system::alternative); + break; + case 'w': + handler.on_dec0_weekday(numeric_system::alternative); + break; + case 'u': + handler.on_dec1_weekday(numeric_system::alternative); + break; + case 'H': + handler.on_24_hour(numeric_system::alternative); + break; + case 'I': + handler.on_12_hour(numeric_system::alternative); + break; + case 'M': + handler.on_minute(numeric_system::alternative); + break; + case 'S': + handler.on_second(numeric_system::alternative); + break; + default: + FMT_THROW(format_error("invalid format")); + } + break; + default: + FMT_THROW(format_error("invalid format")); + } + begin = ptr; + } + if (begin != ptr) handler.on_text(begin, ptr); + return ptr; +} + +template struct null_chrono_spec_handler { + FMT_CONSTEXPR void unsupported() { + static_cast(this)->unsupported(); + } + FMT_CONSTEXPR void on_year(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_short_year(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_offset_year() { unsupported(); } + FMT_CONSTEXPR void on_century(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_iso_week_based_year() { unsupported(); } + FMT_CONSTEXPR void on_iso_week_based_short_year() { unsupported(); } + FMT_CONSTEXPR void on_abbr_weekday() { unsupported(); } + FMT_CONSTEXPR void on_full_weekday() { unsupported(); } + FMT_CONSTEXPR void on_dec0_weekday(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_dec1_weekday(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_abbr_month() { unsupported(); } + FMT_CONSTEXPR void on_full_month() { unsupported(); } + FMT_CONSTEXPR void 
on_dec_month(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_dec0_week_of_year(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_dec1_week_of_year(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_iso_week_of_year(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_day_of_year() { unsupported(); } + FMT_CONSTEXPR void on_day_of_month(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_day_of_month_space(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_24_hour(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_12_hour(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_minute(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_second(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_datetime(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_loc_date(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_loc_time(numeric_system) { unsupported(); } + FMT_CONSTEXPR void on_us_date() { unsupported(); } + FMT_CONSTEXPR void on_iso_date() { unsupported(); } + FMT_CONSTEXPR void on_12_hour_time() { unsupported(); } + FMT_CONSTEXPR void on_24_hour_time() { unsupported(); } + FMT_CONSTEXPR void on_iso_time() { unsupported(); } + FMT_CONSTEXPR void on_am_pm() { unsupported(); } + FMT_CONSTEXPR void on_duration_value() { unsupported(); } + FMT_CONSTEXPR void on_duration_unit() { unsupported(); } + FMT_CONSTEXPR void on_utc_offset() { unsupported(); } + FMT_CONSTEXPR void on_tz_name() { unsupported(); } +}; + +struct tm_format_checker : null_chrono_spec_handler { + FMT_NORETURN void unsupported() { FMT_THROW(format_error("no format")); } + + template + FMT_CONSTEXPR void on_text(const Char*, const Char*) {} + FMT_CONSTEXPR void on_year(numeric_system) {} + FMT_CONSTEXPR void on_short_year(numeric_system) {} + FMT_CONSTEXPR void on_offset_year() {} + FMT_CONSTEXPR void on_century(numeric_system) {} + FMT_CONSTEXPR void on_iso_week_based_year() {} + FMT_CONSTEXPR void on_iso_week_based_short_year() {} + FMT_CONSTEXPR void on_abbr_weekday() {} + FMT_CONSTEXPR void on_full_weekday() {} + FMT_CONSTEXPR void on_dec0_weekday(numeric_system) {} + FMT_CONSTEXPR void on_dec1_weekday(numeric_system) {} + FMT_CONSTEXPR void on_abbr_month() {} + FMT_CONSTEXPR void on_full_month() {} + FMT_CONSTEXPR void on_dec_month(numeric_system) {} + FMT_CONSTEXPR void on_dec0_week_of_year(numeric_system) {} + FMT_CONSTEXPR void on_dec1_week_of_year(numeric_system) {} + FMT_CONSTEXPR void on_iso_week_of_year(numeric_system) {} + FMT_CONSTEXPR void on_day_of_year() {} + FMT_CONSTEXPR void on_day_of_month(numeric_system) {} + FMT_CONSTEXPR void on_day_of_month_space(numeric_system) {} + FMT_CONSTEXPR void on_24_hour(numeric_system) {} + FMT_CONSTEXPR void on_12_hour(numeric_system) {} + FMT_CONSTEXPR void on_minute(numeric_system) {} + FMT_CONSTEXPR void on_second(numeric_system) {} + FMT_CONSTEXPR void on_datetime(numeric_system) {} + FMT_CONSTEXPR void on_loc_date(numeric_system) {} + FMT_CONSTEXPR void on_loc_time(numeric_system) {} + FMT_CONSTEXPR void on_us_date() {} + FMT_CONSTEXPR void on_iso_date() {} + FMT_CONSTEXPR void on_12_hour_time() {} + FMT_CONSTEXPR void on_24_hour_time() {} + FMT_CONSTEXPR void on_iso_time() {} + FMT_CONSTEXPR void on_am_pm() {} + FMT_CONSTEXPR void on_utc_offset() {} + FMT_CONSTEXPR void on_tz_name() {} +}; + +inline const char* tm_wday_full_name(int wday) { + static constexpr const char* full_name_list[] = { + "Sunday", "Monday", "Tuesday", "Wednesday", + "Thursday", "Friday", "Saturday"}; + return 
wday >= 0 && wday <= 6 ? full_name_list[wday] : "?"; +} +inline const char* tm_wday_short_name(int wday) { + static constexpr const char* short_name_list[] = {"Sun", "Mon", "Tue", "Wed", + "Thu", "Fri", "Sat"}; + return wday >= 0 && wday <= 6 ? short_name_list[wday] : "???"; +} + +inline const char* tm_mon_full_name(int mon) { + static constexpr const char* full_name_list[] = { + "January", "February", "March", "April", "May", "June", + "July", "August", "September", "October", "November", "December"}; + return mon >= 0 && mon <= 11 ? full_name_list[mon] : "?"; +} +inline const char* tm_mon_short_name(int mon) { + static constexpr const char* short_name_list[] = { + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec", + }; + return mon >= 0 && mon <= 11 ? short_name_list[mon] : "???"; +} + +template +struct has_member_data_tm_gmtoff : std::false_type {}; +template +struct has_member_data_tm_gmtoff> + : std::true_type {}; + +template +struct has_member_data_tm_zone : std::false_type {}; +template +struct has_member_data_tm_zone> + : std::true_type {}; + +#if FMT_USE_TZSET +inline void tzset_once() { + static bool init = []() -> bool { + _tzset(); + return true; + }(); + ignore_unused(init); +} +#endif + +template class tm_writer { + private: + static constexpr int days_per_week = 7; + + const std::locale& loc_; + const bool is_classic_; + OutputIt out_; + const std::tm& tm_; + + auto tm_sec() const noexcept -> int { + FMT_ASSERT(tm_.tm_sec >= 0 && tm_.tm_sec <= 61, ""); + return tm_.tm_sec; + } + auto tm_min() const noexcept -> int { + FMT_ASSERT(tm_.tm_min >= 0 && tm_.tm_min <= 59, ""); + return tm_.tm_min; + } + auto tm_hour() const noexcept -> int { + FMT_ASSERT(tm_.tm_hour >= 0 && tm_.tm_hour <= 23, ""); + return tm_.tm_hour; + } + auto tm_mday() const noexcept -> int { + FMT_ASSERT(tm_.tm_mday >= 1 && tm_.tm_mday <= 31, ""); + return tm_.tm_mday; + } + auto tm_mon() const noexcept -> int { + FMT_ASSERT(tm_.tm_mon >= 0 && tm_.tm_mon <= 11, ""); + return tm_.tm_mon; + } + auto tm_year() const noexcept -> long long { return 1900ll + tm_.tm_year; } + auto tm_wday() const noexcept -> int { + FMT_ASSERT(tm_.tm_wday >= 0 && tm_.tm_wday <= 6, ""); + return tm_.tm_wday; + } + auto tm_yday() const noexcept -> int { + FMT_ASSERT(tm_.tm_yday >= 0 && tm_.tm_yday <= 365, ""); + return tm_.tm_yday; + } + + auto tm_hour12() const noexcept -> int { + const auto h = tm_hour(); + const auto z = h < 12 ? h : h - 12; + return z == 0 ? 12 : z; + } + + // POSIX and the C Standard are unclear or inconsistent about what %C and %y + // do if the year is negative or exceeds 9999. Use the convention that %C + // concatenated with %y yields the same output as %Y, and that %Y contains at + // least 4 characters, with more only if necessary. + auto split_year_lower(long long year) const noexcept -> int { + auto l = year % 100; + if (l < 0) l = -l; // l in [0, 99] + return static_cast(l); + } + + // Algorithm: + // https://en.wikipedia.org/wiki/ISO_week_date#Calculating_the_week_number_from_a_month_and_day_of_the_month_or_ordinal_date + auto iso_year_weeks(long long curr_year) const noexcept -> int { + const auto prev_year = curr_year - 1; + const auto curr_p = + (curr_year + curr_year / 4 - curr_year / 100 + curr_year / 400) % + days_per_week; + const auto prev_p = + (prev_year + prev_year / 4 - prev_year / 100 + prev_year / 400) % + days_per_week; + return 52 + ((curr_p == 4 || prev_p == 3) ? 
1 : 0); + } + auto iso_week_num(int tm_yday, int tm_wday) const noexcept -> int { + return (tm_yday + 11 - (tm_wday == 0 ? days_per_week : tm_wday)) / + days_per_week; + } + auto tm_iso_week_year() const noexcept -> long long { + const auto year = tm_year(); + const auto w = iso_week_num(tm_yday(), tm_wday()); + if (w < 1) return year - 1; + if (w > iso_year_weeks(year)) return year + 1; + return year; + } + auto tm_iso_week_of_year() const noexcept -> int { + const auto year = tm_year(); + const auto w = iso_week_num(tm_yday(), tm_wday()); + if (w < 1) return iso_year_weeks(year - 1); + if (w > iso_year_weeks(year)) return 1; + return w; + } + + void write1(int value) { + *out_++ = static_cast('0' + to_unsigned(value) % 10); + } + void write2(int value) { + const char* d = digits2(to_unsigned(value) % 100); + *out_++ = *d++; + *out_++ = *d; + } + + void write_year_extended(long long year) { + // At least 4 characters. + int width = 4; + if (year < 0) { + *out_++ = '-'; + year = 0 - year; + --width; + } + uint32_or_64_or_128_t n = to_unsigned(year); + const int num_digits = count_digits(n); + if (width > num_digits) out_ = std::fill_n(out_, width - num_digits, '0'); + out_ = format_decimal(out_, n, num_digits).end; + } + void write_year(long long year) { + if (year >= 0 && year < 10000) { + write2(static_cast(year / 100)); + write2(static_cast(year % 100)); + } else { + write_year_extended(year); + } + } + + void write_utc_offset(long offset) { + if (offset < 0) { + *out_++ = '-'; + offset = -offset; + } else { + *out_++ = '+'; + } + offset /= 60; + write2(static_cast(offset / 60)); + write2(static_cast(offset % 60)); + } + template ::value)> + void format_utc_offset_impl(const T& tm) { + write_utc_offset(tm.tm_gmtoff); + } + template ::value)> + void format_utc_offset_impl(const T& tm) { +#if defined(_WIN32) && defined(_UCRT) +# if FMT_USE_TZSET + tzset_once(); +# endif + long offset = 0; + _get_timezone(&offset); + if (tm.tm_isdst) { + long dstbias = 0; + _get_dstbias(&dstbias); + offset += dstbias; + } + write_utc_offset(-offset); +#else + ignore_unused(tm); + format_localized('z'); +#endif + } + + template ::value)> + void format_tz_name_impl(const T& tm) { + if (is_classic_) + out_ = write_tm_str(out_, tm.tm_zone, loc_); + else + format_localized('Z'); + } + template ::value)> + void format_tz_name_impl(const T&) { + format_localized('Z'); + } + + void format_localized(char format, char modifier = 0) { + out_ = write(out_, tm_, loc_, format, modifier); + } + + public: + tm_writer(const std::locale& loc, OutputIt out, const std::tm& tm) + : loc_(loc), + is_classic_(loc_ == get_classic_locale()), + out_(out), + tm_(tm) {} + + OutputIt out() const { return out_; } + + FMT_CONSTEXPR void on_text(const Char* begin, const Char* end) { + out_ = copy_str(begin, end, out_); + } + + void on_abbr_weekday() { + if (is_classic_) + out_ = write(out_, tm_wday_short_name(tm_wday())); + else + format_localized('a'); + } + void on_full_weekday() { + if (is_classic_) + out_ = write(out_, tm_wday_full_name(tm_wday())); + else + format_localized('A'); + } + void on_dec0_weekday(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) return write1(tm_wday()); + format_localized('w', 'O'); + } + void on_dec1_weekday(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) { + auto wday = tm_wday(); + write1(wday == 0 ? 
days_per_week : wday); + } else { + format_localized('u', 'O'); + } + } + + void on_abbr_month() { + if (is_classic_) + out_ = write(out_, tm_mon_short_name(tm_mon())); + else + format_localized('b'); + } + void on_full_month() { + if (is_classic_) + out_ = write(out_, tm_mon_full_name(tm_mon())); + else + format_localized('B'); + } + + void on_datetime(numeric_system ns) { + if (is_classic_) { + on_abbr_weekday(); + *out_++ = ' '; + on_abbr_month(); + *out_++ = ' '; + on_day_of_month_space(numeric_system::standard); + *out_++ = ' '; + on_iso_time(); + *out_++ = ' '; + on_year(numeric_system::standard); + } else { + format_localized('c', ns == numeric_system::standard ? '\0' : 'E'); + } + } + void on_loc_date(numeric_system ns) { + if (is_classic_) + on_us_date(); + else + format_localized('x', ns == numeric_system::standard ? '\0' : 'E'); + } + void on_loc_time(numeric_system ns) { + if (is_classic_) + on_iso_time(); + else + format_localized('X', ns == numeric_system::standard ? '\0' : 'E'); + } + void on_us_date() { + char buf[8]; + write_digit2_separated(buf, to_unsigned(tm_mon() + 1), + to_unsigned(tm_mday()), + to_unsigned(split_year_lower(tm_year())), '/'); + out_ = copy_str(std::begin(buf), std::end(buf), out_); + } + void on_iso_date() { + auto year = tm_year(); + char buf[10]; + size_t offset = 0; + if (year >= 0 && year < 10000) { + copy2(buf, digits2(static_cast(year / 100))); + } else { + offset = 4; + write_year_extended(year); + year = 0; + } + write_digit2_separated(buf + 2, static_cast(year % 100), + to_unsigned(tm_mon() + 1), to_unsigned(tm_mday()), + '-'); + out_ = copy_str(std::begin(buf) + offset, std::end(buf), out_); + } + + void on_utc_offset() { format_utc_offset_impl(tm_); } + void on_tz_name() { format_tz_name_impl(tm_); } + + void on_year(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) + return write_year(tm_year()); + format_localized('Y', 'E'); + } + void on_short_year(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) + return write2(split_year_lower(tm_year())); + format_localized('y', 'O'); + } + void on_offset_year() { + if (is_classic_) return write2(split_year_lower(tm_year())); + format_localized('y', 'E'); + } + + void on_century(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) { + auto year = tm_year(); + auto upper = year / 100; + if (year >= -99 && year < 0) { + // Zero upper on negative year. + *out_++ = '-'; + *out_++ = '0'; + } else if (upper >= 0 && upper < 100) { + write2(static_cast(upper)); + } else { + out_ = write(out_, upper); + } + } else { + format_localized('C', 'E'); + } + } + + void on_dec_month(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) + return write2(tm_mon() + 1); + format_localized('m', 'O'); + } + + void on_dec0_week_of_year(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) + return write2((tm_yday() + days_per_week - tm_wday()) / days_per_week); + format_localized('U', 'O'); + } + void on_dec1_week_of_year(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) { + auto wday = tm_wday(); + write2((tm_yday() + days_per_week - + (wday == 0 ? 
(days_per_week - 1) : (wday - 1))) / + days_per_week); + } else { + format_localized('W', 'O'); + } + } + void on_iso_week_of_year(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) + return write2(tm_iso_week_of_year()); + format_localized('V', 'O'); + } + + void on_iso_week_based_year() { write_year(tm_iso_week_year()); } + void on_iso_week_based_short_year() { + write2(split_year_lower(tm_iso_week_year())); + } + + void on_day_of_year() { + auto yday = tm_yday() + 1; + write1(yday / 100); + write2(yday % 100); + } + void on_day_of_month(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) return write2(tm_mday()); + format_localized('d', 'O'); + } + void on_day_of_month_space(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) { + auto mday = to_unsigned(tm_mday()) % 100; + const char* d2 = digits2(mday); + *out_++ = mday < 10 ? ' ' : d2[0]; + *out_++ = d2[1]; + } else { + format_localized('e', 'O'); + } + } + + void on_24_hour(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) return write2(tm_hour()); + format_localized('H', 'O'); + } + void on_12_hour(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) + return write2(tm_hour12()); + format_localized('I', 'O'); + } + void on_minute(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) return write2(tm_min()); + format_localized('M', 'O'); + } + void on_second(numeric_system ns) { + if (is_classic_ || ns == numeric_system::standard) return write2(tm_sec()); + format_localized('S', 'O'); + } + + void on_12_hour_time() { + if (is_classic_) { + char buf[8]; + write_digit2_separated(buf, to_unsigned(tm_hour12()), + to_unsigned(tm_min()), to_unsigned(tm_sec()), ':'); + out_ = copy_str(std::begin(buf), std::end(buf), out_); + *out_++ = ' '; + on_am_pm(); + } else { + format_localized('r'); + } + } + void on_24_hour_time() { + write2(tm_hour()); + *out_++ = ':'; + write2(tm_min()); + } + void on_iso_time() { + char buf[8]; + write_digit2_separated(buf, to_unsigned(tm_hour()), to_unsigned(tm_min()), + to_unsigned(tm_sec()), ':'); + out_ = copy_str(std::begin(buf), std::end(buf), out_); + } + + void on_am_pm() { + if (is_classic_) { + *out_++ = tm_hour() < 12 ? 'A' : 'P'; + *out_++ = 'M'; + } else { + format_localized('p'); + } + } + + // These apply to chrono durations but not tm. + void on_duration_value() {} + void on_duration_unit() {} +}; + +struct chrono_format_checker : null_chrono_spec_handler { + FMT_NORETURN void unsupported() { FMT_THROW(format_error("no date")); } + + template + FMT_CONSTEXPR void on_text(const Char*, const Char*) {} + FMT_CONSTEXPR void on_24_hour(numeric_system) {} + FMT_CONSTEXPR void on_12_hour(numeric_system) {} + FMT_CONSTEXPR void on_minute(numeric_system) {} + FMT_CONSTEXPR void on_second(numeric_system) {} + FMT_CONSTEXPR void on_12_hour_time() {} + FMT_CONSTEXPR void on_24_hour_time() {} + FMT_CONSTEXPR void on_iso_time() {} + FMT_CONSTEXPR void on_am_pm() {} + FMT_CONSTEXPR void on_duration_value() {} + FMT_CONSTEXPR void on_duration_unit() {} +}; + +template ::value)> +inline bool isfinite(T) { + return true; +} + +// Converts value to Int and checks that it's in the range [0, upper). 
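+// (Illustrative: to_nonnegative_int(59, 60) yields Int(59); a negative or
+// too-large value hits the assert in the integral overload and the throw in
+// the floating-point overload below.)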
+template ::value)> +inline Int to_nonnegative_int(T value, Int upper) { + FMT_ASSERT(value >= 0 && to_unsigned(value) <= to_unsigned(upper), + "invalid value"); + (void)upper; + return static_cast(value); +} +template ::value)> +inline Int to_nonnegative_int(T value, Int upper) { + if (value < 0 || value > static_cast(upper)) + FMT_THROW(format_error("invalid value")); + return static_cast(value); +} + +template ::value)> +inline T mod(T x, int y) { + return x % static_cast(y); +} +template ::value)> +inline T mod(T x, int y) { + return std::fmod(x, static_cast(y)); +} + +// If T is an integral type, maps T to its unsigned counterpart, otherwise +// leaves it unchanged (unlike std::make_unsigned). +template ::value> +struct make_unsigned_or_unchanged { + using type = T; +}; + +template struct make_unsigned_or_unchanged { + using type = typename std::make_unsigned::type; +}; + +#if FMT_SAFE_DURATION_CAST +// throwing version of safe_duration_cast +template +To fmt_safe_duration_cast(std::chrono::duration from) { + int ec; + To to = safe_duration_cast::safe_duration_cast(from, ec); + if (ec) FMT_THROW(format_error("cannot format duration")); + return to; +} +#endif + +template ::value)> +inline std::chrono::duration get_milliseconds( + std::chrono::duration d) { + // this may overflow and/or the result may not fit in the + // target type. +#if FMT_SAFE_DURATION_CAST + using CommonSecondsType = + typename std::common_type::type; + const auto d_as_common = fmt_safe_duration_cast(d); + const auto d_as_whole_seconds = + fmt_safe_duration_cast(d_as_common); + // this conversion should be nonproblematic + const auto diff = d_as_common - d_as_whole_seconds; + const auto ms = + fmt_safe_duration_cast>(diff); + return ms; +#else + auto s = std::chrono::duration_cast(d); + return std::chrono::duration_cast(d - s); +#endif +} + +// Counts the number of fractional digits in the range [0, 18] according to the +// C++20 spec. If more than 18 fractional digits are required then returns 6 for +// microseconds precision. +template ::max() / 10)> +struct count_fractional_digits { + static constexpr int value = + Num % Den == 0 ? N : count_fractional_digits::value; +}; + +// Base case that doesn't instantiate any more templates +// in order to avoid overflow. +template +struct count_fractional_digits { + static constexpr int value = (Num % Den == 0) ? N : 6; +}; + +constexpr long long pow10(std::uint32_t n) { + return n == 0 ? 1 : 10 * pow10(n - 1); +} + +template ::is_signed)> +constexpr std::chrono::duration abs( + std::chrono::duration d) { + // We need to compare the duration using the count() method directly + // due to a compiler bug in clang-11 regarding the spaceship operator, + // when -Wzero-as-null-pointer-constant is enabled. + // In clang-12 the bug has been fixed. See + // https://bugs.llvm.org/show_bug.cgi?id=46235 and the reproducible example: + // https://www.godbolt.org/z/Knbb5joYx. + return d.count() >= d.zero().count() ? d : -d; +} + +template ::is_signed)> +constexpr std::chrono::duration abs( + std::chrono::duration d) { + return d; +} + +template ::value)> +OutputIt format_duration_value(OutputIt out, Rep val, int) { + return write(out, val); +} + +template ::value)> +OutputIt format_duration_value(OutputIt out, Rep val, int precision) { + auto specs = basic_format_specs(); + specs.precision = precision; + specs.type = precision >= 0 ? 
presentation_type::fixed_lower + : presentation_type::general_lower; + return write(out, val, specs); +} + +template +OutputIt copy_unit(string_view unit, OutputIt out, Char) { + return std::copy(unit.begin(), unit.end(), out); +} + +template +OutputIt copy_unit(string_view unit, OutputIt out, wchar_t) { + // This works when wchar_t is UTF-32 because units only contain characters + // that have the same representation in UTF-16 and UTF-32. + utf8_to_utf16 u(unit); + return std::copy(u.c_str(), u.c_str() + u.size(), out); +} + +template +OutputIt format_duration_unit(OutputIt out) { + if (const char* unit = get_units()) + return copy_unit(string_view(unit), out, Char()); + *out++ = '['; + out = write(out, Period::num); + if (const_check(Period::den != 1)) { + *out++ = '/'; + out = write(out, Period::den); + } + *out++ = ']'; + *out++ = 's'; + return out; +} + +class get_locale { + private: + union { + std::locale locale_; + }; + bool has_locale_ = false; + + public: + get_locale(bool localized, locale_ref loc) : has_locale_(localized) { + if (localized) + ::new (&locale_) std::locale(loc.template get()); + } + ~get_locale() { + if (has_locale_) locale_.~locale(); + } + operator const std::locale&() const { + return has_locale_ ? locale_ : get_classic_locale(); + } +}; + +template +struct chrono_formatter { + FormatContext& context; + OutputIt out; + int precision; + bool localized = false; + // rep is unsigned to avoid overflow. + using rep = + conditional_t::value && sizeof(Rep) < sizeof(int), + unsigned, typename make_unsigned_or_unchanged::type>; + rep val; + using seconds = std::chrono::duration; + seconds s; + using milliseconds = std::chrono::duration; + bool negative; + + using char_type = typename FormatContext::char_type; + using tm_writer_type = tm_writer; + + chrono_formatter(FormatContext& ctx, OutputIt o, + std::chrono::duration d) + : context(ctx), + out(o), + val(static_cast(d.count())), + negative(false) { + if (d.count() < 0) { + val = 0 - val; + negative = true; + } + + // this may overflow and/or the result may not fit in the + // target type. +#if FMT_SAFE_DURATION_CAST + // might need checked conversion (rep!=Rep) + auto tmpval = std::chrono::duration(val); + s = fmt_safe_duration_cast(tmpval); +#else + s = std::chrono::duration_cast( + std::chrono::duration(val)); +#endif + } + + // returns true if nan or inf, writes to out. + bool handle_nan_inf() { + if (isfinite(val)) { + return false; + } + if (isnan(val)) { + write_nan(); + return true; + } + // must be +-inf + if (val > 0) { + write_pinf(); + } else { + write_ninf(); + } + return true; + } + + Rep hour() const { return static_cast(mod((s.count() / 3600), 24)); } + + Rep hour12() const { + Rep hour = static_cast(mod((s.count() / 3600), 12)); + return hour <= 0 ? 
12 : hour; + } + + Rep minute() const { return static_cast(mod((s.count() / 60), 60)); } + Rep second() const { return static_cast(mod(s.count(), 60)); } + + std::tm time() const { + auto time = std::tm(); + time.tm_hour = to_nonnegative_int(hour(), 24); + time.tm_min = to_nonnegative_int(minute(), 60); + time.tm_sec = to_nonnegative_int(second(), 60); + return time; + } + + void write_sign() { + if (negative) { + *out++ = '-'; + negative = false; + } + } + + void write(Rep value, int width) { + write_sign(); + if (isnan(value)) return write_nan(); + uint32_or_64_or_128_t n = + to_unsigned(to_nonnegative_int(value, max_value())); + int num_digits = detail::count_digits(n); + if (width > num_digits) out = std::fill_n(out, width - num_digits, '0'); + out = format_decimal(out, n, num_digits).end; + } + + template void write_fractional_seconds(Duration d) { + FMT_ASSERT(!std::is_floating_point::value, ""); + constexpr auto num_fractional_digits = + count_fractional_digits::value; + + using subsecond_precision = std::chrono::duration< + typename std::common_type::type, + std::ratio<1, detail::pow10(num_fractional_digits)>>; + if (std::ratio_less::value) { + *out++ = '.'; + auto fractional = + detail::abs(d) - std::chrono::duration_cast(d); + auto subseconds = + std::chrono::treat_as_floating_point< + typename subsecond_precision::rep>::value + ? fractional.count() + : std::chrono::duration_cast(fractional) + .count(); + uint32_or_64_or_128_t n = + to_unsigned(to_nonnegative_int(subseconds, max_value())); + int num_digits = detail::count_digits(n); + if (num_fractional_digits > num_digits) + out = std::fill_n(out, num_fractional_digits - num_digits, '0'); + out = format_decimal(out, n, num_digits).end; + } + } + + void write_nan() { std::copy_n("nan", 3, out); } + void write_pinf() { std::copy_n("inf", 3, out); } + void write_ninf() { std::copy_n("-inf", 4, out); } + + template + void format_tm(const tm& time, Callback cb, Args... args) { + if (isnan(val)) return write_nan(); + get_locale loc(localized, context.locale()); + auto w = tm_writer_type(loc, out, time); + (w.*cb)(args...); + out = w.out(); + } + + void on_text(const char_type* begin, const char_type* end) { + std::copy(begin, end, out); + } + + // These are not implemented because durations don't have date information. 
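+  // (They exist only to satisfy the handler interface: chrono_format_checker
+  // already rejects date specifiers such as "%F" for durations at parse
+  // time, so none of these is reached in practice.)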
+ void on_abbr_weekday() {} + void on_full_weekday() {} + void on_dec0_weekday(numeric_system) {} + void on_dec1_weekday(numeric_system) {} + void on_abbr_month() {} + void on_full_month() {} + void on_datetime(numeric_system) {} + void on_loc_date(numeric_system) {} + void on_loc_time(numeric_system) {} + void on_us_date() {} + void on_iso_date() {} + void on_utc_offset() {} + void on_tz_name() {} + void on_year(numeric_system) {} + void on_short_year(numeric_system) {} + void on_offset_year() {} + void on_century(numeric_system) {} + void on_iso_week_based_year() {} + void on_iso_week_based_short_year() {} + void on_dec_month(numeric_system) {} + void on_dec0_week_of_year(numeric_system) {} + void on_dec1_week_of_year(numeric_system) {} + void on_iso_week_of_year(numeric_system) {} + void on_day_of_year() {} + void on_day_of_month(numeric_system) {} + void on_day_of_month_space(numeric_system) {} + + void on_24_hour(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) return write(hour(), 2); + auto time = tm(); + time.tm_hour = to_nonnegative_int(hour(), 24); + format_tm(time, &tm_writer_type::on_24_hour, ns); + } + + void on_12_hour(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) return write(hour12(), 2); + auto time = tm(); + time.tm_hour = to_nonnegative_int(hour12(), 12); + format_tm(time, &tm_writer_type::on_12_hour, ns); + } + + void on_minute(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) return write(minute(), 2); + auto time = tm(); + time.tm_min = to_nonnegative_int(minute(), 60); + format_tm(time, &tm_writer_type::on_minute, ns); + } + + void on_second(numeric_system ns) { + if (handle_nan_inf()) return; + + if (ns == numeric_system::standard) { + if (std::is_floating_point::value) { + constexpr auto num_fractional_digits = + count_fractional_digits::value; + auto buf = memory_buffer(); + format_to(std::back_inserter(buf), runtime("{:.{}f}"), + std::fmod(val * static_cast(Period::num) / + static_cast(Period::den), + 60), + num_fractional_digits); + if (negative) *out++ = '-'; + if (buf.size() < 2 || buf[1] == '.') *out++ = '0'; + out = std::copy(buf.begin(), buf.end(), out); + } else { + write(second(), 2); + write_fractional_seconds(std::chrono::duration(val)); + } + return; + } + auto time = tm(); + time.tm_sec = to_nonnegative_int(second(), 60); + format_tm(time, &tm_writer_type::on_second, ns); + } + + void on_12_hour_time() { + if (handle_nan_inf()) return; + format_tm(time(), &tm_writer_type::on_12_hour_time); + } + + void on_24_hour_time() { + if (handle_nan_inf()) { + *out++ = ':'; + handle_nan_inf(); + return; + } + + write(hour(), 2); + *out++ = ':'; + write(minute(), 2); + } + + void on_iso_time() { + on_24_hour_time(); + *out++ = ':'; + if (handle_nan_inf()) return; + on_second(numeric_system::standard); + } + + void on_am_pm() { + if (handle_nan_inf()) return; + format_tm(time(), &tm_writer_type::on_am_pm); + } + + void on_duration_value() { + if (handle_nan_inf()) return; + write_sign(); + out = format_duration_value(out, val, precision); + } + + void on_duration_unit() { + out = format_duration_unit(out); + } +}; + +FMT_END_DETAIL_NAMESPACE + +#if defined(__cpp_lib_chrono) && __cpp_lib_chrono >= 201907 +using weekday = std::chrono::weekday; +#else +// A fallback version of weekday. 
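+// Illustrative usage, valid for both this fallback and std::chrono::weekday:
+//
+//   fmt::print("{}\n", fmt::weekday(6));  // "Sat" via the formatter below
+//   fmt::weekday(7).c_encoding();         // 0: 7 normalizes to Sunday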
+class weekday { + private: + unsigned char value; + + public: + weekday() = default; + explicit constexpr weekday(unsigned wd) noexcept + : value(static_cast(wd != 7 ? wd : 0)) {} + constexpr unsigned c_encoding() const noexcept { return value; } +}; + +class year_month_day {}; +#endif + +// A rudimentary weekday formatter. +template struct formatter { + private: + bool localized = false; + + public: + FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) + -> decltype(ctx.begin()) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin != end && *begin == 'L') { + ++begin; + localized = true; + } + return begin; + } + + template + auto format(weekday wd, FormatContext& ctx) const -> decltype(ctx.out()) { + auto time = std::tm(); + time.tm_wday = static_cast(wd.c_encoding()); + detail::get_locale loc(localized, ctx.locale()); + auto w = detail::tm_writer(loc, ctx.out(), time); + w.on_abbr_weekday(); + return w.out(); + } +}; + +template +struct formatter, Char> { + private: + basic_format_specs specs; + int precision = -1; + using arg_ref_type = detail::arg_ref; + arg_ref_type width_ref; + arg_ref_type precision_ref; + bool localized = false; + basic_string_view format_str; + using duration = std::chrono::duration; + + struct spec_handler { + formatter& f; + basic_format_parse_context& context; + basic_string_view format_str; + + template FMT_CONSTEXPR arg_ref_type make_arg_ref(Id arg_id) { + context.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR arg_ref_type make_arg_ref(basic_string_view arg_id) { + context.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR arg_ref_type make_arg_ref(detail::auto_id) { + return arg_ref_type(context.next_arg_id()); + } + + void on_error(const char* msg) { FMT_THROW(format_error(msg)); } + FMT_CONSTEXPR void on_fill(basic_string_view fill) { + f.specs.fill = fill; + } + FMT_CONSTEXPR void on_align(align_t align) { f.specs.align = align; } + FMT_CONSTEXPR void on_width(int width) { f.specs.width = width; } + FMT_CONSTEXPR void on_precision(int _precision) { + f.precision = _precision; + } + FMT_CONSTEXPR void end_precision() {} + + template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { + f.width_ref = make_arg_ref(arg_id); + } + + template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { + f.precision_ref = make_arg_ref(arg_id); + } + }; + + using iterator = typename basic_format_parse_context::iterator; + struct parse_range { + iterator begin; + iterator end; + }; + + FMT_CONSTEXPR parse_range do_parse(basic_format_parse_context& ctx) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin == end || *begin == '}') return {begin, begin}; + spec_handler handler{*this, ctx, format_str}; + begin = detail::parse_align(begin, end, handler); + if (begin == end) return {begin, begin}; + begin = detail::parse_width(begin, end, handler); + if (begin == end) return {begin, begin}; + if (*begin == '.') { + if (std::is_floating_point::value) + begin = detail::parse_precision(begin, end, handler); + else + handler.on_error("precision not allowed for this argument type"); + } + if (begin != end && *begin == 'L') { + ++begin; + localized = true; + } + end = detail::parse_chrono_format(begin, end, + detail::chrono_format_checker()); + return {begin, end}; + } + + public: + FMT_CONSTEXPR auto parse(basic_format_parse_context& ctx) + -> decltype(ctx.begin()) { + auto range = do_parse(ctx); + format_str = basic_string_view( + &*range.begin, detail::to_unsigned(range.end - range.begin)); + return range.end; + 
} + + template + auto format(const duration& d, FormatContext& ctx) const + -> decltype(ctx.out()) { + auto specs_copy = specs; + auto precision_copy = precision; + auto begin = format_str.begin(), end = format_str.end(); + // As a possible future optimization, we could avoid extra copying if width + // is not specified. + basic_memory_buffer buf; + auto out = std::back_inserter(buf); + detail::handle_dynamic_spec(specs_copy.width, + width_ref, ctx); + detail::handle_dynamic_spec(precision_copy, + precision_ref, ctx); + if (begin == end || *begin == '}') { + out = detail::format_duration_value(out, d.count(), precision_copy); + detail::format_duration_unit(out); + } else { + detail::chrono_formatter f( + ctx, out, d); + f.precision = precision_copy; + f.localized = localized; + detail::parse_chrono_format(begin, end, f); + } + return detail::write( + ctx.out(), basic_string_view(buf.data(), buf.size()), specs_copy); + } +}; + +template +struct formatter, + Char> : formatter { + FMT_CONSTEXPR formatter() { + this->do_parse(default_specs, + default_specs + sizeof(default_specs) / sizeof(Char)); + } + + template + FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { + return this->do_parse(ctx.begin(), ctx.end(), true); + } + + template + auto format(std::chrono::time_point val, + FormatContext& ctx) const -> decltype(ctx.out()) { + return formatter::format(localtime(val), ctx); + } + + static constexpr const Char default_specs[] = {'%', 'F', ' ', '%', 'T'}; +}; + +template +constexpr const Char + formatter, + Char>::default_specs[]; + +template struct formatter { + private: + enum class spec { + unknown, + year_month_day, + hh_mm_ss, + }; + spec spec_ = spec::unknown; + basic_string_view specs; + + protected: + template + FMT_CONSTEXPR auto do_parse(It begin, It end, bool with_default = false) + -> It { + if (begin != end && *begin == ':') ++begin; + end = detail::parse_chrono_format(begin, end, detail::tm_format_checker()); + if (!with_default || end != begin) + specs = {begin, detail::to_unsigned(end - begin)}; + // basic_string_view<>::compare isn't constexpr before C++17. + if (specs.size() == 2 && specs[0] == Char('%')) { + if (specs[1] == Char('F')) + spec_ = spec::year_month_day; + else if (specs[1] == Char('T')) + spec_ = spec::hh_mm_ss; + } + return end; + } + + public: + template + FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { + return this->do_parse(ctx.begin(), ctx.end()); + } + + template + auto format(const std::tm& tm, FormatContext& ctx) const + -> decltype(ctx.out()) { + const auto loc_ref = ctx.locale(); + detail::get_locale loc(static_cast(loc_ref), loc_ref); + auto w = detail::tm_writer(loc, ctx.out(), tm); + if (spec_ == spec::year_month_day) + w.on_iso_date(); + else if (spec_ == spec::hh_mm_ss) + w.on_iso_time(); + else + detail::parse_chrono_format(specs.begin(), specs.end(), w); + return w.out(); + } +}; + +FMT_MODULE_EXPORT_END +FMT_END_NAMESPACE + +#endif // FMT_CHRONO_H_ diff --git a/external/fmt/include/fmt/color.h b/external/fmt/include/fmt/color.h new file mode 100644 index 00000000..762809ca --- /dev/null +++ b/external/fmt/include/fmt/color.h @@ -0,0 +1,694 @@ +// Formatting library for C++ - color support +// +// Copyright (c) 2018 - present, Victor Zverovich and fmt contributors +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_COLOR_H_ +#define FMT_COLOR_H_ + +#include "format.h" + +// __declspec(deprecated) is broken in some MSVC versions. 
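+// (On MSVC the macro consequently expands to nothing, so the deprecated
+// text_style operator& overloads below do not warn there.)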
+#if FMT_MSC_VER +# define FMT_DEPRECATED_NONMSVC +#else +# define FMT_DEPRECATED_NONMSVC FMT_DEPRECATED +#endif + +FMT_BEGIN_NAMESPACE +FMT_MODULE_EXPORT_BEGIN + +enum class color : uint32_t { + alice_blue = 0xF0F8FF, // rgb(240,248,255) + antique_white = 0xFAEBD7, // rgb(250,235,215) + aqua = 0x00FFFF, // rgb(0,255,255) + aquamarine = 0x7FFFD4, // rgb(127,255,212) + azure = 0xF0FFFF, // rgb(240,255,255) + beige = 0xF5F5DC, // rgb(245,245,220) + bisque = 0xFFE4C4, // rgb(255,228,196) + black = 0x000000, // rgb(0,0,0) + blanched_almond = 0xFFEBCD, // rgb(255,235,205) + blue = 0x0000FF, // rgb(0,0,255) + blue_violet = 0x8A2BE2, // rgb(138,43,226) + brown = 0xA52A2A, // rgb(165,42,42) + burly_wood = 0xDEB887, // rgb(222,184,135) + cadet_blue = 0x5F9EA0, // rgb(95,158,160) + chartreuse = 0x7FFF00, // rgb(127,255,0) + chocolate = 0xD2691E, // rgb(210,105,30) + coral = 0xFF7F50, // rgb(255,127,80) + cornflower_blue = 0x6495ED, // rgb(100,149,237) + cornsilk = 0xFFF8DC, // rgb(255,248,220) + crimson = 0xDC143C, // rgb(220,20,60) + cyan = 0x00FFFF, // rgb(0,255,255) + dark_blue = 0x00008B, // rgb(0,0,139) + dark_cyan = 0x008B8B, // rgb(0,139,139) + dark_golden_rod = 0xB8860B, // rgb(184,134,11) + dark_gray = 0xA9A9A9, // rgb(169,169,169) + dark_green = 0x006400, // rgb(0,100,0) + dark_khaki = 0xBDB76B, // rgb(189,183,107) + dark_magenta = 0x8B008B, // rgb(139,0,139) + dark_olive_green = 0x556B2F, // rgb(85,107,47) + dark_orange = 0xFF8C00, // rgb(255,140,0) + dark_orchid = 0x9932CC, // rgb(153,50,204) + dark_red = 0x8B0000, // rgb(139,0,0) + dark_salmon = 0xE9967A, // rgb(233,150,122) + dark_sea_green = 0x8FBC8F, // rgb(143,188,143) + dark_slate_blue = 0x483D8B, // rgb(72,61,139) + dark_slate_gray = 0x2F4F4F, // rgb(47,79,79) + dark_turquoise = 0x00CED1, // rgb(0,206,209) + dark_violet = 0x9400D3, // rgb(148,0,211) + deep_pink = 0xFF1493, // rgb(255,20,147) + deep_sky_blue = 0x00BFFF, // rgb(0,191,255) + dim_gray = 0x696969, // rgb(105,105,105) + dodger_blue = 0x1E90FF, // rgb(30,144,255) + fire_brick = 0xB22222, // rgb(178,34,34) + floral_white = 0xFFFAF0, // rgb(255,250,240) + forest_green = 0x228B22, // rgb(34,139,34) + fuchsia = 0xFF00FF, // rgb(255,0,255) + gainsboro = 0xDCDCDC, // rgb(220,220,220) + ghost_white = 0xF8F8FF, // rgb(248,248,255) + gold = 0xFFD700, // rgb(255,215,0) + golden_rod = 0xDAA520, // rgb(218,165,32) + gray = 0x808080, // rgb(128,128,128) + green = 0x008000, // rgb(0,128,0) + green_yellow = 0xADFF2F, // rgb(173,255,47) + honey_dew = 0xF0FFF0, // rgb(240,255,240) + hot_pink = 0xFF69B4, // rgb(255,105,180) + indian_red = 0xCD5C5C, // rgb(205,92,92) + indigo = 0x4B0082, // rgb(75,0,130) + ivory = 0xFFFFF0, // rgb(255,255,240) + khaki = 0xF0E68C, // rgb(240,230,140) + lavender = 0xE6E6FA, // rgb(230,230,250) + lavender_blush = 0xFFF0F5, // rgb(255,240,245) + lawn_green = 0x7CFC00, // rgb(124,252,0) + lemon_chiffon = 0xFFFACD, // rgb(255,250,205) + light_blue = 0xADD8E6, // rgb(173,216,230) + light_coral = 0xF08080, // rgb(240,128,128) + light_cyan = 0xE0FFFF, // rgb(224,255,255) + light_golden_rod_yellow = 0xFAFAD2, // rgb(250,250,210) + light_gray = 0xD3D3D3, // rgb(211,211,211) + light_green = 0x90EE90, // rgb(144,238,144) + light_pink = 0xFFB6C1, // rgb(255,182,193) + light_salmon = 0xFFA07A, // rgb(255,160,122) + light_sea_green = 0x20B2AA, // rgb(32,178,170) + light_sky_blue = 0x87CEFA, // rgb(135,206,250) + light_slate_gray = 0x778899, // rgb(119,136,153) + light_steel_blue = 0xB0C4DE, // rgb(176,196,222) + light_yellow = 0xFFFFE0, // rgb(255,255,224) + lime = 
0x00FF00, // rgb(0,255,0) + lime_green = 0x32CD32, // rgb(50,205,50) + linen = 0xFAF0E6, // rgb(250,240,230) + magenta = 0xFF00FF, // rgb(255,0,255) + maroon = 0x800000, // rgb(128,0,0) + medium_aquamarine = 0x66CDAA, // rgb(102,205,170) + medium_blue = 0x0000CD, // rgb(0,0,205) + medium_orchid = 0xBA55D3, // rgb(186,85,211) + medium_purple = 0x9370DB, // rgb(147,112,219) + medium_sea_green = 0x3CB371, // rgb(60,179,113) + medium_slate_blue = 0x7B68EE, // rgb(123,104,238) + medium_spring_green = 0x00FA9A, // rgb(0,250,154) + medium_turquoise = 0x48D1CC, // rgb(72,209,204) + medium_violet_red = 0xC71585, // rgb(199,21,133) + midnight_blue = 0x191970, // rgb(25,25,112) + mint_cream = 0xF5FFFA, // rgb(245,255,250) + misty_rose = 0xFFE4E1, // rgb(255,228,225) + moccasin = 0xFFE4B5, // rgb(255,228,181) + navajo_white = 0xFFDEAD, // rgb(255,222,173) + navy = 0x000080, // rgb(0,0,128) + old_lace = 0xFDF5E6, // rgb(253,245,230) + olive = 0x808000, // rgb(128,128,0) + olive_drab = 0x6B8E23, // rgb(107,142,35) + orange = 0xFFA500, // rgb(255,165,0) + orange_red = 0xFF4500, // rgb(255,69,0) + orchid = 0xDA70D6, // rgb(218,112,214) + pale_golden_rod = 0xEEE8AA, // rgb(238,232,170) + pale_green = 0x98FB98, // rgb(152,251,152) + pale_turquoise = 0xAFEEEE, // rgb(175,238,238) + pale_violet_red = 0xDB7093, // rgb(219,112,147) + papaya_whip = 0xFFEFD5, // rgb(255,239,213) + peach_puff = 0xFFDAB9, // rgb(255,218,185) + peru = 0xCD853F, // rgb(205,133,63) + pink = 0xFFC0CB, // rgb(255,192,203) + plum = 0xDDA0DD, // rgb(221,160,221) + powder_blue = 0xB0E0E6, // rgb(176,224,230) + purple = 0x800080, // rgb(128,0,128) + rebecca_purple = 0x663399, // rgb(102,51,153) + red = 0xFF0000, // rgb(255,0,0) + rosy_brown = 0xBC8F8F, // rgb(188,143,143) + royal_blue = 0x4169E1, // rgb(65,105,225) + saddle_brown = 0x8B4513, // rgb(139,69,19) + salmon = 0xFA8072, // rgb(250,128,114) + sandy_brown = 0xF4A460, // rgb(244,164,96) + sea_green = 0x2E8B57, // rgb(46,139,87) + sea_shell = 0xFFF5EE, // rgb(255,245,238) + sienna = 0xA0522D, // rgb(160,82,45) + silver = 0xC0C0C0, // rgb(192,192,192) + sky_blue = 0x87CEEB, // rgb(135,206,235) + slate_blue = 0x6A5ACD, // rgb(106,90,205) + slate_gray = 0x708090, // rgb(112,128,144) + snow = 0xFFFAFA, // rgb(255,250,250) + spring_green = 0x00FF7F, // rgb(0,255,127) + steel_blue = 0x4682B4, // rgb(70,130,180) + tan = 0xD2B48C, // rgb(210,180,140) + teal = 0x008080, // rgb(0,128,128) + thistle = 0xD8BFD8, // rgb(216,191,216) + tomato = 0xFF6347, // rgb(255,99,71) + turquoise = 0x40E0D0, // rgb(64,224,208) + violet = 0xEE82EE, // rgb(238,130,238) + wheat = 0xF5DEB3, // rgb(245,222,179) + white = 0xFFFFFF, // rgb(255,255,255) + white_smoke = 0xF5F5F5, // rgb(245,245,245) + yellow = 0xFFFF00, // rgb(255,255,0) + yellow_green = 0x9ACD32 // rgb(154,205,50) +}; // enum class color + +enum class terminal_color : uint8_t { + black = 30, + red, + green, + yellow, + blue, + magenta, + cyan, + white, + bright_black = 90, + bright_red, + bright_green, + bright_yellow, + bright_blue, + bright_magenta, + bright_cyan, + bright_white +}; + +enum class emphasis : uint8_t { + bold = 1, + faint = 1 << 1, + italic = 1 << 2, + underline = 1 << 3, + blink = 1 << 4, + reverse = 1 << 5, + conceal = 1 << 6, + strikethrough = 1 << 7, +}; + +// rgb is a struct for red, green and blue colors. +// Using the name "rgb" makes some editors show the color in a tooltip. 
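+// (Illustrative: rgb(0xFF7F50), rgb(255, 127, 80) and rgb(color::coral) all
+// describe the same color.)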
+struct rgb { + FMT_CONSTEXPR rgb() : r(0), g(0), b(0) {} + FMT_CONSTEXPR rgb(uint8_t r_, uint8_t g_, uint8_t b_) : r(r_), g(g_), b(b_) {} + FMT_CONSTEXPR rgb(uint32_t hex) + : r((hex >> 16) & 0xFF), g((hex >> 8) & 0xFF), b(hex & 0xFF) {} + FMT_CONSTEXPR rgb(color hex) + : r((uint32_t(hex) >> 16) & 0xFF), + g((uint32_t(hex) >> 8) & 0xFF), + b(uint32_t(hex) & 0xFF) {} + uint8_t r; + uint8_t g; + uint8_t b; +}; + +FMT_BEGIN_DETAIL_NAMESPACE + +// color is a struct of either a rgb color or a terminal color. +struct color_type { + FMT_CONSTEXPR color_type() noexcept : is_rgb(), value{} {} + FMT_CONSTEXPR color_type(color rgb_color) noexcept : is_rgb(true), value{} { + value.rgb_color = static_cast(rgb_color); + } + FMT_CONSTEXPR color_type(rgb rgb_color) noexcept : is_rgb(true), value{} { + value.rgb_color = (static_cast(rgb_color.r) << 16) | + (static_cast(rgb_color.g) << 8) | rgb_color.b; + } + FMT_CONSTEXPR color_type(terminal_color term_color) noexcept + : is_rgb(), value{} { + value.term_color = static_cast(term_color); + } + bool is_rgb; + union color_union { + uint8_t term_color; + uint32_t rgb_color; + } value; +}; + +FMT_END_DETAIL_NAMESPACE + +/** A text style consisting of foreground and background colors and emphasis. */ +class text_style { + public: + FMT_CONSTEXPR text_style(emphasis em = emphasis()) noexcept + : set_foreground_color(), set_background_color(), ems(em) {} + + FMT_CONSTEXPR text_style& operator|=(const text_style& rhs) { + if (!set_foreground_color) { + set_foreground_color = rhs.set_foreground_color; + foreground_color = rhs.foreground_color; + } else if (rhs.set_foreground_color) { + if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) + FMT_THROW(format_error("can't OR a terminal color")); + foreground_color.value.rgb_color |= rhs.foreground_color.value.rgb_color; + } + + if (!set_background_color) { + set_background_color = rhs.set_background_color; + background_color = rhs.background_color; + } else if (rhs.set_background_color) { + if (!background_color.is_rgb || !rhs.background_color.is_rgb) + FMT_THROW(format_error("can't OR a terminal color")); + background_color.value.rgb_color |= rhs.background_color.value.rgb_color; + } + + ems = static_cast(static_cast(ems) | + static_cast(rhs.ems)); + return *this; + } + + friend FMT_CONSTEXPR text_style operator|(text_style lhs, + const text_style& rhs) { + return lhs |= rhs; + } + + FMT_DEPRECATED_NONMSVC FMT_CONSTEXPR text_style& operator&=( + const text_style& rhs) { + return and_assign(rhs); + } + + FMT_DEPRECATED_NONMSVC friend FMT_CONSTEXPR text_style + operator&(text_style lhs, const text_style& rhs) { + return lhs.and_assign(rhs); + } + + FMT_CONSTEXPR bool has_foreground() const noexcept { + return set_foreground_color; + } + FMT_CONSTEXPR bool has_background() const noexcept { + return set_background_color; + } + FMT_CONSTEXPR bool has_emphasis() const noexcept { + return static_cast(ems) != 0; + } + FMT_CONSTEXPR detail::color_type get_foreground() const noexcept { + FMT_ASSERT(has_foreground(), "no foreground specified for this style"); + return foreground_color; + } + FMT_CONSTEXPR detail::color_type get_background() const noexcept { + FMT_ASSERT(has_background(), "no background specified for this style"); + return background_color; + } + FMT_CONSTEXPR emphasis get_emphasis() const noexcept { + FMT_ASSERT(has_emphasis(), "no emphasis specified for this style"); + return ems; + } + + private: + FMT_CONSTEXPR text_style(bool is_foreground, + detail::color_type text_color) noexcept + : 
set_foreground_color(), set_background_color(), ems() { + if (is_foreground) { + foreground_color = text_color; + set_foreground_color = true; + } else { + background_color = text_color; + set_background_color = true; + } + } + + // DEPRECATED! + FMT_CONSTEXPR text_style& and_assign(const text_style& rhs) { + if (!set_foreground_color) { + set_foreground_color = rhs.set_foreground_color; + foreground_color = rhs.foreground_color; + } else if (rhs.set_foreground_color) { + if (!foreground_color.is_rgb || !rhs.foreground_color.is_rgb) + FMT_THROW(format_error("can't AND a terminal color")); + foreground_color.value.rgb_color &= rhs.foreground_color.value.rgb_color; + } + + if (!set_background_color) { + set_background_color = rhs.set_background_color; + background_color = rhs.background_color; + } else if (rhs.set_background_color) { + if (!background_color.is_rgb || !rhs.background_color.is_rgb) + FMT_THROW(format_error("can't AND a terminal color")); + background_color.value.rgb_color &= rhs.background_color.value.rgb_color; + } + + ems = static_cast(static_cast(ems) & + static_cast(rhs.ems)); + return *this; + } + + friend FMT_CONSTEXPR_DECL text_style + fg(detail::color_type foreground) noexcept; + + friend FMT_CONSTEXPR_DECL text_style + bg(detail::color_type background) noexcept; + + detail::color_type foreground_color; + detail::color_type background_color; + bool set_foreground_color; + bool set_background_color; + emphasis ems; +}; + +/** Creates a text style from the foreground (text) color. */ +FMT_CONSTEXPR inline text_style fg(detail::color_type foreground) noexcept { + return text_style(true, foreground); +} + +/** Creates a text style from the background color. */ +FMT_CONSTEXPR inline text_style bg(detail::color_type background) noexcept { + return text_style(false, background); +} + +FMT_CONSTEXPR inline text_style operator|(emphasis lhs, emphasis rhs) noexcept { + return text_style(lhs) | rhs; +} + +FMT_BEGIN_DETAIL_NAMESPACE + +template struct ansi_color_escape { + FMT_CONSTEXPR ansi_color_escape(detail::color_type text_color, + const char* esc) noexcept { + // If we have a terminal color, we need to output another escape code + // sequence. + if (!text_color.is_rgb) { + bool is_background = esc == string_view("\x1b[48;2;"); + uint32_t value = text_color.value.term_color; + // Background ASCII codes are the same as the foreground ones but with + // 10 more. 
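+      // For example, terminal_color::red is 31, i.e. "\x1b[31m" as a
+      // foreground, and becomes 41 ("\x1b[41m") as a background.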
+ if (is_background) value += 10u; + + size_t index = 0; + buffer[index++] = static_cast('\x1b'); + buffer[index++] = static_cast('['); + + if (value >= 100u) { + buffer[index++] = static_cast('1'); + value %= 100u; + } + buffer[index++] = static_cast('0' + value / 10u); + buffer[index++] = static_cast('0' + value % 10u); + + buffer[index++] = static_cast('m'); + buffer[index++] = static_cast('\0'); + return; + } + + for (int i = 0; i < 7; i++) { + buffer[i] = static_cast(esc[i]); + } + rgb color(text_color.value.rgb_color); + to_esc(color.r, buffer + 7, ';'); + to_esc(color.g, buffer + 11, ';'); + to_esc(color.b, buffer + 15, 'm'); + buffer[19] = static_cast(0); + } + FMT_CONSTEXPR ansi_color_escape(emphasis em) noexcept { + uint8_t em_codes[num_emphases] = {}; + if (has_emphasis(em, emphasis::bold)) em_codes[0] = 1; + if (has_emphasis(em, emphasis::faint)) em_codes[1] = 2; + if (has_emphasis(em, emphasis::italic)) em_codes[2] = 3; + if (has_emphasis(em, emphasis::underline)) em_codes[3] = 4; + if (has_emphasis(em, emphasis::blink)) em_codes[4] = 5; + if (has_emphasis(em, emphasis::reverse)) em_codes[5] = 7; + if (has_emphasis(em, emphasis::conceal)) em_codes[6] = 8; + if (has_emphasis(em, emphasis::strikethrough)) em_codes[7] = 9; + + size_t index = 0; + for (size_t i = 0; i < num_emphases; ++i) { + if (!em_codes[i]) continue; + buffer[index++] = static_cast('\x1b'); + buffer[index++] = static_cast('['); + buffer[index++] = static_cast('0' + em_codes[i]); + buffer[index++] = static_cast('m'); + } + buffer[index++] = static_cast(0); + } + FMT_CONSTEXPR operator const Char*() const noexcept { return buffer; } + + FMT_CONSTEXPR const Char* begin() const noexcept { return buffer; } + FMT_CONSTEXPR_CHAR_TRAITS const Char* end() const noexcept { + return buffer + std::char_traits::length(buffer); + } + + private: + static constexpr size_t num_emphases = 8; + Char buffer[7u + 3u * num_emphases + 1u]; + + static FMT_CONSTEXPR void to_esc(uint8_t c, Char* out, + char delimiter) noexcept { + out[0] = static_cast('0' + c / 100); + out[1] = static_cast('0' + c / 10 % 10); + out[2] = static_cast('0' + c % 10); + out[3] = static_cast(delimiter); + } + static FMT_CONSTEXPR bool has_emphasis(emphasis em, emphasis mask) noexcept { + return static_cast(em) & static_cast(mask); + } +}; + +template +FMT_CONSTEXPR ansi_color_escape make_foreground_color( + detail::color_type foreground) noexcept { + return ansi_color_escape(foreground, "\x1b[38;2;"); +} + +template +FMT_CONSTEXPR ansi_color_escape make_background_color( + detail::color_type background) noexcept { + return ansi_color_escape(background, "\x1b[48;2;"); +} + +template +FMT_CONSTEXPR ansi_color_escape make_emphasis(emphasis em) noexcept { + return ansi_color_escape(em); +} + +template inline void fputs(const Char* chars, FILE* stream) { + int result = std::fputs(chars, stream); + if (result < 0) + FMT_THROW(system_error(errno, FMT_STRING("cannot write to file"))); +} + +template <> inline void fputs(const wchar_t* chars, FILE* stream) { + int result = std::fputws(chars, stream); + if (result < 0) + FMT_THROW(system_error(errno, FMT_STRING("cannot write to file"))); +} + +template inline void reset_color(FILE* stream) { + fputs("\x1b[0m", stream); +} + +template <> inline void reset_color(FILE* stream) { + fputs(L"\x1b[0m", stream); +} + +template inline void reset_color(buffer& buffer) { + auto reset_color = string_view("\x1b[0m"); + buffer.append(reset_color.begin(), reset_color.end()); +} + +template struct styled_arg { + const T& value; + 
text_style style; +}; + +template +void vformat_to(buffer& buf, const text_style& ts, + basic_string_view format_str, + basic_format_args>> args) { + bool has_style = false; + if (ts.has_emphasis()) { + has_style = true; + auto emphasis = detail::make_emphasis(ts.get_emphasis()); + buf.append(emphasis.begin(), emphasis.end()); + } + if (ts.has_foreground()) { + has_style = true; + auto foreground = detail::make_foreground_color(ts.get_foreground()); + buf.append(foreground.begin(), foreground.end()); + } + if (ts.has_background()) { + has_style = true; + auto background = detail::make_background_color(ts.get_background()); + buf.append(background.begin(), background.end()); + } + detail::vformat_to(buf, format_str, args, {}); + if (has_style) detail::reset_color(buf); +} + +FMT_END_DETAIL_NAMESPACE + +template > +void vprint(std::FILE* f, const text_style& ts, const S& format, + basic_format_args>> args) { + basic_memory_buffer buf; + detail::vformat_to(buf, ts, to_string_view(format), args); + if (detail::is_utf8()) { + detail::print(f, basic_string_view(buf.begin(), buf.size())); + } else { + buf.push_back(Char(0)); + detail::fputs(buf.data(), f); + } +} + +/** + \rst + Formats a string and prints it to the specified file stream using ANSI + escape sequences to specify text formatting. + + **Example**:: + + fmt::print(fmt::emphasis::bold | fg(fmt::color::red), + "Elapsed time: {0:.2f} seconds", 1.23); + \endrst + */ +template ::value)> +void print(std::FILE* f, const text_style& ts, const S& format_str, + const Args&... args) { + vprint(f, ts, format_str, + fmt::make_format_args>>(args...)); +} + +/** + \rst + Formats a string and prints it to stdout using ANSI escape sequences to + specify text formatting. + + **Example**:: + + fmt::print(fmt::emphasis::bold | fg(fmt::color::red), + "Elapsed time: {0:.2f} seconds", 1.23); + \endrst + */ +template ::value)> +void print(const text_style& ts, const S& format_str, const Args&... args) { + return print(stdout, ts, format_str, args...); +} + +template > +inline std::basic_string vformat( + const text_style& ts, const S& format_str, + basic_format_args>> args) { + basic_memory_buffer buf; + detail::vformat_to(buf, ts, to_string_view(format_str), args); + return fmt::to_string(buf); +} + +/** + \rst + Formats arguments and returns the result as a string using ANSI + escape sequences to specify text formatting. + + **Example**:: + + #include + std::string message = fmt::format(fmt::emphasis::bold | fg(fmt::color::red), + "The answer is {}", 42); + \endrst +*/ +template > +inline std::basic_string format(const text_style& ts, const S& format_str, + const Args&... args) { + return fmt::vformat(ts, to_string_view(format_str), + fmt::make_format_args>(args...)); +} + +/** + Formats a string with the given text_style and writes the output to ``out``. + */ +template ::value)> +OutputIt vformat_to( + OutputIt out, const text_style& ts, basic_string_view format_str, + basic_format_args>> args) { + auto&& buf = detail::get_buffer(out); + detail::vformat_to(buf, ts, format_str, args); + return detail::get_iterator(buf); +} + +/** + \rst + Formats arguments with the given text_style, writes the result to the output + iterator ``out`` and returns the iterator past the end of the output range. 
+ + **Example**:: + + std::vector out; + fmt::format_to(std::back_inserter(out), + fmt::emphasis::bold | fg(fmt::color::red), "{}", 42); + \endrst +*/ +template >::value&& + detail::is_string::value> +inline auto format_to(OutputIt out, const text_style& ts, const S& format_str, + Args&&... args) -> + typename std::enable_if::type { + return vformat_to(out, ts, to_string_view(format_str), + fmt::make_format_args>>(args...)); +} + +template +struct formatter, Char> : formatter { + template + auto format(const detail::styled_arg& arg, FormatContext& ctx) const + -> decltype(ctx.out()) { + const auto& ts = arg.style; + const auto& value = arg.value; + auto out = ctx.out(); + + bool has_style = false; + if (ts.has_emphasis()) { + has_style = true; + auto emphasis = detail::make_emphasis(ts.get_emphasis()); + out = std::copy(emphasis.begin(), emphasis.end(), out); + } + if (ts.has_foreground()) { + has_style = true; + auto foreground = + detail::make_foreground_color(ts.get_foreground()); + out = std::copy(foreground.begin(), foreground.end(), out); + } + if (ts.has_background()) { + has_style = true; + auto background = + detail::make_background_color(ts.get_background()); + out = std::copy(background.begin(), background.end(), out); + } + out = formatter::format(value, ctx); + if (has_style) { + auto reset_color = string_view("\x1b[0m"); + out = std::copy(reset_color.begin(), reset_color.end(), out); + } + return out; + } +}; + +/** + \rst + Returns an argument that will be formatted using ANSI escape sequences, + to be used in a formatting function. + + **Example**:: + + fmt::print("Elapsed time: {s:.2f} seconds", + fmt::styled(1.23, fmt::fg(fmt::color::green) | fmt::bg(fmt::color::blue))); + \endrst + */ +template +FMT_CONSTEXPR auto styled(const T& value, text_style ts) + -> detail::styled_arg> { + return detail::styled_arg>{value, ts}; +} + +FMT_MODULE_EXPORT_END +FMT_END_NAMESPACE + +#endif // FMT_COLOR_H_ diff --git a/external/fmt/include/fmt/compile.h b/external/fmt/include/fmt/compile.h new file mode 100644 index 00000000..296ef42c --- /dev/null +++ b/external/fmt/include/fmt/compile.h @@ -0,0 +1,602 @@ +// Formatting library for C++ - experimental format string compilation +// +// Copyright (c) 2012 - present, Victor Zverovich and fmt contributors +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_COMPILE_H_ +#define FMT_COMPILE_H_ + +#include "format.h" + +FMT_BEGIN_NAMESPACE +namespace detail { + +template +inline counting_iterator copy_str(InputIt begin, InputIt end, + counting_iterator it) { + return it + (end - begin); +} + +template class truncating_iterator_base { + protected: + OutputIt out_; + size_t limit_; + size_t count_ = 0; + + truncating_iterator_base() : out_(), limit_(0) {} + + truncating_iterator_base(OutputIt out, size_t limit) + : out_(out), limit_(limit) {} + + public: + using iterator_category = std::output_iterator_tag; + using value_type = typename std::iterator_traits::value_type; + using difference_type = std::ptrdiff_t; + using pointer = void; + using reference = void; + FMT_UNCHECKED_ITERATOR(truncating_iterator_base); + + OutputIt base() const { return out_; } + size_t count() const { return count_; } +}; + +// An output iterator that truncates the output and counts the number of objects +// written to it. 
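+// Usage sketch (illustrative, not part of upstream fmt): this is the
+// iterator behind format_to_n later in this header. Assuming a fixed
+// caller-provided buffer,
+//
+//   char buf[8];
+//   auto result = fmt::format_to_n(buf, sizeof buf - 1, "{}", 12345678);
+//   *result.out = '\0';  // buf holds "1234567"; result.size is 8
+//
+// at most n code units are written, but everything is still counted.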
+template ::value_type>::type> +class truncating_iterator; + +template +class truncating_iterator + : public truncating_iterator_base { + mutable typename truncating_iterator_base::value_type blackhole_; + + public: + using value_type = typename truncating_iterator_base::value_type; + + truncating_iterator() = default; + + truncating_iterator(OutputIt out, size_t limit) + : truncating_iterator_base(out, limit) {} + + truncating_iterator& operator++() { + if (this->count_++ < this->limit_) ++this->out_; + return *this; + } + + truncating_iterator operator++(int) { + auto it = *this; + ++*this; + return it; + } + + value_type& operator*() const { + return this->count_ < this->limit_ ? *this->out_ : blackhole_; + } +}; + +template +class truncating_iterator + : public truncating_iterator_base { + public: + truncating_iterator() = default; + + truncating_iterator(OutputIt out, size_t limit) + : truncating_iterator_base(out, limit) {} + + template truncating_iterator& operator=(T val) { + if (this->count_++ < this->limit_) *this->out_++ = val; + return *this; + } + + truncating_iterator& operator++() { return *this; } + truncating_iterator& operator++(int) { return *this; } + truncating_iterator& operator*() { return *this; } +}; + +// A compile-time string which is compiled into fast formatting code. +class compiled_string {}; + +template +struct is_compiled_string : std::is_base_of {}; + +/** + \rst + Converts a string literal *s* into a format string that will be parsed at + compile time and converted into efficient formatting code. Requires C++17 + ``constexpr if`` compiler support. + + **Example**:: + + // Converts 42 into std::string using the most efficient method and no + // runtime format string processing. + std::string s = fmt::format(FMT_COMPILE("{}"), 42); + \endrst + */ +#if defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) +# define FMT_COMPILE(s) \ + FMT_STRING_IMPL(s, fmt::detail::compiled_string, explicit) +#else +# define FMT_COMPILE(s) FMT_STRING(s) +#endif + +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +template Str> +struct udl_compiled_string : compiled_string { + using char_type = Char; + explicit constexpr operator basic_string_view() const { + return {Str.data, N - 1}; + } +}; +#endif + +template +const T& first(const T& value, const Tail&...) { + return value; +} + +#if defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) +template struct type_list {}; + +// Returns a reference to the argument at index N from [first, rest...]. +template +constexpr const auto& get([[maybe_unused]] const T& first, + [[maybe_unused]] const Args&... rest) { + static_assert(N < 1 + sizeof...(Args), "index is out of bounds"); + if constexpr (N == 0) + return first; + else + return detail::get(rest...); +} + +template +constexpr int get_arg_index_by_name(basic_string_view name, + type_list) { + return get_arg_index_by_name(name); +} + +template struct get_type_impl; + +template struct get_type_impl> { + using type = + remove_cvref_t(std::declval()...))>; +}; + +template +using get_type = typename get_type_impl::type; + +template struct is_compiled_format : std::false_type {}; + +template struct text { + basic_string_view data; + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args&...) 
const { + return write(out, data); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +template +constexpr text make_text(basic_string_view s, size_t pos, + size_t size) { + return {{&s[pos], size}}; +} + +template struct code_unit { + Char value; + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args&...) const { + return write(out, value); + } +}; + +// This ensures that the argument type is convertible to `const T&`. +template +constexpr const T& get_arg_checked(const Args&... args) { + const auto& arg = detail::get(args...); + if constexpr (detail::is_named_arg>()) { + return arg.value; + } else { + return arg; + } +} + +template +struct is_compiled_format> : std::true_type {}; + +// A replacement field that refers to argument N. +template struct field { + using char_type = Char; + + template + constexpr OutputIt format(OutputIt out, const Args&... args) const { + return write(out, get_arg_checked(args...)); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +// A replacement field that refers to argument with name. +template struct runtime_named_field { + using char_type = Char; + basic_string_view name; + + template + constexpr static bool try_format_argument( + OutputIt& out, + // [[maybe_unused]] due to unused-but-set-parameter warning in GCC 7,8,9 + [[maybe_unused]] basic_string_view arg_name, const T& arg) { + if constexpr (is_named_arg::type>::value) { + if (arg_name == arg.name) { + out = write(out, arg.value); + return true; + } + } + return false; + } + + template + constexpr OutputIt format(OutputIt out, const Args&... args) const { + bool found = (try_format_argument(out, name, args) || ...); + if (!found) { + FMT_THROW(format_error("argument with specified name is not found")); + } + return out; + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +// A replacement field that refers to argument N and has format specifiers. +template struct spec_field { + using char_type = Char; + formatter fmt; + + template + constexpr FMT_INLINE OutputIt format(OutputIt out, + const Args&... args) const { + const auto& vargs = + fmt::make_format_args>(args...); + basic_format_context ctx(out, vargs); + return fmt.format(get_arg_checked(args...), ctx); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +template struct concat { + L lhs; + R rhs; + using char_type = typename L::char_type; + + template + constexpr OutputIt format(OutputIt out, const Args&... 
args) const { + out = lhs.format(out, args...); + return rhs.format(out, args...); + } +}; + +template +struct is_compiled_format> : std::true_type {}; + +template +constexpr concat make_concat(L lhs, R rhs) { + return {lhs, rhs}; +} + +struct unknown_format {}; + +template +constexpr size_t parse_text(basic_string_view str, size_t pos) { + for (size_t size = str.size(); pos != size; ++pos) { + if (str[pos] == '{' || str[pos] == '}') break; + } + return pos; +} + +template +constexpr auto compile_format_string(S format_str); + +template +constexpr auto parse_tail(T head, S format_str) { + if constexpr (POS != + basic_string_view(format_str).size()) { + constexpr auto tail = compile_format_string(format_str); + if constexpr (std::is_same, + unknown_format>()) + return tail; + else + return make_concat(head, tail); + } else { + return head; + } +} + +template struct parse_specs_result { + formatter fmt; + size_t end; + int next_arg_id; +}; + +constexpr int manual_indexing_id = -1; + +template +constexpr parse_specs_result parse_specs(basic_string_view str, + size_t pos, int next_arg_id) { + str.remove_prefix(pos); + auto ctx = basic_format_parse_context(str, {}, next_arg_id); + auto f = formatter(); + auto end = f.parse(ctx); + return {f, pos + fmt::detail::to_unsigned(end - str.data()) + 1, + next_arg_id == 0 ? manual_indexing_id : ctx.next_arg_id()}; +} + +template struct arg_id_handler { + arg_ref arg_id; + + constexpr int operator()() { + FMT_ASSERT(false, "handler cannot be used with automatic indexing"); + return 0; + } + constexpr int operator()(int id) { + arg_id = arg_ref(id); + return 0; + } + constexpr int operator()(basic_string_view id) { + arg_id = arg_ref(id); + return 0; + } + + constexpr void on_error(const char* message) { + FMT_THROW(format_error(message)); + } +}; + +template struct parse_arg_id_result { + arg_ref arg_id; + const Char* arg_id_end; +}; + +template +constexpr auto parse_arg_id(const Char* begin, const Char* end) { + auto handler = arg_id_handler{arg_ref{}}; + auto arg_id_end = parse_arg_id(begin, end, handler); + return parse_arg_id_result{handler.arg_id, arg_id_end}; +} + +template struct field_type { + using type = remove_cvref_t; +}; + +template +struct field_type::value>> { + using type = remove_cvref_t; +}; + +template +constexpr auto parse_replacement_field_then_tail(S format_str) { + using char_type = typename S::char_type; + constexpr auto str = basic_string_view(format_str); + constexpr char_type c = END_POS != str.size() ? str[END_POS] : char_type(); + if constexpr (c == '}') { + return parse_tail( + field::type, ARG_INDEX>(), + format_str); + } else if constexpr (c == ':') { + constexpr auto result = parse_specs::type>( + str, END_POS + 1, NEXT_ID == manual_indexing_id ? 0 : NEXT_ID); + return parse_tail( + spec_field::type, ARG_INDEX>{ + result.fmt}, + format_str); + } +} + +// Compiles a non-empty format string and returns the compiled representation +// or unknown_format() on unrecognized input. 
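+// For example (an illustrative sketch, not upstream documentation), compiling
+// FMT_COMPILE("x={}") against an int argument conceptually yields
+//   concat<text<char>, field<char, int, 0>>
+// whose format() first copies the literal "x=" and then writes the argument,
+// leaving no format-string parsing to do at run time.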
+template +constexpr auto compile_format_string(S format_str) { + using char_type = typename S::char_type; + constexpr auto str = basic_string_view(format_str); + if constexpr (str[POS] == '{') { + if constexpr (POS + 1 == str.size()) + FMT_THROW(format_error("unmatched '{' in format string")); + if constexpr (str[POS + 1] == '{') { + return parse_tail(make_text(str, POS, 1), format_str); + } else if constexpr (str[POS + 1] == '}' || str[POS + 1] == ':') { + static_assert(ID != manual_indexing_id, + "cannot switch from manual to automatic argument indexing"); + constexpr auto next_id = + ID != manual_indexing_id ? ID + 1 : manual_indexing_id; + return parse_replacement_field_then_tail, Args, + POS + 1, ID, next_id>( + format_str); + } else { + constexpr auto arg_id_result = + parse_arg_id(str.data() + POS + 1, str.data() + str.size()); + constexpr auto arg_id_end_pos = arg_id_result.arg_id_end - str.data(); + constexpr char_type c = + arg_id_end_pos != str.size() ? str[arg_id_end_pos] : char_type(); + static_assert(c == '}' || c == ':', "missing '}' in format string"); + if constexpr (arg_id_result.arg_id.kind == arg_id_kind::index) { + static_assert( + ID == manual_indexing_id || ID == 0, + "cannot switch from automatic to manual argument indexing"); + constexpr auto arg_index = arg_id_result.arg_id.val.index; + return parse_replacement_field_then_tail, + Args, arg_id_end_pos, + arg_index, manual_indexing_id>( + format_str); + } else if constexpr (arg_id_result.arg_id.kind == arg_id_kind::name) { + constexpr auto arg_index = + get_arg_index_by_name(arg_id_result.arg_id.val.name, Args{}); + if constexpr (arg_index != invalid_arg_index) { + constexpr auto next_id = + ID != manual_indexing_id ? ID + 1 : manual_indexing_id; + return parse_replacement_field_then_tail< + decltype(get_type::value), Args, arg_id_end_pos, + arg_index, next_id>(format_str); + } else { + if constexpr (c == '}') { + return parse_tail( + runtime_named_field{arg_id_result.arg_id.val.name}, + format_str); + } else if constexpr (c == ':') { + return unknown_format(); // no type info for specs parsing + } + } + } + } + } else if constexpr (str[POS] == '}') { + if constexpr (POS + 1 == str.size()) + FMT_THROW(format_error("unmatched '}' in format string")); + return parse_tail(make_text(str, POS, 1), format_str); + } else { + constexpr auto end = parse_text(str, POS + 1); + if constexpr (end - POS > 1) { + return parse_tail(make_text(str, POS, end - POS), + format_str); + } else { + return parse_tail(code_unit{str[POS]}, + format_str); + } + } +} + +template ::value)> +constexpr auto compile(S format_str) { + constexpr auto str = basic_string_view(format_str); + if constexpr (str.size() == 0) { + return detail::make_text(str, 0, 0); + } else { + constexpr auto result = + detail::compile_format_string, 0, 0>( + format_str); + return result; + } +} +#endif // defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) +} // namespace detail + +FMT_MODULE_EXPORT_BEGIN + +#if defined(__cpp_if_constexpr) && defined(__cpp_return_type_deduction) + +template ::value)> +FMT_INLINE std::basic_string format(const CompiledFormat& cf, + const Args&... args) { + auto s = std::basic_string(); + cf.format(std::back_inserter(s), args...); + return s; +} + +template ::value)> +constexpr FMT_INLINE OutputIt format_to(OutputIt out, const CompiledFormat& cf, + const Args&... args) { + return cf.format(out, args...); +} + +template ::value)> +FMT_INLINE std::basic_string format(const S&, + Args&&... 
args) { + if constexpr (std::is_same::value) { + constexpr auto str = basic_string_view(S()); + if constexpr (str.size() == 2 && str[0] == '{' && str[1] == '}') { + const auto& first = detail::first(args...); + if constexpr (detail::is_named_arg< + remove_cvref_t>::value) { + return fmt::to_string(first.value); + } else { + return fmt::to_string(first); + } + } + } + constexpr auto compiled = detail::compile(S()); + if constexpr (std::is_same, + detail::unknown_format>()) { + return fmt::format( + static_cast>(S()), + std::forward(args)...); + } else { + return fmt::format(compiled, std::forward(args)...); + } +} + +template ::value)> +FMT_CONSTEXPR OutputIt format_to(OutputIt out, const S&, Args&&... args) { + constexpr auto compiled = detail::compile(S()); + if constexpr (std::is_same, + detail::unknown_format>()) { + return fmt::format_to( + out, static_cast>(S()), + std::forward(args)...); + } else { + return fmt::format_to(out, compiled, std::forward(args)...); + } +} +#endif + +template ::value)> +format_to_n_result format_to_n(OutputIt out, size_t n, + const S& format_str, Args&&... args) { + auto it = fmt::format_to(detail::truncating_iterator(out, n), + format_str, std::forward(args)...); + return {it.base(), it.count()}; +} + +template ::value)> +size_t formatted_size(const S& format_str, const Args&... args) { + return fmt::format_to(detail::counting_iterator(), format_str, args...) + .count(); +} + +template ::value)> +void print(std::FILE* f, const S& format_str, const Args&... args) { + memory_buffer buffer; + fmt::format_to(std::back_inserter(buffer), format_str, args...); + detail::print(f, {buffer.data(), buffer.size()}); +} + +template ::value)> +void print(const S& format_str, const Args&... args) { + print(stdout, format_str, args...); +} + +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +inline namespace literals { +template constexpr auto operator""_cf() { + using char_t = remove_cvref_t; + return detail::udl_compiled_string(); +} +} // namespace literals +#endif + +FMT_MODULE_EXPORT_END +FMT_END_NAMESPACE + +#endif // FMT_COMPILE_H_ diff --git a/external/fmt/include/fmt/core.h b/external/fmt/include/fmt/core.h new file mode 100644 index 00000000..36c92f66 --- /dev/null +++ b/external/fmt/include/fmt/core.h @@ -0,0 +1,3285 @@ +// Formatting library for C++ - the core API for char/UTF-8 +// +// Copyright (c) 2012 - present, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. + +#ifndef FMT_CORE_H_ +#define FMT_CORE_H_ + +#include // std::byte +#include // std::FILE +#include // std::strlen +#include +#include +#include +#include + +// The fmt library version in the form major * 10000 + minor * 100 + patch. +#define FMT_VERSION 80102 + +#if defined(__clang__) && !defined(__ibmxl__) +# define FMT_CLANG_VERSION (__clang_major__ * 100 + __clang_minor__) +#else +# define FMT_CLANG_VERSION 0 +#endif + +#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && \ + !defined(__NVCOMPILER) +# define FMT_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) +#else +# define FMT_GCC_VERSION 0 +#endif + +#ifndef FMT_GCC_PRAGMA +// Workaround _Pragma bug https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59884. 
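+// That is, FMT_GCC_PRAGMA("GCC optimize(\"Og\")") used below expands to a
+// real _Pragma only on GCC >= 5.4 (FMT_GCC_VERSION >= 504); older GCCs get
+// an empty expansion to sidestep the bug.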
+# if FMT_GCC_VERSION >= 504 +# define FMT_GCC_PRAGMA(arg) _Pragma(arg) +# else +# define FMT_GCC_PRAGMA(arg) +# endif +#endif + +#ifdef __ICL +# define FMT_ICC_VERSION __ICL +#elif defined(__INTEL_COMPILER) +# define FMT_ICC_VERSION __INTEL_COMPILER +#else +# define FMT_ICC_VERSION 0 +#endif + +#ifdef __NVCOMPILER +# define FMT_NVCOMPILER_VERSION \ + (__NVCOMPILER_MAJOR__ * 100 + __NVCOMPILER_MINOR__) +#else +# define FMT_NVCOMPILER_VERSION 0 +#endif + +#ifdef __NVCC__ +# define FMT_NVCC __NVCC__ +#else +# define FMT_NVCC 0 +#endif + +#ifdef _MSC_VER +# define FMT_MSC_VER _MSC_VER +# define FMT_MSC_WARNING(...) __pragma(warning(__VA_ARGS__)) +#else +# define FMT_MSC_VER 0 +# define FMT_MSC_WARNING(...) +#endif + +#ifdef __has_feature +# define FMT_HAS_FEATURE(x) __has_feature(x) +#else +# define FMT_HAS_FEATURE(x) 0 +#endif + +#if defined(__has_include) && \ + (!defined(__INTELLISENSE__) || FMT_MSC_VER > 1900) && \ + (!FMT_ICC_VERSION || FMT_ICC_VERSION >= 1600) +# define FMT_HAS_INCLUDE(x) __has_include(x) +#else +# define FMT_HAS_INCLUDE(x) 0 +#endif + +#ifdef __has_cpp_attribute +# define FMT_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +# define FMT_HAS_CPP_ATTRIBUTE(x) 0 +#endif + +#ifdef _MSVC_LANG +# define FMT_CPLUSPLUS _MSVC_LANG +#else +# define FMT_CPLUSPLUS __cplusplus +#endif + +#define FMT_HAS_CPP14_ATTRIBUTE(attribute) \ + (FMT_CPLUSPLUS >= 201402L && FMT_HAS_CPP_ATTRIBUTE(attribute)) + +#define FMT_HAS_CPP17_ATTRIBUTE(attribute) \ + (FMT_CPLUSPLUS >= 201703L && FMT_HAS_CPP_ATTRIBUTE(attribute)) + +// Check if relaxed C++14 constexpr is supported. +// GCC doesn't allow throw in constexpr until version 6 (bug 67371). +#ifndef FMT_USE_CONSTEXPR +# define FMT_USE_CONSTEXPR \ + (FMT_HAS_FEATURE(cxx_relaxed_constexpr) || FMT_MSC_VER >= 1912 || \ + (FMT_GCC_VERSION >= 600 && __cplusplus >= 201402L)) && \ + !FMT_NVCC && !FMT_ICC_VERSION +#endif +#if FMT_USE_CONSTEXPR +# define FMT_CONSTEXPR constexpr +# define FMT_CONSTEXPR_DECL constexpr +#else +# define FMT_CONSTEXPR +# define FMT_CONSTEXPR_DECL +#endif + +#if ((__cplusplus >= 202002L) && \ + (!defined(_GLIBCXX_RELEASE) || _GLIBCXX_RELEASE > 9)) || \ + (__cplusplus >= 201709L && FMT_GCC_VERSION >= 1002) +# define FMT_CONSTEXPR20 constexpr +#else +# define FMT_CONSTEXPR20 +#endif + +// Check if constexpr std::char_traits<>::compare,length is supported. +#if defined(__GLIBCXX__) +# if __cplusplus >= 201703L && defined(_GLIBCXX_RELEASE) && \ + _GLIBCXX_RELEASE >= 7 // GCC 7+ libstdc++ has _GLIBCXX_RELEASE. +# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +# endif +#elif defined(_LIBCPP_VERSION) && __cplusplus >= 201703L && \ + _LIBCPP_VERSION >= 4000 +# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#elif FMT_MSC_VER >= 1914 && _MSVC_LANG >= 201703L +# define FMT_CONSTEXPR_CHAR_TRAITS constexpr +#endif +#ifndef FMT_CONSTEXPR_CHAR_TRAITS +# define FMT_CONSTEXPR_CHAR_TRAITS +#endif + +// Check if exceptions are disabled. +#ifndef FMT_EXCEPTIONS +# if (defined(__GNUC__) && !defined(__EXCEPTIONS)) || \ + FMT_MSC_VER && !_HAS_EXCEPTIONS +# define FMT_EXCEPTIONS 0 +# else +# define FMT_EXCEPTIONS 1 +# endif +#endif + +// [[noreturn]] is disabled on MSVC and NVCC because of bogus unreachable code +// warnings. 
+#if FMT_EXCEPTIONS && FMT_HAS_CPP_ATTRIBUTE(noreturn) && !FMT_MSC_VER && \ + !FMT_NVCC +# define FMT_NORETURN [[noreturn]] +#else +# define FMT_NORETURN +#endif + +#if __cplusplus == 201103L || __cplusplus == 201402L +# if defined(__INTEL_COMPILER) || defined(__PGI) +# define FMT_FALLTHROUGH +# elif defined(__clang__) +# define FMT_FALLTHROUGH [[clang::fallthrough]] +# elif FMT_GCC_VERSION >= 700 && \ + (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= 520) +# define FMT_FALLTHROUGH [[gnu::fallthrough]] +# else +# define FMT_FALLTHROUGH +# endif +#elif FMT_HAS_CPP17_ATTRIBUTE(fallthrough) +# define FMT_FALLTHROUGH [[fallthrough]] +#else +# define FMT_FALLTHROUGH +#endif + +#ifndef FMT_NODISCARD +# if FMT_HAS_CPP17_ATTRIBUTE(nodiscard) +# define FMT_NODISCARD [[nodiscard]] +# else +# define FMT_NODISCARD +# endif +#endif + +#ifndef FMT_USE_FLOAT +# define FMT_USE_FLOAT 1 +#endif +#ifndef FMT_USE_DOUBLE +# define FMT_USE_DOUBLE 1 +#endif +#ifndef FMT_USE_LONG_DOUBLE +# define FMT_USE_LONG_DOUBLE 1 +#endif + +#ifndef FMT_INLINE +# if FMT_GCC_VERSION || FMT_CLANG_VERSION +# define FMT_INLINE inline __attribute__((always_inline)) +# else +# define FMT_INLINE inline +# endif +#endif + +#ifndef FMT_DEPRECATED +# if FMT_HAS_CPP14_ATTRIBUTE(deprecated) || FMT_MSC_VER >= 1900 +# define FMT_DEPRECATED [[deprecated]] +# else +# if (defined(__GNUC__) && !defined(__LCC__)) || defined(__clang__) +# define FMT_DEPRECATED __attribute__((deprecated)) +# elif FMT_MSC_VER +# define FMT_DEPRECATED __declspec(deprecated) +# else +# define FMT_DEPRECATED /* deprecated */ +# endif +# endif +#endif + +#ifdef _MSC_VER +# define FMT_UNCHECKED_ITERATOR(It) \ + using _Unchecked_type = It // Mark iterator as checked. +#else +# define FMT_UNCHECKED_ITERATOR(It) using DummyTypeName = It +#endif + +#ifndef FMT_BEGIN_NAMESPACE +# define FMT_BEGIN_NAMESPACE \ + namespace fmt { \ + inline namespace v8 { +# define FMT_END_NAMESPACE \ + } \ + } +#endif + +#ifndef FMT_MODULE_EXPORT +# define FMT_MODULE_EXPORT +# define FMT_MODULE_EXPORT_BEGIN +# define FMT_MODULE_EXPORT_END +# define FMT_BEGIN_DETAIL_NAMESPACE namespace detail { +# define FMT_END_DETAIL_NAMESPACE } +#endif + +#if !defined(FMT_HEADER_ONLY) && defined(_WIN32) +# define FMT_CLASS_API FMT_MSC_WARNING(suppress : 4275) +# ifdef FMT_EXPORT +# define FMT_API __declspec(dllexport) +# elif defined(FMT_SHARED) +# define FMT_API __declspec(dllimport) +# endif +#else +# define FMT_CLASS_API +# if defined(FMT_EXPORT) || defined(FMT_SHARED) +# if defined(__GNUC__) || defined(__clang__) +# define FMT_API __attribute__((visibility("default"))) +# endif +# endif +#endif +#ifndef FMT_API +# define FMT_API +#endif + +// libc++ supports string_view in pre-c++17. +#if (FMT_HAS_INCLUDE() && \ + (__cplusplus > 201402L || defined(_LIBCPP_VERSION))) || \ + (defined(_MSVC_LANG) && _MSVC_LANG > 201402L && _MSC_VER >= 1910) +# include +# define FMT_USE_STRING_VIEW +#elif FMT_HAS_INCLUDE("experimental/string_view") && __cplusplus >= 201402L +# include +# define FMT_USE_EXPERIMENTAL_STRING_VIEW +#endif + +#ifndef FMT_UNICODE +# define FMT_UNICODE !FMT_MSC_VER +#endif + +#ifndef FMT_CONSTEVAL +# if ((FMT_GCC_VERSION >= 1000 || FMT_CLANG_VERSION >= 1101) && \ + __cplusplus > 201703L && !defined(__apple_build_version__)) || \ + (defined(__cpp_consteval) && \ + (!FMT_MSC_VER || _MSC_FULL_VER >= 193030704)) +// consteval is broken in MSVC before VS2022 and Apple clang 13. 
+# define FMT_CONSTEVAL consteval +# define FMT_HAS_CONSTEVAL +# else +# define FMT_CONSTEVAL +# endif +#endif + +#ifndef FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +# if defined(__cpp_nontype_template_args) && \ + ((FMT_GCC_VERSION >= 903 && __cplusplus >= 201709L) || \ + __cpp_nontype_template_args >= 201911L) +# define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 1 +# else +# define FMT_USE_NONTYPE_TEMPLATE_PARAMETERS 0 +# endif +#endif + +// Enable minimal optimizations for more compact code in debug mode. +FMT_GCC_PRAGMA("GCC push_options") +#if !defined(__OPTIMIZE__) && !FMT_NVCOMPILER_VERSION +FMT_GCC_PRAGMA("GCC optimize(\"Og\")") +#endif + +FMT_BEGIN_NAMESPACE +FMT_MODULE_EXPORT_BEGIN + +// Implementations of enable_if_t and other metafunctions for older systems. +template +using enable_if_t = typename std::enable_if::type; +template +using conditional_t = typename std::conditional::type; +template using bool_constant = std::integral_constant; +template +using remove_reference_t = typename std::remove_reference::type; +template +using remove_const_t = typename std::remove_const::type; +template +using remove_cvref_t = typename std::remove_cv>::type; +template struct type_identity { using type = T; }; +template using type_identity_t = typename type_identity::type; +template +using underlying_t = typename std::underlying_type::type; + +struct monostate { + constexpr monostate() {} +}; + +// An enable_if helper to be used in template parameters which results in much +// shorter symbols: https://godbolt.org/z/sWw4vP. Extra parentheses are needed +// to workaround a bug in MSVC 2019 (see #1140 and #1186). +#ifdef FMT_DOC +# define FMT_ENABLE_IF(...) +#else +# define FMT_ENABLE_IF(...) enable_if_t<(__VA_ARGS__), int> = 0 +#endif + +FMT_BEGIN_DETAIL_NAMESPACE + +// Suppress "unused variable" warnings with the method described in +// https://herbsutter.com/2009/10/18/mailbag-shutting-up-compiler-warnings/. +// (void)var does not work on many Intel compilers. +template FMT_CONSTEXPR void ignore_unused(const T&...) {} + +constexpr FMT_INLINE auto is_constant_evaluated( + bool default_value = false) noexcept -> bool { +#ifdef __cpp_lib_is_constant_evaluated + ignore_unused(default_value); + return std::is_constant_evaluated(); +#else + return default_value; +#endif +} + +// A function to suppress "conditional expression is constant" warnings. +template constexpr FMT_INLINE auto const_check(T value) -> T { + return value; +} + +FMT_NORETURN FMT_API void assert_fail(const char* file, int line, + const char* message); + +#ifndef FMT_ASSERT +# ifdef NDEBUG +// FMT_ASSERT is not empty to avoid -Werror=empty-body. +# define FMT_ASSERT(condition, message) \ + ::fmt::detail::ignore_unused((condition), (message)) +# else +# define FMT_ASSERT(condition, message) \ + ((condition) /* void() fails with -Winvalid-constexpr on clang 4.0.1 */ \ + ? (void)0 \ + : ::fmt::detail::assert_fail(__FILE__, __LINE__, (message))) +# endif +#endif + +#if defined(FMT_USE_STRING_VIEW) +template using std_string_view = std::basic_string_view; +#elif defined(FMT_USE_EXPERIMENTAL_STRING_VIEW) +template +using std_string_view = std::experimental::basic_string_view; +#else +template struct std_string_view {}; +#endif + +#ifdef FMT_USE_INT128 +// Do nothing. +#elif defined(__SIZEOF_INT128__) && !FMT_NVCC && \ + !(FMT_CLANG_VERSION && FMT_MSC_VER) +# define FMT_USE_INT128 1 +using int128_opt = __int128_t; // An optional native 128-bit integer. 
+using uint128_opt = __uint128_t; +template inline auto convert_for_visit(T value) -> T { + return value; +} +#else +# define FMT_USE_INT128 0 +#endif +#if !FMT_USE_INT128 +enum class int128_opt {}; +enum class uint128_opt {}; +// Reduce template instantiations. +template auto convert_for_visit(T) -> monostate { return {}; } +#endif + +// Casts a nonnegative integer to unsigned. +template +FMT_CONSTEXPR auto to_unsigned(Int value) -> + typename std::make_unsigned::type { + FMT_ASSERT(value >= 0, "negative value"); + return static_cast::type>(value); +} + +FMT_MSC_WARNING(suppress : 4566) constexpr unsigned char micro[] = "\u00B5"; + +constexpr auto is_utf8() -> bool { + // Avoid buggy sign extensions in MSVC's constant evaluation mode. + // https://developercommunity.visualstudio.com/t/C-difference-in-behavior-for-unsigned/1233612 + using uchar = unsigned char; + return FMT_UNICODE || (sizeof(micro) == 3 && uchar(micro[0]) == 0xC2 && + uchar(micro[1]) == 0xB5); +} +FMT_END_DETAIL_NAMESPACE + +/** + An implementation of ``std::basic_string_view`` for pre-C++17. It provides a + subset of the API. ``fmt::basic_string_view`` is used for format strings even + if ``std::string_view`` is available to prevent issues when a library is + compiled with a different ``-std`` option than the client code (which is not + recommended). + */ +template class basic_string_view { + private: + const Char* data_; + size_t size_; + + public: + using value_type = Char; + using iterator = const Char*; + + constexpr basic_string_view() noexcept : data_(nullptr), size_(0) {} + + /** Constructs a string reference object from a C string and a size. */ + constexpr basic_string_view(const Char* s, size_t count) noexcept + : data_(s), size_(count) {} + + /** + \rst + Constructs a string reference object from a C string computing + the size with ``std::char_traits::length``. + \endrst + */ + FMT_CONSTEXPR_CHAR_TRAITS + FMT_INLINE + basic_string_view(const Char* s) + : data_(s), + size_(detail::const_check(std::is_same::value && + !detail::is_constant_evaluated(true)) + ? std::strlen(reinterpret_cast(s)) + : std::char_traits::length(s)) {} + + /** Constructs a string reference from a ``std::basic_string`` object. */ + template + FMT_CONSTEXPR basic_string_view( + const std::basic_string& s) noexcept + : data_(s.data()), size_(s.size()) {} + + template >::value)> + FMT_CONSTEXPR basic_string_view(S s) noexcept + : data_(s.data()), size_(s.size()) {} + + /** Returns a pointer to the string data. */ + constexpr auto data() const noexcept -> const Char* { return data_; } + + /** Returns the string size. */ + constexpr auto size() const noexcept -> size_t { return size_; } + + constexpr auto begin() const noexcept -> iterator { return data_; } + constexpr auto end() const noexcept -> iterator { return data_ + size_; } + + constexpr auto operator[](size_t pos) const noexcept -> const Char& { + return data_[pos]; + } + + FMT_CONSTEXPR void remove_prefix(size_t n) noexcept { + data_ += n; + size_ -= n; + } + + // Lexicographically compare this string reference to other. + FMT_CONSTEXPR_CHAR_TRAITS auto compare(basic_string_view other) const -> int { + size_t str_size = size_ < other.size_ ? size_ : other.size_; + int result = std::char_traits::compare(data_, other.data_, str_size); + if (result == 0) + result = size_ == other.size_ ? 0 : (size_ < other.size_ ? 
-1 : 1); + return result; + } + + FMT_CONSTEXPR_CHAR_TRAITS friend auto operator==(basic_string_view lhs, + basic_string_view rhs) + -> bool { + return lhs.compare(rhs) == 0; + } + friend auto operator!=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) != 0; + } + friend auto operator<(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) < 0; + } + friend auto operator<=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) <= 0; + } + friend auto operator>(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) > 0; + } + friend auto operator>=(basic_string_view lhs, basic_string_view rhs) -> bool { + return lhs.compare(rhs) >= 0; + } +}; + +using string_view = basic_string_view; + +/** Specifies if ``T`` is a character type. Can be specialized by users. */ +template struct is_char : std::false_type {}; +template <> struct is_char : std::true_type {}; + +// Returns a string view of `s`. +template ::value)> +FMT_INLINE auto to_string_view(const Char* s) -> basic_string_view { + return s; +} +template +inline auto to_string_view(const std::basic_string& s) + -> basic_string_view { + return s; +} +template +constexpr auto to_string_view(basic_string_view s) + -> basic_string_view { + return s; +} +template >::value)> +inline auto to_string_view(detail::std_string_view s) + -> basic_string_view { + return s; +} + +// A base class for compile-time strings. It is defined in the fmt namespace to +// make formatting functions visible via ADL, e.g. format(FMT_STRING("{}"), 42). +struct compile_string {}; + +template +struct is_compile_string : std::is_base_of {}; + +template ::value)> +constexpr auto to_string_view(const S& s) + -> basic_string_view { + return basic_string_view(s); +} + +FMT_BEGIN_DETAIL_NAMESPACE + +void to_string_view(...); +using fmt::to_string_view; + +// Specifies whether S is a string type convertible to fmt::basic_string_view. +// It should be a constexpr function but MSVC 2017 fails to compile it in +// enable_if and MSVC 2015 fails to compile it as an alias template. +template +struct is_string : std::is_class()))> { +}; + +template struct char_t_impl {}; +template struct char_t_impl::value>> { + using result = decltype(to_string_view(std::declval())); + using type = typename result::value_type; +}; + +// Reports a compile-time error if S is not a valid format string. +template ::value)> +FMT_INLINE void check_format_string(const S&) { +#ifdef FMT_ENFORCE_COMPILE_STRING + static_assert(is_compile_string::value, + "FMT_ENFORCE_COMPILE_STRING requires all format strings to use " + "FMT_STRING."); +#endif +} +template ::value)> +void check_format_string(S); + +FMT_NORETURN FMT_API void throw_format_error(const char* message); + +struct error_handler { + constexpr error_handler() = default; + constexpr error_handler(const error_handler&) = default; + + // This function is intentionally not constexpr to give a compile-time error. + FMT_NORETURN FMT_API void on_error(const char* message); +}; +FMT_END_DETAIL_NAMESPACE + +/** String's character type. */ +template using char_t = typename detail::char_t_impl::type; + +/** + \rst + Parsing context consisting of a format string range being parsed and an + argument counter for automatic indexing. + You can use the ``format_parse_context`` type alias for ``char`` instead. 
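+
+  **Example** (an illustrative sketch, not upstream documentation)::
+
+    // inside a custom formatter<T>::parse(format_parse_context& ctx):
+    // ctx.begin() points just past the ':'; consume up to the closing '}'
+    auto it = ctx.begin();
+    while (it != ctx.end() && *it != '}') ++it;
+    return it;  // the caller then advances the context to this position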
+ \endrst + */ +template +class basic_format_parse_context : private ErrorHandler { + private: + basic_string_view format_str_; + int next_arg_id_; + + public: + using char_type = Char; + using iterator = typename basic_string_view::iterator; + + explicit constexpr basic_format_parse_context( + basic_string_view format_str, ErrorHandler eh = {}, + int next_arg_id = 0) + : ErrorHandler(eh), format_str_(format_str), next_arg_id_(next_arg_id) {} + + /** + Returns an iterator to the beginning of the format string range being + parsed. + */ + constexpr auto begin() const noexcept -> iterator { + return format_str_.begin(); + } + + /** + Returns an iterator past the end of the format string range being parsed. + */ + constexpr auto end() const noexcept -> iterator { return format_str_.end(); } + + /** Advances the begin iterator to ``it``. */ + FMT_CONSTEXPR void advance_to(iterator it) { + format_str_.remove_prefix(detail::to_unsigned(it - begin())); + } + + /** + Reports an error if using the manual argument indexing; otherwise returns + the next argument index and switches to the automatic indexing. + */ + FMT_CONSTEXPR auto next_arg_id() -> int { + // Don't check if the argument id is valid to avoid overhead and because it + // will be checked during formatting anyway. + if (next_arg_id_ >= 0) return next_arg_id_++; + on_error("cannot switch from manual to automatic argument indexing"); + return 0; + } + + /** + Reports an error if using the automatic argument indexing; otherwise + switches to the manual indexing. + */ + FMT_CONSTEXPR void check_arg_id(int) { + if (next_arg_id_ > 0) + on_error("cannot switch from automatic to manual argument indexing"); + else + next_arg_id_ = -1; + } + + FMT_CONSTEXPR void check_arg_id(basic_string_view) {} + + FMT_CONSTEXPR void on_error(const char* message) { + ErrorHandler::on_error(message); + } + + constexpr auto error_handler() const -> ErrorHandler { return *this; } +}; + +using format_parse_context = basic_format_parse_context; + +template class basic_format_arg; +template class basic_format_args; +template class dynamic_format_arg_store; + +// A formatter for objects of type T. +template +struct formatter { + // A deleted default constructor indicates a disabled formatter. + formatter() = delete; +}; + +// Specifies if T has an enabled formatter specialization. A type can be +// formattable even if it doesn't have a formatter e.g. via a conversion. +template +using has_formatter = + std::is_constructible>; + +// Checks whether T is a container with contiguous storage. +template struct is_contiguous : std::false_type {}; +template +struct is_contiguous> : std::true_type {}; + +class appender; + +FMT_BEGIN_DETAIL_NAMESPACE + +template +constexpr auto has_const_formatter_impl(T*) + -> decltype(typename Context::template formatter_type().format( + std::declval(), std::declval()), + true) { + return true; +} +template +constexpr auto has_const_formatter_impl(...) -> bool { + return false; +} +template +constexpr auto has_const_formatter() -> bool { + return has_const_formatter_impl(static_cast(nullptr)); +} + +// Extracts a reference to the container from back_insert_iterator. 
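+// Illustrative sketch (not part of upstream fmt): given
+//   std::string s;
+//   auto it = std::back_inserter(s);
+// get_container(it) returns a reference to s itself, which is what lets the
+// container-backed iterator_buffer below write straight into s's storage.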
+template +inline auto get_container(std::back_insert_iterator it) + -> Container& { + using bi_iterator = std::back_insert_iterator; + struct accessor : bi_iterator { + accessor(bi_iterator iter) : bi_iterator(iter) {} + using bi_iterator::container; + }; + return *accessor(it).container; +} + +template +FMT_CONSTEXPR auto copy_str(InputIt begin, InputIt end, OutputIt out) + -> OutputIt { + while (begin != end) *out++ = static_cast(*begin++); + return out; +} + +template , U>::value&& is_char::value)> +FMT_CONSTEXPR auto copy_str(T* begin, T* end, U* out) -> U* { + if (is_constant_evaluated()) return copy_str(begin, end, out); + auto size = to_unsigned(end - begin); + memcpy(out, begin, size * sizeof(U)); + return out + size; +} + +/** + \rst + A contiguous memory buffer with an optional growing ability. It is an internal + class and shouldn't be used directly, only via `~fmt::basic_memory_buffer`. + \endrst + */ +template class buffer { + private: + T* ptr_; + size_t size_; + size_t capacity_; + + protected: + // Don't initialize ptr_ since it is not accessed to save a few cycles. + FMT_MSC_WARNING(suppress : 26495) + buffer(size_t sz) noexcept : size_(sz), capacity_(sz) {} + + FMT_CONSTEXPR20 buffer(T* p = nullptr, size_t sz = 0, size_t cap = 0) noexcept + : ptr_(p), size_(sz), capacity_(cap) {} + + FMT_CONSTEXPR20 ~buffer() = default; + buffer(buffer&&) = default; + + /** Sets the buffer data and capacity. */ + FMT_CONSTEXPR void set(T* buf_data, size_t buf_capacity) noexcept { + ptr_ = buf_data; + capacity_ = buf_capacity; + } + + /** Increases the buffer capacity to hold at least *capacity* elements. */ + virtual FMT_CONSTEXPR20 void grow(size_t capacity) = 0; + + public: + using value_type = T; + using const_reference = const T&; + + buffer(const buffer&) = delete; + void operator=(const buffer&) = delete; + + auto begin() noexcept -> T* { return ptr_; } + auto end() noexcept -> T* { return ptr_ + size_; } + + auto begin() const noexcept -> const T* { return ptr_; } + auto end() const noexcept -> const T* { return ptr_ + size_; } + + /** Returns the size of this buffer. */ + constexpr auto size() const noexcept -> size_t { return size_; } + + /** Returns the capacity of this buffer. */ + constexpr auto capacity() const noexcept -> size_t { return capacity_; } + + /** Returns a pointer to the buffer data. */ + FMT_CONSTEXPR auto data() noexcept -> T* { return ptr_; } + + /** Returns a pointer to the buffer data. */ + FMT_CONSTEXPR auto data() const noexcept -> const T* { return ptr_; } + + /** Clears this buffer. */ + void clear() { size_ = 0; } + + // Tries resizing the buffer to contain *count* elements. If T is a POD type + // the new elements may not be initialized. + FMT_CONSTEXPR20 void try_resize(size_t count) { + try_reserve(count); + size_ = count <= capacity_ ? count : capacity_; + } + + // Tries increasing the buffer capacity to *new_capacity*. It can increase the + // capacity by a smaller amount than requested but guarantees there is space + // for at least one additional element either by increasing the capacity or by + // flushing the buffer if it is full. + FMT_CONSTEXPR20 void try_reserve(size_t new_capacity) { + if (new_capacity > capacity_) grow(new_capacity); + } + + FMT_CONSTEXPR20 void push_back(const T& value) { + try_reserve(size_ + 1); + ptr_[size_++] = value; + } + + /** Appends data to the end of the buffer. 
*/ + template void append(const U* begin, const U* end); + + template FMT_CONSTEXPR auto operator[](I index) -> T& { + return ptr_[index]; + } + template + FMT_CONSTEXPR auto operator[](I index) const -> const T& { + return ptr_[index]; + } +}; + +struct buffer_traits { + explicit buffer_traits(size_t) {} + auto count() const -> size_t { return 0; } + auto limit(size_t size) -> size_t { return size; } +}; + +class fixed_buffer_traits { + private: + size_t count_ = 0; + size_t limit_; + + public: + explicit fixed_buffer_traits(size_t limit) : limit_(limit) {} + auto count() const -> size_t { return count_; } + auto limit(size_t size) -> size_t { + size_t n = limit_ > count_ ? limit_ - count_ : 0; + count_ += size; + return size < n ? size : n; + } +}; + +// A buffer that writes to an output iterator when flushed. +template +class iterator_buffer final : public Traits, public buffer { + private: + OutputIt out_; + enum { buffer_size = 256 }; + T data_[buffer_size]; + + protected: + FMT_CONSTEXPR20 void grow(size_t) override { + if (this->size() == buffer_size) flush(); + } + + void flush() { + auto size = this->size(); + this->clear(); + out_ = copy_str(data_, data_ + this->limit(size), out_); + } + + public: + explicit iterator_buffer(OutputIt out, size_t n = buffer_size) + : Traits(n), buffer(data_, 0, buffer_size), out_(out) {} + iterator_buffer(iterator_buffer&& other) + : Traits(other), buffer(data_, 0, buffer_size), out_(other.out_) {} + ~iterator_buffer() { flush(); } + + auto out() -> OutputIt { + flush(); + return out_; + } + auto count() const -> size_t { return Traits::count() + this->size(); } +}; + +template +class iterator_buffer final + : public fixed_buffer_traits, + public buffer { + private: + T* out_; + enum { buffer_size = 256 }; + T data_[buffer_size]; + + protected: + FMT_CONSTEXPR20 void grow(size_t) override { + if (this->size() == this->capacity()) flush(); + } + + void flush() { + size_t n = this->limit(this->size()); + if (this->data() == out_) { + out_ += n; + this->set(data_, buffer_size); + } + this->clear(); + } + + public: + explicit iterator_buffer(T* out, size_t n = buffer_size) + : fixed_buffer_traits(n), buffer(out, 0, n), out_(out) {} + iterator_buffer(iterator_buffer&& other) + : fixed_buffer_traits(other), + buffer(std::move(other)), + out_(other.out_) { + if (this->data() != out_) { + this->set(data_, buffer_size); + this->clear(); + } + } + ~iterator_buffer() { flush(); } + + auto out() -> T* { + flush(); + return out_; + } + auto count() const -> size_t { + return fixed_buffer_traits::count() + this->size(); + } +}; + +template class iterator_buffer final : public buffer { + protected: + FMT_CONSTEXPR20 void grow(size_t) override {} + + public: + explicit iterator_buffer(T* out, size_t = 0) : buffer(out, 0, ~size_t()) {} + + auto out() -> T* { return &*this->end(); } +}; + +// A buffer that writes to a container with the contiguous storage. 
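+// This is the specialization behind calls such as (illustrative)
+//   std::string s;
+//   fmt::format_to(std::back_inserter(s), "{}", 42);
+// where grow() resizes s directly instead of staging the output in a
+// temporary buffer and copying it over afterwards.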
+template +class iterator_buffer, + enable_if_t::value, + typename Container::value_type>> + final : public buffer { + private: + Container& container_; + + protected: + FMT_CONSTEXPR20 void grow(size_t capacity) override { + container_.resize(capacity); + this->set(&container_[0], capacity); + } + + public: + explicit iterator_buffer(Container& c) + : buffer(c.size()), container_(c) {} + explicit iterator_buffer(std::back_insert_iterator out, size_t = 0) + : iterator_buffer(get_container(out)) {} + auto out() -> std::back_insert_iterator { + return std::back_inserter(container_); + } +}; + +// A buffer that counts the number of code units written discarding the output. +template class counting_buffer final : public buffer { + private: + enum { buffer_size = 256 }; + T data_[buffer_size]; + size_t count_ = 0; + + protected: + FMT_CONSTEXPR20 void grow(size_t) override { + if (this->size() != buffer_size) return; + count_ += this->size(); + this->clear(); + } + + public: + counting_buffer() : buffer(data_, 0, buffer_size) {} + + auto count() -> size_t { return count_ + this->size(); } +}; + +template +using buffer_appender = conditional_t::value, appender, + std::back_insert_iterator>>; + +// Maps an output iterator to a buffer. +template +auto get_buffer(OutputIt out) -> iterator_buffer { + return iterator_buffer(out); +} + +template +auto get_iterator(Buffer& buf) -> decltype(buf.out()) { + return buf.out(); +} +template auto get_iterator(buffer& buf) -> buffer_appender { + return buffer_appender(buf); +} + +template +struct fallback_formatter { + fallback_formatter() = delete; +}; + +// Specifies if T has an enabled fallback_formatter specialization. +template +using has_fallback_formatter = +#ifdef FMT_DEPRECATED_OSTREAM + std::is_constructible>; +#else + std::false_type; +#endif + +struct view {}; + +template struct named_arg : view { + const Char* name; + const T& value; + named_arg(const Char* n, const T& v) : name(n), value(v) {} +}; + +template struct named_arg_info { + const Char* name; + int id; +}; + +template +struct arg_data { + // args_[0].named_args points to named_args_ to avoid bloating format_args. + // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. + T args_[1 + (NUM_ARGS != 0 ? NUM_ARGS : +1)]; + named_arg_info named_args_[NUM_NAMED_ARGS]; + + template + arg_data(const U&... init) : args_{T(named_args_, NUM_NAMED_ARGS), init...} {} + arg_data(const arg_data& other) = delete; + auto args() const -> const T* { return args_ + 1; } + auto named_args() -> named_arg_info* { return named_args_; } +}; + +template +struct arg_data { + // +1 to workaround a bug in gcc 7.5 that causes duplicated-branches warning. + T args_[NUM_ARGS != 0 ? NUM_ARGS : +1]; + + template + FMT_CONSTEXPR FMT_INLINE arg_data(const U&... init) : args_{init...} {} + FMT_CONSTEXPR FMT_INLINE auto args() const -> const T* { return args_; } + FMT_CONSTEXPR FMT_INLINE auto named_args() -> std::nullptr_t { + return nullptr; + } +}; + +template +inline void init_named_args(named_arg_info*, int, int) {} + +template struct is_named_arg : std::false_type {}; +template struct is_statically_named_arg : std::false_type {}; + +template +struct is_named_arg> : std::true_type {}; + +template ::value)> +void init_named_args(named_arg_info* named_args, int arg_count, + int named_arg_count, const T&, const Tail&... 
args) { + init_named_args(named_args, arg_count + 1, named_arg_count, args...); +} + +template ::value)> +void init_named_args(named_arg_info* named_args, int arg_count, + int named_arg_count, const T& arg, const Tail&... args) { + named_args[named_arg_count++] = {arg.name, arg_count}; + init_named_args(named_args, arg_count + 1, named_arg_count, args...); +} + +template +FMT_CONSTEXPR FMT_INLINE void init_named_args(std::nullptr_t, int, int, + const Args&...) {} + +template constexpr auto count() -> size_t { return B ? 1 : 0; } +template constexpr auto count() -> size_t { + return (B1 ? 1 : 0) + count(); +} + +template constexpr auto count_named_args() -> size_t { + return count::value...>(); +} + +template +constexpr auto count_statically_named_args() -> size_t { + return count::value...>(); +} + +enum class type { + none_type, + // Integer types should go first, + int_type, + uint_type, + long_long_type, + ulong_long_type, + int128_type, + uint128_type, + bool_type, + char_type, + last_integer_type = char_type, + // followed by floating-point types. + float_type, + double_type, + long_double_type, + last_numeric_type = long_double_type, + cstring_type, + string_type, + pointer_type, + custom_type +}; + +// Maps core type T to the corresponding type enum constant. +template +struct type_constant : std::integral_constant {}; + +#define FMT_TYPE_CONSTANT(Type, constant) \ + template \ + struct type_constant \ + : std::integral_constant {} + +FMT_TYPE_CONSTANT(int, int_type); +FMT_TYPE_CONSTANT(unsigned, uint_type); +FMT_TYPE_CONSTANT(long long, long_long_type); +FMT_TYPE_CONSTANT(unsigned long long, ulong_long_type); +FMT_TYPE_CONSTANT(int128_opt, int128_type); +FMT_TYPE_CONSTANT(uint128_opt, uint128_type); +FMT_TYPE_CONSTANT(bool, bool_type); +FMT_TYPE_CONSTANT(Char, char_type); +FMT_TYPE_CONSTANT(float, float_type); +FMT_TYPE_CONSTANT(double, double_type); +FMT_TYPE_CONSTANT(long double, long_double_type); +FMT_TYPE_CONSTANT(const Char*, cstring_type); +FMT_TYPE_CONSTANT(basic_string_view, string_type); +FMT_TYPE_CONSTANT(const void*, pointer_type); + +constexpr bool is_integral_type(type t) { + return t > type::none_type && t <= type::last_integer_type; +} + +constexpr bool is_arithmetic_type(type t) { + return t > type::none_type && t <= type::last_numeric_type; +} + +struct unformattable {}; +struct unformattable_char : unformattable {}; +struct unformattable_const : unformattable {}; +struct unformattable_pointer : unformattable {}; + +template struct string_value { + const Char* data; + size_t size; +}; + +template struct named_arg_value { + const named_arg_info* data; + size_t size; +}; + +template struct custom_value { + using parse_context = typename Context::parse_context_type; + void* value; + void (*format)(void* arg, parse_context& parse_ctx, Context& ctx); +}; + +// A formatting argument value. 
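+// In effect this is the payload half of a tagged union: the matching
+// detail::type tag is stored alongside it in basic_format_arg. For an int
+// argument (illustrative), int_value holds the data and visit_format_arg
+// dispatches on type::int_type.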
+template class value { + public: + using char_type = typename Context::char_type; + + union { + monostate no_value; + int int_value; + unsigned uint_value; + long long long_long_value; + unsigned long long ulong_long_value; + int128_opt int128_value; + uint128_opt uint128_value; + bool bool_value; + char_type char_value; + float float_value; + double double_value; + long double long_double_value; + const void* pointer; + string_value string; + custom_value custom; + named_arg_value named_args; + }; + + constexpr FMT_INLINE value() : no_value() {} + constexpr FMT_INLINE value(int val) : int_value(val) {} + constexpr FMT_INLINE value(unsigned val) : uint_value(val) {} + constexpr FMT_INLINE value(long long val) : long_long_value(val) {} + constexpr FMT_INLINE value(unsigned long long val) : ulong_long_value(val) {} + FMT_INLINE value(int128_opt val) : int128_value(val) {} + FMT_INLINE value(uint128_opt val) : uint128_value(val) {} + constexpr FMT_INLINE value(float val) : float_value(val) {} + constexpr FMT_INLINE value(double val) : double_value(val) {} + FMT_INLINE value(long double val) : long_double_value(val) {} + constexpr FMT_INLINE value(bool val) : bool_value(val) {} + constexpr FMT_INLINE value(char_type val) : char_value(val) {} + FMT_CONSTEXPR FMT_INLINE value(const char_type* val) { + string.data = val; + if (is_constant_evaluated()) string.size = {}; + } + FMT_CONSTEXPR FMT_INLINE value(basic_string_view val) { + string.data = val.data(); + string.size = val.size(); + } + FMT_INLINE value(const void* val) : pointer(val) {} + FMT_INLINE value(const named_arg_info* args, size_t size) + : named_args{args, size} {} + + template FMT_CONSTEXPR FMT_INLINE value(T& val) { + using value_type = remove_cvref_t; + custom.value = const_cast(&val); + // Get the formatter type through the context to allow different contexts + // have different extension points, e.g. `formatter` for `format` and + // `printf_formatter` for `printf`. + custom.format = format_custom_arg< + value_type, + conditional_t::value, + typename Context::template formatter_type, + fallback_formatter>>; + } + value(unformattable); + value(unformattable_char); + value(unformattable_const); + value(unformattable_pointer); + + private: + // Formats an argument of a custom type, such as a user-defined class. + template + static void format_custom_arg(void* arg, + typename Context::parse_context_type& parse_ctx, + Context& ctx) { + auto f = Formatter(); + parse_ctx.advance_to(f.parse(parse_ctx)); + using qualified_type = + conditional_t(), const T, T>; + ctx.advance_to(f.format(*static_cast(arg), ctx)); + } +}; + +template +FMT_CONSTEXPR auto make_arg(T&& value) -> basic_format_arg; + +// To minimize the number of types we need to deal with, long is translated +// either to int or to long long depending on its size. +enum { long_short = sizeof(long) == sizeof(int) }; +using long_type = conditional_t; +using ulong_type = conditional_t; + +#ifdef __cpp_lib_byte +inline auto format_as(std::byte b) -> unsigned char { + return static_cast(b); +} +#endif + +template struct has_format_as { + template ::value&& std::is_integral::value)> + static auto check(U*) -> std::true_type; + static auto check(...) -> std::false_type; + + enum { value = decltype(check(static_cast(nullptr)))::value }; +}; + +// Maps formatting arguments to core types. +// arg_mapper reports errors by returning unformattable instead of using +// static_assert because it's used in the is_formattable trait. 
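+// A few illustrative mappings (not upstream documentation), in a char
+// context:
+//   map(short(7))          -> int
+//   map(L'x')              -> unformattable_char
+//   map(std::string("hi")) -> basic_string_view<char>
+// so the formatting core only ever sees the small set of core types above.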
+template struct arg_mapper { + using char_type = typename Context::char_type; + + FMT_CONSTEXPR FMT_INLINE auto map(signed char val) -> int { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned char val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(short val) -> int { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned short val) -> unsigned { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(int val) -> int { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned val) -> unsigned { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(long val) -> long_type { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned long val) -> ulong_type { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(long long val) -> long long { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(unsigned long long val) + -> unsigned long long { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(int128_opt val) -> int128_opt { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(uint128_opt val) -> uint128_opt { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(bool val) -> bool { return val; } + + template ::value || + std::is_same::value)> + FMT_CONSTEXPR FMT_INLINE auto map(T val) -> char_type { + return val; + } + template ::value || +#ifdef __cpp_char8_t + std::is_same::value || +#endif + std::is_same::value || + std::is_same::value) && + !std::is_same::value, + int> = 0> + FMT_CONSTEXPR FMT_INLINE auto map(T) -> unformattable_char { + return {}; + } + + FMT_CONSTEXPR FMT_INLINE auto map(float val) -> float { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(double val) -> double { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(long double val) -> long double { + return val; + } + + FMT_CONSTEXPR FMT_INLINE auto map(char_type* val) -> const char_type* { + return val; + } + FMT_CONSTEXPR FMT_INLINE auto map(const char_type* val) -> const char_type* { + return val; + } + template ::value && !std::is_pointer::value && + std::is_same>::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> basic_string_view { + return to_string_view(val); + } + template ::value && !std::is_pointer::value && + !std::is_same>::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T&) -> unformattable_char { + return {}; + } + template >::value && + !is_string::value && !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> basic_string_view { + return basic_string_view(val); + } + template >::value && + !std::is_convertible>::value && + !is_string::value && !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> basic_string_view { + return std_string_view(val); + } + + using cstring_result = conditional_t::value, + const char*, unformattable_pointer>; + + FMT_DEPRECATED FMT_CONSTEXPR FMT_INLINE auto map(const signed char* val) + -> cstring_result { + return map(reinterpret_cast(val)); + } + FMT_DEPRECATED FMT_CONSTEXPR FMT_INLINE auto map(const unsigned char* val) + -> cstring_result { + return map(reinterpret_cast(val)); + } + FMT_DEPRECATED FMT_CONSTEXPR FMT_INLINE auto map(signed char* val) + -> cstring_result { + return map(reinterpret_cast(val)); + } + FMT_DEPRECATED FMT_CONSTEXPR FMT_INLINE auto map(unsigned char* val) + -> cstring_result { + return map(reinterpret_cast(val)); + } + + FMT_CONSTEXPR FMT_INLINE auto map(void* val) -> const void* { return val; } + FMT_CONSTEXPR FMT_INLINE auto map(const void* val) -> const void* { + return val; + } + 
FMT_CONSTEXPR FMT_INLINE auto map(std::nullptr_t val) -> const void* { + return val; + } + + // We use SFINAE instead of a const T* parameter to avoid conflicting with + // the C array overload. + template < + typename T, + FMT_ENABLE_IF( + std::is_pointer::value || std::is_member_pointer::value || + std::is_function::type>::value || + (std::is_convertible::value && + !std::is_convertible::value && + !has_formatter::value))> + FMT_CONSTEXPR auto map(const T&) -> unformattable_pointer { + return {}; + } + + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T (&values)[N]) -> const T (&)[N] { + return values; + } + + template ::value&& std::is_convertible::value && + !has_formatter::value && + !has_fallback_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> decltype(std::declval().map( + static_cast>(val))) { + return map(static_cast>(val)); + } + + template ::value && + !has_formatter::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& val) + -> decltype(std::declval().map(format_as(T()))) { + return map(format_as(val)); + } + + template > + struct formattable + : bool_constant() || + !std::is_const>::value || + has_fallback_formatter::value> {}; + +#if (FMT_MSC_VER != 0 && FMT_MSC_VER < 1910) || FMT_ICC_VERSION != 0 || \ + FMT_NVCC != 0 + // Workaround a bug in MSVC and Intel (Issue 2746). + template FMT_CONSTEXPR FMT_INLINE auto do_map(T&& val) -> T& { + return val; + } +#else + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto do_map(T&& val) -> T& { + return val; + } + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto do_map(T&&) -> unformattable_const { + return {}; + } +#endif + + template , + FMT_ENABLE_IF(!is_string::value && !is_char::value && + !std::is_array::value && + !std::is_pointer::value && + !has_format_as::value && + (has_formatter::value || + has_fallback_formatter::value))> + FMT_CONSTEXPR FMT_INLINE auto map(T&& val) + -> decltype(this->do_map(std::forward(val))) { + return do_map(std::forward(val)); + } + + template ::value)> + FMT_CONSTEXPR FMT_INLINE auto map(const T& named_arg) + -> decltype(std::declval().map(named_arg.value)) { + return map(named_arg.value); + } + + auto map(...) -> unformattable { return {}; } +}; + +// A type constant after applying arg_mapper. +template +using mapped_type_constant = + type_constant().map(std::declval())), + typename Context::char_type>; + +enum { packed_arg_bits = 4 }; +// Maximum number of arguments with packed types. +enum { max_packed_args = 62 / packed_arg_bits }; +enum : unsigned long long { is_unpacked_bit = 1ULL << 63 }; +enum : unsigned long long { has_named_args_bit = 1ULL << 62 }; + +FMT_END_DETAIL_NAMESPACE + +// An output iterator that appends to a buffer. +// It is used to reduce symbol sizes for the common case. +class appender : public std::back_insert_iterator> { + using base = std::back_insert_iterator>; + + template + friend auto get_buffer(appender out) -> detail::buffer& { + return detail::get_container(out); + } + + public: + using std::back_insert_iterator>::back_insert_iterator; + appender(base it) noexcept : base(it) {} + FMT_UNCHECKED_ITERATOR(appender); + + auto operator++() noexcept -> appender& { return *this; } + + auto operator++(int) noexcept -> appender { return *this; } +}; + +// A formatting argument. It is a trivially copyable/constructible type to +// allow storage in basic_memory_buffer. 
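The enums just above fix the argument-store layout: each mapped type constant occupies `packed_arg_bits` (4) bits of a 64-bit descriptor, so at most 62/4 = 15 argument types are packed, with bits 62 and 63 reserved for the named-args and unpacked flags; `basic_format_arg`, defined next, is what an unpacked store holds instead. A self-contained sketch of that packing arithmetic (the `pack_types` helper and the example type codes are illustrative, not fmt API):

    #include <cstdint>
    #include <initializer_list>

    constexpr std::uint64_t pack_types(std::initializer_list<unsigned> types) {
      std::uint64_t desc = 0;
      int shift = 0;
      for (unsigned t : types) {  // one 4-bit type constant per argument
        desc |= static_cast<std::uint64_t>(t & 0xF) << shift;
        shift += 4;               // packed_arg_bits
      }
      return desc;
    }
    // With type codes 1 and 2 (int and uint in detail::type's order), the
    // descriptor reads 0x21: low nibble first, matching encode_types().
    static_assert(pack_types({1, 2}) == 0x21, "two packed type codes");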
+template class basic_format_arg { + private: + detail::value value_; + detail::type type_; + + template + friend FMT_CONSTEXPR auto detail::make_arg(T&& value) + -> basic_format_arg; + + template + friend FMT_CONSTEXPR auto visit_format_arg(Visitor&& vis, + const basic_format_arg& arg) + -> decltype(vis(0)); + + friend class basic_format_args; + friend class dynamic_format_arg_store; + + using char_type = typename Context::char_type; + + template + friend struct detail::arg_data; + + basic_format_arg(const detail::named_arg_info* args, size_t size) + : value_(args, size) {} + + public: + class handle { + public: + explicit handle(detail::custom_value custom) : custom_(custom) {} + + void format(typename Context::parse_context_type& parse_ctx, + Context& ctx) const { + custom_.format(custom_.value, parse_ctx, ctx); + } + + private: + detail::custom_value custom_; + }; + + constexpr basic_format_arg() : type_(detail::type::none_type) {} + + constexpr explicit operator bool() const noexcept { + return type_ != detail::type::none_type; + } + + auto type() const -> detail::type { return type_; } + + auto is_integral() const -> bool { return detail::is_integral_type(type_); } + auto is_arithmetic() const -> bool { + return detail::is_arithmetic_type(type_); + } +}; + +/** + \rst + Visits an argument dispatching to the appropriate visit method based on + the argument type. For example, if the argument type is ``double`` then + ``vis(value)`` will be called with the value of type ``double``. + \endrst + */ +template +FMT_CONSTEXPR FMT_INLINE auto visit_format_arg( + Visitor&& vis, const basic_format_arg& arg) -> decltype(vis(0)) { + switch (arg.type_) { + case detail::type::none_type: + break; + case detail::type::int_type: + return vis(arg.value_.int_value); + case detail::type::uint_type: + return vis(arg.value_.uint_value); + case detail::type::long_long_type: + return vis(arg.value_.long_long_value); + case detail::type::ulong_long_type: + return vis(arg.value_.ulong_long_value); + case detail::type::int128_type: + return vis(detail::convert_for_visit(arg.value_.int128_value)); + case detail::type::uint128_type: + return vis(detail::convert_for_visit(arg.value_.uint128_value)); + case detail::type::bool_type: + return vis(arg.value_.bool_value); + case detail::type::char_type: + return vis(arg.value_.char_value); + case detail::type::float_type: + return vis(arg.value_.float_value); + case detail::type::double_type: + return vis(arg.value_.double_value); + case detail::type::long_double_type: + return vis(arg.value_.long_double_value); + case detail::type::cstring_type: + return vis(arg.value_.string.data); + case detail::type::string_type: + using sv = basic_string_view; + return vis(sv(arg.value_.string.data, arg.value_.string.size)); + case detail::type::pointer_type: + return vis(arg.value_.pointer); + case detail::type::custom_type: + return vis(typename basic_format_arg::handle(arg.value_.custom)); + } + return vis(monostate()); +} + +FMT_BEGIN_DETAIL_NAMESPACE + +template +auto copy_str(InputIt begin, InputIt end, appender out) -> appender { + get_container(out).append(begin, end); + return out; +} + +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 500 +// A workaround for gcc 4.8 to make void_t work in a SFINAE context. 
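`visit_format_arg` is the supported way to consume a type-erased argument; the `void_t` workaround below exists only so the surrounding traits compile on gcc 4.8. A usage sketch with the public API (the `print_visitor` type is illustrative):

    #include <fmt/core.h>
    #include <cstdio>

    struct print_visitor {
      void operator()(int v) const { std::printf("int: %d\n", v); }
      template <typename T> void operator()(const T&) const {
        std::printf("something else\n");
      }
    };

    int main() {
      auto store = fmt::make_format_args(42, 3.14);
      fmt::format_args args = store;  // type-erased view of the store
      fmt::visit_format_arg(print_visitor{}, args.get(0));  // prints "int: 42"
    }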
+template struct void_t_impl { using type = void; }; +template +using void_t = typename detail::void_t_impl::type; +#else +template using void_t = void; +#endif + +template +struct is_output_iterator : std::false_type {}; + +template +struct is_output_iterator< + It, T, + void_t::iterator_category, + decltype(*std::declval() = std::declval())>> + : std::true_type {}; + +template +struct is_back_insert_iterator : std::false_type {}; +template +struct is_back_insert_iterator> + : std::true_type {}; + +template +struct is_contiguous_back_insert_iterator : std::false_type {}; +template +struct is_contiguous_back_insert_iterator> + : is_contiguous {}; +template <> +struct is_contiguous_back_insert_iterator : std::true_type {}; + +// A type-erased reference to an std::locale to avoid heavy include. +class locale_ref { + private: + const void* locale_; // A type-erased pointer to std::locale. + + public: + constexpr locale_ref() : locale_(nullptr) {} + template explicit locale_ref(const Locale& loc); + + explicit operator bool() const noexcept { return locale_ != nullptr; } + + template auto get() const -> Locale; +}; + +template constexpr auto encode_types() -> unsigned long long { + return 0; +} + +template +constexpr auto encode_types() -> unsigned long long { + return static_cast(mapped_type_constant::value) | + (encode_types() << packed_arg_bits); +} + +template +FMT_CONSTEXPR FMT_INLINE auto make_value(T&& val) -> value { + const auto& arg = arg_mapper().map(std::forward(val)); + + constexpr bool formattable_char = + !std::is_same::value; + static_assert(formattable_char, "Mixing character types is disallowed."); + + constexpr bool formattable_const = + !std::is_same::value; + static_assert(formattable_const, "Cannot format a const argument."); + + // Formatting of arbitrary pointers is disallowed. If you want to output + // a pointer cast it to "void *" or "const void *". In particular, this + // forbids formatting of "[const] volatile char *" which is printed as bool + // by iostreams. + constexpr bool formattable_pointer = + !std::is_same::value; + static_assert(formattable_pointer, + "Formatting of non-void pointers is disallowed."); + + constexpr bool formattable = + !std::is_same::value; + static_assert( + formattable, + "Cannot format an argument. To make type T formattable provide a " + "formatter specialization: https://fmt.dev/latest/api.html#udt"); + return {arg}; +} + +template +FMT_CONSTEXPR auto make_arg(T&& value) -> basic_format_arg { + basic_format_arg arg; + arg.type_ = mapped_type_constant::value; + arg.value_ = make_value(value); + return arg; +} + +// The type template parameter is there to avoid an ODR violation when using +// a fallback formatter in one translation unit and an implicit conversion in +// another (not recommended). +template +FMT_CONSTEXPR FMT_INLINE auto make_arg(T&& val) -> value { + return make_value(val); +} + +template +FMT_CONSTEXPR inline auto make_arg(T&& value) -> basic_format_arg { + return make_arg(value); +} +FMT_END_DETAIL_NAMESPACE + +// Formatting context. +template class basic_format_context { + public: + /** The character type for the output. 
*/ + using char_type = Char; + + private: + OutputIt out_; + basic_format_args args_; + detail::locale_ref loc_; + + public: + using iterator = OutputIt; + using format_arg = basic_format_arg; + using parse_context_type = basic_format_parse_context; + template using formatter_type = formatter; + + basic_format_context(basic_format_context&&) = default; + basic_format_context(const basic_format_context&) = delete; + void operator=(const basic_format_context&) = delete; + /** + Constructs a ``basic_format_context`` object. References to the arguments are + stored in the object so make sure they have appropriate lifetimes. + */ + constexpr basic_format_context( + OutputIt out, basic_format_args ctx_args, + detail::locale_ref loc = detail::locale_ref()) + : out_(out), args_(ctx_args), loc_(loc) {} + + constexpr auto arg(int id) const -> format_arg { return args_.get(id); } + FMT_CONSTEXPR auto arg(basic_string_view name) -> format_arg { + return args_.get(name); + } + FMT_CONSTEXPR auto arg_id(basic_string_view name) -> int { + return args_.get_id(name); + } + auto args() const -> const basic_format_args& { + return args_; + } + + FMT_CONSTEXPR auto error_handler() -> detail::error_handler { return {}; } + void on_error(const char* message) { error_handler().on_error(message); } + + // Returns an iterator to the beginning of the output range. + FMT_CONSTEXPR auto out() -> iterator { return out_; } + + // Advances the begin iterator to ``it``. + void advance_to(iterator it) { + if (!detail::is_back_insert_iterator()) out_ = it; + } + + FMT_CONSTEXPR auto locale() -> detail::locale_ref { return loc_; } +}; + +template +using buffer_context = + basic_format_context, Char>; +using format_context = buffer_context; + +// Workaround an alias issue: https://stackoverflow.com/q/62767544/471164. +#define FMT_BUFFER_CONTEXT(Char) \ + basic_format_context, Char> + +template +using is_formattable = bool_constant< + !std::is_base_of>().map( + std::declval()))>::value && + !detail::has_fallback_formatter::value>; + +/** + \rst + An array of references to arguments. It can be implicitly converted into + `~fmt::basic_format_args` for passing into type-erased formatting functions + such as `~fmt::vformat`. + \endrst + */ +template +class format_arg_store +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 + // Workaround a GCC template argument substitution bug. + : public basic_format_args +#endif +{ + private: + static const size_t num_args = sizeof...(Args); + static const size_t num_named_args = detail::count_named_args(); + static const bool is_packed = num_args <= detail::max_packed_args; + + using value_type = conditional_t, + basic_format_arg>; + + detail::arg_data + data_; + + friend class basic_format_args; + + static constexpr unsigned long long desc = + (is_packed ? detail::encode_types() + : detail::is_unpacked_bit | num_args) | + (num_named_args != 0 + ? static_cast(detail::has_named_args_bit) + : 0); + + public: + template + FMT_CONSTEXPR FMT_INLINE format_arg_store(T&&... args) + : +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 + basic_format_args(*this), +#endif + data_{detail::make_arg< + is_packed, Context, + detail::mapped_type_constant, Context>::value>( + std::forward(args))...} { + detail::init_named_args(data_.named_args(), 0, 0, args...); + } +}; + +/** + \rst + Constructs a `~fmt::format_arg_store` object that contains references to + arguments and can be implicitly converted to `~fmt::format_args`. `Context` + can be omitted in which case it defaults to `~fmt::context`. 
+ See `~fmt::arg` for lifetime considerations. + \endrst + */ +template +constexpr auto make_format_args(Args&&... args) + -> format_arg_store...> { + return {std::forward(args)...}; +} + +/** + \rst + Returns a named argument to be used in a formatting function. + It should only be used in a call to a formatting function or + `dynamic_format_arg_store::push_back`. + + **Example**:: + + fmt::print("Elapsed time: {s:.2f} seconds", fmt::arg("s", 1.23)); + \endrst + */ +template +inline auto arg(const Char* name, const T& arg) -> detail::named_arg { + static_assert(!detail::is_named_arg(), "nested named arguments"); + return {name, arg}; +} + +/** + \rst + A view of a collection of formatting arguments. To avoid lifetime issues it + should only be used as a parameter type in type-erased functions such as + ``vformat``:: + + void vlog(string_view format_str, format_args args); // OK + format_args args = make_format_args(42); // Error: dangling reference + \endrst + */ +template class basic_format_args { + public: + using size_type = int; + using format_arg = basic_format_arg; + + private: + // A descriptor that contains information about formatting arguments. + // If the number of arguments is less or equal to max_packed_args then + // argument types are passed in the descriptor. This reduces binary code size + // per formatting function call. + unsigned long long desc_; + union { + // If is_packed() returns true then argument values are stored in values_; + // otherwise they are stored in args_. This is done to improve cache + // locality and reduce compiled code size since storing larger objects + // may require more code (at least on x86-64) even if the same amount of + // data is actually copied to stack. It saves ~10% on the bloat test. + const detail::value* values_; + const format_arg* args_; + }; + + constexpr auto is_packed() const -> bool { + return (desc_ & detail::is_unpacked_bit) == 0; + } + auto has_named_args() const -> bool { + return (desc_ & detail::has_named_args_bit) != 0; + } + + FMT_CONSTEXPR auto type(int index) const -> detail::type { + int shift = index * detail::packed_arg_bits; + unsigned int mask = (1 << detail::packed_arg_bits) - 1; + return static_cast((desc_ >> shift) & mask); + } + + constexpr FMT_INLINE basic_format_args(unsigned long long desc, + const detail::value* values) + : desc_(desc), values_(values) {} + constexpr basic_format_args(unsigned long long desc, const format_arg* args) + : desc_(desc), args_(args) {} + + public: + constexpr basic_format_args() : desc_(0), args_(nullptr) {} + + /** + \rst + Constructs a `basic_format_args` object from `~fmt::format_arg_store`. + \endrst + */ + template + constexpr FMT_INLINE basic_format_args( + const format_arg_store& store) + : basic_format_args(format_arg_store::desc, + store.data_.args()) {} + + /** + \rst + Constructs a `basic_format_args` object from + `~fmt::dynamic_format_arg_store`. + \endrst + */ + constexpr FMT_INLINE basic_format_args( + const dynamic_format_arg_store& store) + : basic_format_args(store.get_types(), store.data()) {} + + /** + \rst + Constructs a `basic_format_args` object from a dynamic set of arguments. + \endrst + */ + constexpr basic_format_args(const format_arg* args, int count) + : basic_format_args(detail::is_unpacked_bit | detail::to_unsigned(count), + args) {} + + /** Returns the argument with the specified id. 
*/ + FMT_CONSTEXPR auto get(int id) const -> format_arg { + format_arg arg; + if (!is_packed()) { + if (id < max_size()) arg = args_[id]; + return arg; + } + if (id >= detail::max_packed_args) return arg; + arg.type_ = type(id); + if (arg.type_ == detail::type::none_type) return arg; + arg.value_ = values_[id]; + return arg; + } + + template + auto get(basic_string_view name) const -> format_arg { + int id = get_id(name); + return id >= 0 ? get(id) : format_arg(); + } + + template + auto get_id(basic_string_view name) const -> int { + if (!has_named_args()) return -1; + const auto& named_args = + (is_packed() ? values_[-1] : args_[-1].value_).named_args; + for (size_t i = 0; i < named_args.size; ++i) { + if (named_args.data[i].name == name) return named_args.data[i].id; + } + return -1; + } + + auto max_size() const -> int { + unsigned long long max_packed = detail::max_packed_args; + return static_cast(is_packed() ? max_packed + : desc_ & ~detail::is_unpacked_bit); + } +}; + +/** An alias to ``basic_format_args``. */ +// A separate type would result in shorter symbols but break ABI compatibility +// between clang and gcc on ARM (#1919). +using format_args = basic_format_args; + +// We cannot use enum classes as bit fields because of a gcc bug +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414. +namespace align { +enum type { none, left, right, center, numeric }; +} +using align_t = align::type; +namespace sign { +enum type { none, minus, plus, space }; +} +using sign_t = sign::type; + +FMT_BEGIN_DETAIL_NAMESPACE + +// Workaround an array initialization issue in gcc 4.8. +template struct fill_t { + private: + enum { max_size = 4 }; + Char data_[max_size] = {Char(' '), Char(0), Char(0), Char(0)}; + unsigned char size_ = 1; + + public: + FMT_CONSTEXPR void operator=(basic_string_view s) { + auto size = s.size(); + if (size > max_size) return throw_format_error("invalid fill"); + for (size_t i = 0; i < size; ++i) data_[i] = s[i]; + size_ = static_cast(size); + } + + constexpr auto size() const -> size_t { return size_; } + constexpr auto data() const -> const Char* { return data_; } + + FMT_CONSTEXPR auto operator[](size_t index) -> Char& { return data_[index]; } + FMT_CONSTEXPR auto operator[](size_t index) const -> const Char& { + return data_[index]; + } +}; +FMT_END_DETAIL_NAMESPACE + +enum class presentation_type : unsigned char { + none, + // Integer types should go first, + dec, // 'd' + oct, // 'o' + hex_lower, // 'x' + hex_upper, // 'X' + bin_lower, // 'b' + bin_upper, // 'B' + hexfloat_lower, // 'a' + hexfloat_upper, // 'A' + exp_lower, // 'e' + exp_upper, // 'E' + fixed_lower, // 'f' + fixed_upper, // 'F' + general_lower, // 'g' + general_upper, // 'G' + chr, // 'c' + string, // 's' + pointer, // 'p' + debug // '?' +}; + +// Format specifiers for built-in and string types. +template struct basic_format_specs { + int width; + int precision; + presentation_type type; + align_t align : 4; + sign_t sign : 3; + bool alt : 1; // Alternate form ('#'). + bool localized : 1; + detail::fill_t fill; + + constexpr basic_format_specs() + : width(0), + precision(-1), + type(presentation_type::none), + align(align::none), + sign(sign::none), + alt(false), + localized(false) {} +}; + +using format_specs = basic_format_specs; + +FMT_BEGIN_DETAIL_NAMESPACE + +enum class arg_id_kind { none, index, name }; + +// An argument reference. 
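To ground `basic_format_specs`, here is how one concrete specifier populates its fields, checkable through the public API; the `arg_ref` struct defined next covers the case where width or precision is itself an argument:

    #include <fmt/core.h>
    #include <cassert>

    int main() {
      // "{:*^+10.2f}" parses to: fill "*", align::center, sign::plus,
      // width 10, precision 2, type presentation_type::fixed_lower.
      assert(fmt::format("{:*^+10.2f}", 3.14159) == "**+3.14***");
    }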
+template struct arg_ref { + FMT_CONSTEXPR arg_ref() : kind(arg_id_kind::none), val() {} + + FMT_CONSTEXPR explicit arg_ref(int index) + : kind(arg_id_kind::index), val(index) {} + FMT_CONSTEXPR explicit arg_ref(basic_string_view name) + : kind(arg_id_kind::name), val(name) {} + + FMT_CONSTEXPR auto operator=(int idx) -> arg_ref& { + kind = arg_id_kind::index; + val.index = idx; + return *this; + } + + arg_id_kind kind; + union value { + FMT_CONSTEXPR value(int id = 0) : index{id} {} + FMT_CONSTEXPR value(basic_string_view n) : name(n) {} + + int index; + basic_string_view name; + } val; +}; + +// Format specifiers with width and precision resolved at formatting rather +// than parsing time to allow re-using the same parsed specifiers with +// different sets of arguments (precompilation of format strings). +template +struct dynamic_format_specs : basic_format_specs { + arg_ref width_ref; + arg_ref precision_ref; +}; + +struct auto_id {}; + +// A format specifier handler that sets fields in basic_format_specs. +template class specs_setter { + protected: + basic_format_specs& specs_; + + public: + explicit FMT_CONSTEXPR specs_setter(basic_format_specs& specs) + : specs_(specs) {} + + FMT_CONSTEXPR specs_setter(const specs_setter& other) + : specs_(other.specs_) {} + + FMT_CONSTEXPR void on_align(align_t align) { specs_.align = align; } + FMT_CONSTEXPR void on_fill(basic_string_view fill) { + specs_.fill = fill; + } + FMT_CONSTEXPR void on_sign(sign_t s) { specs_.sign = s; } + FMT_CONSTEXPR void on_hash() { specs_.alt = true; } + FMT_CONSTEXPR void on_localized() { specs_.localized = true; } + + FMT_CONSTEXPR void on_zero() { + if (specs_.align == align::none) specs_.align = align::numeric; + specs_.fill[0] = Char('0'); + } + + FMT_CONSTEXPR void on_width(int width) { specs_.width = width; } + FMT_CONSTEXPR void on_precision(int precision) { + specs_.precision = precision; + } + FMT_CONSTEXPR void end_precision() {} + + FMT_CONSTEXPR void on_type(presentation_type type) { specs_.type = type; } +}; + +// Format spec handler that saves references to arguments representing dynamic +// width and precision to be resolved at formatting time. 
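From the user's side, the dynamic width and precision that `dynamic_specs_handler` (below) records as `arg_ref`s appear as nested `{}` fields; a short sketch using the public API:

    #include <fmt/core.h>
    #include <cassert>

    int main() {
      // Width 10 and precision 3 are taken from the argument list:
      assert(fmt::format("{:{}.{}f}", 3.14159, 10, 3) == "     3.142");
      // Indexed references to the same effect:
      assert(fmt::format("{0:.{1}f}", 2.71828, 2) == "2.72");
    }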
+template +class dynamic_specs_handler + : public specs_setter { + public: + using char_type = typename ParseContext::char_type; + + FMT_CONSTEXPR dynamic_specs_handler(dynamic_format_specs& specs, + ParseContext& ctx) + : specs_setter(specs), specs_(specs), context_(ctx) {} + + FMT_CONSTEXPR dynamic_specs_handler(const dynamic_specs_handler& other) + : specs_setter(other), + specs_(other.specs_), + context_(other.context_) {} + + template FMT_CONSTEXPR void on_dynamic_width(Id arg_id) { + specs_.width_ref = make_arg_ref(arg_id); + } + + template FMT_CONSTEXPR void on_dynamic_precision(Id arg_id) { + specs_.precision_ref = make_arg_ref(arg_id); + } + + FMT_CONSTEXPR void on_error(const char* message) { + context_.on_error(message); + } + + private: + dynamic_format_specs& specs_; + ParseContext& context_; + + using arg_ref_type = arg_ref; + + FMT_CONSTEXPR auto make_arg_ref(int arg_id) -> arg_ref_type { + context_.check_arg_id(arg_id); + return arg_ref_type(arg_id); + } + + FMT_CONSTEXPR auto make_arg_ref(auto_id) -> arg_ref_type { + return arg_ref_type(context_.next_arg_id()); + } + + FMT_CONSTEXPR auto make_arg_ref(basic_string_view arg_id) + -> arg_ref_type { + context_.check_arg_id(arg_id); + basic_string_view format_str( + context_.begin(), to_unsigned(context_.end() - context_.begin())); + return arg_ref_type(arg_id); + } +}; + +template constexpr bool is_ascii_letter(Char c) { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); +} + +// Converts a character to ASCII. Returns a number > 127 on conversion failure. +template ::value)> +constexpr auto to_ascii(Char c) -> Char { + return c; +} +template ::value)> +constexpr auto to_ascii(Char c) -> underlying_t { + return c; +} + +template +FMT_CONSTEXPR auto code_point_length(const Char* begin) -> int { + if (const_check(sizeof(Char) != 1)) return 1; + auto lengths = + "\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\1\0\0\0\0\0\0\0\0\2\2\2\2\3\3\4"; + int len = lengths[static_cast(*begin) >> 3]; + + // Compute the pointer to the next character early so that the next + // iteration can start working on the next character. Neither Clang + // nor GCC figure out this reordering on their own. + return len + !len; +} + +// Return the result via the out param to workaround gcc bug 77539. +template +FMT_CONSTEXPR auto find(Ptr first, Ptr last, T value, Ptr& out) -> bool { + for (out = first; out != last; ++out) { + if (*out == value) return true; + } + return false; +} + +template <> +inline auto find(const char* first, const char* last, char value, + const char*& out) -> bool { + out = static_cast( + std::memchr(first, value, to_unsigned(last - first))); + return out != nullptr; +} + +// Parses the range [begin, end) as an unsigned integer. This function assumes +// that the range is non-empty and the first character is a digit. +template +FMT_CONSTEXPR auto parse_nonnegative_int(const Char*& begin, const Char* end, + int error_value) noexcept -> int { + FMT_ASSERT(begin != end && '0' <= *begin && *begin <= '9', ""); + unsigned value = 0, prev = 0; + auto p = begin; + do { + prev = value; + value = value * 10 + unsigned(*p - '0'); + ++p; + } while (p != end && '0' <= *p && *p <= '9'); + auto num_digits = p - begin; + begin = p; + if (num_digits <= std::numeric_limits::digits10) + return static_cast(value); + // Check for overflow. + const unsigned max = to_unsigned((std::numeric_limits::max)()); + return num_digits == std::numeric_limits::digits10 + 1 && + prev * 10ull + unsigned(p[-1] - '0') <= max + ? 
static_cast(value) + : error_value; +} + +// Parses fill and alignment. +template +FMT_CONSTEXPR auto parse_align(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + FMT_ASSERT(begin != end, ""); + auto align = align::none; + auto p = begin + code_point_length(begin); + if (end - p <= 0) p = begin; + for (;;) { + switch (to_ascii(*p)) { + case '<': + align = align::left; + break; + case '>': + align = align::right; + break; + case '^': + align = align::center; + break; + default: + break; + } + if (align != align::none) { + if (p != begin) { + auto c = *begin; + if (c == '{') + return handler.on_error("invalid fill character '{'"), begin; + handler.on_fill(basic_string_view(begin, to_unsigned(p - begin))); + begin = p + 1; + } else + ++begin; + handler.on_align(align); + break; + } else if (p == begin) { + break; + } + p = begin; + } + return begin; +} + +template FMT_CONSTEXPR bool is_name_start(Char c) { + return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || '_' == c; +} + +template +FMT_CONSTEXPR auto do_parse_arg_id(const Char* begin, const Char* end, + IDHandler&& handler) -> const Char* { + FMT_ASSERT(begin != end, ""); + Char c = *begin; + if (c >= '0' && c <= '9') { + int index = 0; + if (c != '0') + index = + parse_nonnegative_int(begin, end, (std::numeric_limits::max)()); + else + ++begin; + if (begin == end || (*begin != '}' && *begin != ':')) + handler.on_error("invalid format string"); + else + handler(index); + return begin; + } + if (!is_name_start(c)) { + handler.on_error("invalid format string"); + return begin; + } + auto it = begin; + do { + ++it; + } while (it != end && (is_name_start(c = *it) || ('0' <= c && c <= '9'))); + handler(basic_string_view(begin, to_unsigned(it - begin))); + return it; +} + +template +FMT_CONSTEXPR FMT_INLINE auto parse_arg_id(const Char* begin, const Char* end, + IDHandler&& handler) -> const Char* { + Char c = *begin; + if (c != '}' && c != ':') return do_parse_arg_id(begin, end, handler); + handler(); + return begin; +} + +template +FMT_CONSTEXPR auto parse_width(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + using detail::auto_id; + struct width_adapter { + Handler& handler; + + FMT_CONSTEXPR void operator()() { handler.on_dynamic_width(auto_id()); } + FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_width(id); } + FMT_CONSTEXPR void operator()(basic_string_view id) { + handler.on_dynamic_width(id); + } + FMT_CONSTEXPR void on_error(const char* message) { + if (message) handler.on_error(message); + } + }; + + FMT_ASSERT(begin != end, ""); + if ('0' <= *begin && *begin <= '9') { + int width = parse_nonnegative_int(begin, end, -1); + if (width != -1) + handler.on_width(width); + else + handler.on_error("number is too big"); + } else if (*begin == '{') { + ++begin; + if (begin != end) begin = parse_arg_id(begin, end, width_adapter{handler}); + if (begin == end || *begin != '}') + return handler.on_error("invalid format string"), begin; + ++begin; + } + return begin; +} + +template +FMT_CONSTEXPR auto parse_precision(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + using detail::auto_id; + struct precision_adapter { + Handler& handler; + + FMT_CONSTEXPR void operator()() { handler.on_dynamic_precision(auto_id()); } + FMT_CONSTEXPR void operator()(int id) { handler.on_dynamic_precision(id); } + FMT_CONSTEXPR void operator()(basic_string_view id) { + handler.on_dynamic_precision(id); + } + FMT_CONSTEXPR void on_error(const char* message) { + if 
(message) handler.on_error(message); + } + }; + + ++begin; + auto c = begin != end ? *begin : Char(); + if ('0' <= c && c <= '9') { + auto precision = parse_nonnegative_int(begin, end, -1); + if (precision != -1) + handler.on_precision(precision); + else + handler.on_error("number is too big"); + } else if (c == '{') { + ++begin; + if (begin != end) + begin = parse_arg_id(begin, end, precision_adapter{handler}); + if (begin == end || *begin++ != '}') + return handler.on_error("invalid format string"), begin; + } else { + return handler.on_error("missing precision specifier"), begin; + } + handler.end_precision(); + return begin; +} + +template +FMT_CONSTEXPR auto parse_presentation_type(Char type) -> presentation_type { + switch (to_ascii(type)) { + case 'd': + return presentation_type::dec; + case 'o': + return presentation_type::oct; + case 'x': + return presentation_type::hex_lower; + case 'X': + return presentation_type::hex_upper; + case 'b': + return presentation_type::bin_lower; + case 'B': + return presentation_type::bin_upper; + case 'a': + return presentation_type::hexfloat_lower; + case 'A': + return presentation_type::hexfloat_upper; + case 'e': + return presentation_type::exp_lower; + case 'E': + return presentation_type::exp_upper; + case 'f': + return presentation_type::fixed_lower; + case 'F': + return presentation_type::fixed_upper; + case 'g': + return presentation_type::general_lower; + case 'G': + return presentation_type::general_upper; + case 'c': + return presentation_type::chr; + case 's': + return presentation_type::string; + case 'p': + return presentation_type::pointer; + case '?': + return presentation_type::debug; + default: + return presentation_type::none; + } +} + +// Parses standard format specifiers and sends notifications about parsed +// components to handler. +template +FMT_CONSTEXPR FMT_INLINE auto parse_format_specs(const Char* begin, + const Char* end, + SpecHandler&& handler) + -> const Char* { + if (1 < end - begin && begin[1] == '}' && is_ascii_letter(*begin) && + *begin != 'L') { + presentation_type type = parse_presentation_type(*begin++); + if (type == presentation_type::none) + handler.on_error("invalid type specifier"); + handler.on_type(type); + return begin; + } + + if (begin == end) return begin; + + begin = parse_align(begin, end, handler); + if (begin == end) return begin; + + // Parse sign. + switch (to_ascii(*begin)) { + case '+': + handler.on_sign(sign::plus); + ++begin; + break; + case '-': + handler.on_sign(sign::minus); + ++begin; + break; + case ' ': + handler.on_sign(sign::space); + ++begin; + break; + default: + break; + } + if (begin == end) return begin; + + if (*begin == '#') { + handler.on_hash(); + if (++begin == end) return begin; + } + + // Parse zero flag. + if (*begin == '0') { + handler.on_zero(); + if (++begin == end) return begin; + } + + begin = parse_width(begin, end, handler); + if (begin == end) return begin; + + // Parse precision. + if (*begin == '.') { + begin = parse_precision(begin, end, handler); + if (begin == end) return begin; + } + + if (*begin == 'L') { + handler.on_localized(); + ++begin; + } + + // Parse type. 
+ if (begin != end && *begin != '}') { + presentation_type type = parse_presentation_type(*begin++); + if (type == presentation_type::none) + handler.on_error("invalid type specifier"); + handler.on_type(type); + } + return begin; +} + +template +FMT_CONSTEXPR auto parse_replacement_field(const Char* begin, const Char* end, + Handler&& handler) -> const Char* { + struct id_adapter { + Handler& handler; + int arg_id; + + FMT_CONSTEXPR void operator()() { arg_id = handler.on_arg_id(); } + FMT_CONSTEXPR void operator()(int id) { arg_id = handler.on_arg_id(id); } + FMT_CONSTEXPR void operator()(basic_string_view id) { + arg_id = handler.on_arg_id(id); + } + FMT_CONSTEXPR void on_error(const char* message) { + if (message) handler.on_error(message); + } + }; + + ++begin; + if (begin == end) return handler.on_error("invalid format string"), end; + if (*begin == '}') { + handler.on_replacement_field(handler.on_arg_id(), begin); + } else if (*begin == '{') { + handler.on_text(begin, begin + 1); + } else { + auto adapter = id_adapter{handler, 0}; + begin = parse_arg_id(begin, end, adapter); + Char c = begin != end ? *begin : Char(); + if (c == '}') { + handler.on_replacement_field(adapter.arg_id, begin); + } else if (c == ':') { + begin = handler.on_format_specs(adapter.arg_id, begin + 1, end); + if (begin == end || *begin != '}') + return handler.on_error("unknown format specifier"), end; + } else { + return handler.on_error("missing '}' in format string"), end; + } + } + return begin + 1; +} + +template +FMT_CONSTEXPR FMT_INLINE void parse_format_string( + basic_string_view format_str, Handler&& handler) { + // Workaround a name-lookup bug in MSVC's modules implementation. + using detail::find; + + auto begin = format_str.data(); + auto end = begin + format_str.size(); + if (end - begin < 32) { + // Use a simple loop instead of memchr for small strings. + const Char* p = begin; + while (p != end) { + auto c = *p++; + if (c == '{') { + handler.on_text(begin, p - 1); + begin = p = parse_replacement_field(p - 1, end, handler); + } else if (c == '}') { + if (p == end || *p != '}') + return handler.on_error("unmatched '}' in format string"); + handler.on_text(begin, p); + begin = ++p; + } + } + handler.on_text(begin, end); + return; + } + struct writer { + FMT_CONSTEXPR void operator()(const Char* pbegin, const Char* pend) { + if (pbegin == pend) return; + for (;;) { + const Char* p = nullptr; + if (!find(pbegin, pend, Char('}'), p)) + return handler_.on_text(pbegin, pend); + ++p; + if (p == pend || *p != '}') + return handler_.on_error("unmatched '}' in format string"); + handler_.on_text(pbegin, p); + pbegin = p + 1; + } + } + Handler& handler_; + } write{handler}; + while (begin != end) { + // Doing two passes with memchr (one for '{' and another for '}') is up to + // 2.5x faster than the naive one-pass implementation on big format strings. 
+ const Char* p = begin; + if (*begin != '{' && !find(begin + 1, end, Char('{'), p)) + return write(begin, end); + write(begin, p); + begin = parse_replacement_field(p, end, handler); + } +} + +template ::value> struct strip_named_arg { + using type = T; +}; + +template struct strip_named_arg { + using type = remove_cvref_t; +}; + +template +FMT_CONSTEXPR auto parse_format_specs(ParseContext& ctx) + -> decltype(ctx.begin()) { + using char_type = typename ParseContext::char_type; + using context = buffer_context; + using stripped_type = typename strip_named_arg::type; + using mapped_type = conditional_t< + mapped_type_constant::value != type::custom_type, + decltype(arg_mapper().map(std::declval())), + stripped_type>; + auto f = conditional_t::value, + formatter, + fallback_formatter>(); + return f.parse(ctx); +} + +// A parse context with extra argument id checks. It is only used at compile +// time because adding checks at runtime would introduce substantial overhead +// and would be redundant since argument ids are checked when arguments are +// retrieved anyway. +template +class compile_parse_context + : public basic_format_parse_context { + private: + int num_args_; + using base = basic_format_parse_context; + + public: + explicit FMT_CONSTEXPR compile_parse_context( + basic_string_view format_str, + int num_args = (std::numeric_limits::max)(), ErrorHandler eh = {}) + : base(format_str, eh), num_args_(num_args) {} + + FMT_CONSTEXPR auto next_arg_id() -> int { + int id = base::next_arg_id(); + if (id >= num_args_) this->on_error("argument not found"); + return id; + } + + FMT_CONSTEXPR void check_arg_id(int id) { + base::check_arg_id(id); + if (id >= num_args_) this->on_error("argument not found"); + } + using base::check_arg_id; +}; + +template +FMT_CONSTEXPR void check_int_type_spec(presentation_type type, + ErrorHandler&& eh) { + if (type > presentation_type::bin_upper && type != presentation_type::chr) + eh.on_error("invalid type specifier"); +} + +// Checks char specs and returns true if the type spec is char (and not int). +template +FMT_CONSTEXPR auto check_char_specs(const basic_format_specs& specs, + ErrorHandler&& eh = {}) -> bool { + if (specs.type != presentation_type::none && + specs.type != presentation_type::chr && + specs.type != presentation_type::debug) { + check_int_type_spec(specs.type, eh); + return false; + } + if (specs.align == align::numeric || specs.sign != sign::none || specs.alt) + eh.on_error("invalid format specifier for char"); + return true; +} + +// A floating-point presentation format. +enum class float_format : unsigned char { + general, // General: exponent notation or fixed point based on magnitude. + exp, // Exponent notation with the default precision of 6, e.g. 1.2e-3. + fixed, // Fixed point with the default precision of 6, e.g. 0.0012. 
+ hex +}; + +struct float_specs { + int precision; + float_format format : 8; + sign_t sign : 8; + bool upper : 1; + bool locale : 1; + bool binary32 : 1; + bool showpoint : 1; +}; + +template +FMT_CONSTEXPR auto parse_float_type_spec(const basic_format_specs& specs, + ErrorHandler&& eh = {}) + -> float_specs { + auto result = float_specs(); + result.showpoint = specs.alt; + result.locale = specs.localized; + switch (specs.type) { + case presentation_type::none: + result.format = float_format::general; + break; + case presentation_type::general_upper: + result.upper = true; + FMT_FALLTHROUGH; + case presentation_type::general_lower: + result.format = float_format::general; + break; + case presentation_type::exp_upper: + result.upper = true; + FMT_FALLTHROUGH; + case presentation_type::exp_lower: + result.format = float_format::exp; + result.showpoint |= specs.precision != 0; + break; + case presentation_type::fixed_upper: + result.upper = true; + FMT_FALLTHROUGH; + case presentation_type::fixed_lower: + result.format = float_format::fixed; + result.showpoint |= specs.precision != 0; + break; + case presentation_type::hexfloat_upper: + result.upper = true; + FMT_FALLTHROUGH; + case presentation_type::hexfloat_lower: + result.format = float_format::hex; + break; + default: + eh.on_error("invalid type specifier"); + break; + } + return result; +} + +template +FMT_CONSTEXPR auto check_cstring_type_spec(presentation_type type, + ErrorHandler&& eh = {}) -> bool { + if (type == presentation_type::none || type == presentation_type::string) + return true; + if (type != presentation_type::pointer) eh.on_error("invalid type specifier"); + return false; +} + +template +FMT_CONSTEXPR void check_string_type_spec(presentation_type type, + ErrorHandler&& eh = {}) { + if (type != presentation_type::none && type != presentation_type::string && + type != presentation_type::debug) + eh.on_error("invalid type specifier"); +} + +template +FMT_CONSTEXPR void check_pointer_type_spec(presentation_type type, + ErrorHandler&& eh) { + if (type != presentation_type::none && type != presentation_type::pointer) + eh.on_error("invalid type specifier"); +} + +// A parse_format_specs handler that checks if specifiers are consistent with +// the argument type. 
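Callers see these consistency checks, including the `specs_checker` defined next, as `fmt::format_error` for runtime format strings (compile-time-checked strings fail to build instead). A sketch, assuming exceptions are enabled:

    #include <fmt/format.h>
    #include <cstdio>

    int main() {
      try {
        // '+' needs a signed numeric argument; specs_checker::on_sign rejects it.
        (void)fmt::format(fmt::runtime("{:+}"), 42u);
      } catch (const fmt::format_error& e) {
        std::printf("rejected: %s\n", e.what());
      }
    }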
+template class specs_checker : public Handler { + private: + detail::type arg_type_; + + FMT_CONSTEXPR void require_numeric_argument() { + if (!is_arithmetic_type(arg_type_)) + this->on_error("format specifier requires numeric argument"); + } + + public: + FMT_CONSTEXPR specs_checker(const Handler& handler, detail::type arg_type) + : Handler(handler), arg_type_(arg_type) {} + + FMT_CONSTEXPR void on_align(align_t align) { + if (align == align::numeric) require_numeric_argument(); + Handler::on_align(align); + } + + FMT_CONSTEXPR void on_sign(sign_t s) { + require_numeric_argument(); + if (is_integral_type(arg_type_) && arg_type_ != type::int_type && + arg_type_ != type::long_long_type && arg_type_ != type::int128_type && + arg_type_ != type::char_type) { + this->on_error("format specifier requires signed argument"); + } + Handler::on_sign(s); + } + + FMT_CONSTEXPR void on_hash() { + require_numeric_argument(); + Handler::on_hash(); + } + + FMT_CONSTEXPR void on_localized() { + require_numeric_argument(); + Handler::on_localized(); + } + + FMT_CONSTEXPR void on_zero() { + require_numeric_argument(); + Handler::on_zero(); + } + + FMT_CONSTEXPR void end_precision() { + if (is_integral_type(arg_type_) || arg_type_ == type::pointer_type) + this->on_error("precision not allowed for this argument type"); + } +}; + +constexpr int invalid_arg_index = -1; + +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +template +constexpr auto get_arg_index_by_name(basic_string_view name) -> int { + if constexpr (detail::is_statically_named_arg()) { + if (name == T::name) return N; + } + if constexpr (sizeof...(Args) > 0) + return get_arg_index_by_name(name); + (void)name; // Workaround an MSVC bug about "unused" parameter. + return invalid_arg_index; +} +#endif + +template +FMT_CONSTEXPR auto get_arg_index_by_name(basic_string_view name) -> int { +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS + if constexpr (sizeof...(Args) > 0) + return get_arg_index_by_name<0, Args...>(name); +#endif + (void)name; + return invalid_arg_index; +} + +template +class format_string_checker { + private: + using parse_context_type = compile_parse_context; + enum { num_args = sizeof...(Args) }; + + // Format specifier parsing function. + using parse_func = const Char* (*)(parse_context_type&); + + parse_context_type context_; + parse_func parse_funcs_[num_args > 0 ? num_args : 1]; + + public: + explicit FMT_CONSTEXPR format_string_checker( + basic_string_view format_str, ErrorHandler eh) + : context_(format_str, num_args, eh), + parse_funcs_{&parse_format_specs...} {} + + FMT_CONSTEXPR void on_text(const Char*, const Char*) {} + + FMT_CONSTEXPR auto on_arg_id() -> int { return context_.next_arg_id(); } + FMT_CONSTEXPR auto on_arg_id(int id) -> int { + return context_.check_arg_id(id), id; + } + FMT_CONSTEXPR auto on_arg_id(basic_string_view id) -> int { +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS + auto index = get_arg_index_by_name(id); + if (index == invalid_arg_index) on_error("named argument is not found"); + return context_.check_arg_id(index), index; +#else + (void)id; + on_error("compile-time checks for named arguments require C++20 support"); + return 0; +#endif + } + + FMT_CONSTEXPR void on_replacement_field(int, const Char*) {} + + FMT_CONSTEXPR auto on_format_specs(int id, const Char* begin, const Char*) + -> const Char* { + context_.advance_to(context_.begin() + (begin - &*context_.begin())); + // id >= 0 check is a workaround for gcc 10 bug (#2065). + return id >= 0 && id < num_args ? 
parse_funcs_[id](context_) : begin; + } + + FMT_CONSTEXPR void on_error(const char* message) { + context_.on_error(message); + } +}; + +template ::value), int>> +void check_format_string(S format_str) { + FMT_CONSTEXPR auto s = to_string_view(format_str); + using checker = format_string_checker...>; + FMT_CONSTEXPR bool invalid_format = + (parse_format_string(s, checker(s, {})), true); + ignore_unused(invalid_format); +} + +template +void vformat_to( + buffer& buf, basic_string_view fmt, + basic_format_args)> args, + locale_ref loc = {}); + +FMT_API void vprint_mojibake(std::FILE*, string_view, format_args); +#ifndef _WIN32 +inline void vprint_mojibake(std::FILE*, string_view, format_args) {} +#endif +FMT_END_DETAIL_NAMESPACE + +// A formatter specialization for the core types corresponding to detail::type +// constants. +template +struct formatter::value != + detail::type::custom_type>> { + private: + detail::dynamic_format_specs specs_; + + public: + // Parses format specifiers stopping either at the end of the range or at the + // terminating '}'. + template + FMT_CONSTEXPR auto parse(ParseContext& ctx) -> decltype(ctx.begin()) { + auto begin = ctx.begin(), end = ctx.end(); + if (begin == end) return begin; + using handler_type = detail::dynamic_specs_handler; + auto type = detail::type_constant::value; + auto checker = + detail::specs_checker(handler_type(specs_, ctx), type); + auto it = detail::parse_format_specs(begin, end, checker); + auto eh = ctx.error_handler(); + switch (type) { + case detail::type::none_type: + FMT_ASSERT(false, "invalid argument type"); + break; + case detail::type::bool_type: + if (specs_.type == presentation_type::none || + specs_.type == presentation_type::string) { + break; + } + FMT_FALLTHROUGH; + case detail::type::int_type: + case detail::type::uint_type: + case detail::type::long_long_type: + case detail::type::ulong_long_type: + case detail::type::int128_type: + case detail::type::uint128_type: + detail::check_int_type_spec(specs_.type, eh); + break; + case detail::type::char_type: + detail::check_char_specs(specs_, eh); + break; + case detail::type::float_type: + if (detail::const_check(FMT_USE_FLOAT)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "float support disabled"); + break; + case detail::type::double_type: + if (detail::const_check(FMT_USE_DOUBLE)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "double support disabled"); + break; + case detail::type::long_double_type: + if (detail::const_check(FMT_USE_LONG_DOUBLE)) + detail::parse_float_type_spec(specs_, eh); + else + FMT_ASSERT(false, "long double support disabled"); + break; + case detail::type::cstring_type: + detail::check_cstring_type_spec(specs_.type, eh); + break; + case detail::type::string_type: + detail::check_string_type_spec(specs_.type, eh); + break; + case detail::type::pointer_type: + detail::check_pointer_type_spec(specs_.type, eh); + break; + case detail::type::custom_type: + // Custom format specifiers are checked in parse functions of + // formatter specializations. 
+ break; + } + return it; + } + + template + FMT_CONSTEXPR auto format(const T& val, FormatContext& ctx) const + -> decltype(ctx.out()); +}; + +#define FMT_FORMAT_AS(Type, Base) \ + template \ + struct formatter : formatter { \ + template \ + auto format(Type const& val, FormatContext& ctx) const \ + -> decltype(ctx.out()) { \ + return formatter::format(static_cast(val), ctx); \ + } \ + } + +FMT_FORMAT_AS(signed char, int); +FMT_FORMAT_AS(unsigned char, unsigned); +FMT_FORMAT_AS(short, int); +FMT_FORMAT_AS(unsigned short, unsigned); +FMT_FORMAT_AS(long, long long); +FMT_FORMAT_AS(unsigned long, unsigned long long); +FMT_FORMAT_AS(Char*, const Char*); +FMT_FORMAT_AS(std::basic_string, basic_string_view); +FMT_FORMAT_AS(std::nullptr_t, const void*); +FMT_FORMAT_AS(detail::std_string_view, basic_string_view); + +template struct basic_runtime { basic_string_view str; }; + +/** A compile-time format string. */ +template class basic_format_string { + private: + basic_string_view str_; + + public: + template >::value)> + FMT_CONSTEVAL FMT_INLINE basic_format_string(const S& s) : str_(s) { + static_assert( + detail::count< + (std::is_base_of>::value && + std::is_reference::value)...>() == 0, + "passing views as lvalues is disallowed"); +#ifdef FMT_HAS_CONSTEVAL + if constexpr (detail::count_named_args() == + detail::count_statically_named_args()) { + using checker = detail::format_string_checker...>; + detail::parse_format_string(str_, checker(s, {})); + } +#else + detail::check_format_string(s); +#endif + } + basic_format_string(basic_runtime r) : str_(r.str) {} + + FMT_INLINE operator basic_string_view() const { return str_; } +}; + +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 +// Workaround broken conversion on older gcc. +template using format_string = string_view; +template auto runtime(const S& s) -> basic_string_view> { + return s; +} +#else +template +using format_string = basic_format_string...>; +/** + \rst + Creates a runtime format string. + + **Example**:: + + // Check format string at runtime instead of compile-time. + fmt::print(fmt::runtime("{:d}"), "I am not a number"); + \endrst + */ +template auto runtime(const S& s) -> basic_runtime> { + return {{s}}; +} +#endif + +FMT_API auto vformat(string_view fmt, format_args args) -> std::string; + +/** + \rst + Formats ``args`` according to specifications in ``fmt`` and returns the result + as a string. + + **Example**:: + + #include + std::string message = fmt::format("The answer is {}.", 42); + \endrst +*/ +template +FMT_NODISCARD FMT_INLINE auto format(format_string fmt, T&&... args) + -> std::string { + return vformat(fmt, fmt::make_format_args(args...)); +} + +/** Formats a string and writes the output to ``out``. */ +template ::value)> +auto vformat_to(OutputIt out, string_view fmt, format_args args) -> OutputIt { + using detail::get_buffer; + auto&& buf = get_buffer(out); + detail::vformat_to(buf, fmt, args, {}); + return detail::get_iterator(buf); +} + +/** + \rst + Formats ``args`` according to specifications in ``fmt``, writes the result to + the output iterator ``out`` and returns the iterator past the end of the output + range. `format_to` does not append a terminating null character. + + **Example**:: + + auto out = std::vector(); + fmt::format_to(std::back_inserter(out), "{}", 42); + \endrst + */ +template ::value)> +FMT_INLINE auto format_to(OutputIt out, format_string fmt, T&&... 
args) + -> OutputIt { + return vformat_to(out, fmt, fmt::make_format_args(args...)); +} + +template struct format_to_n_result { + /** Iterator past the end of the output range. */ + OutputIt out; + /** Total (not truncated) output size. */ + size_t size; +}; + +template ::value)> +auto vformat_to_n(OutputIt out, size_t n, string_view fmt, format_args args) + -> format_to_n_result { + using traits = detail::fixed_buffer_traits; + auto buf = detail::iterator_buffer(out, n); + detail::vformat_to(buf, fmt, args, {}); + return {buf.out(), buf.count()}; +} + +/** + \rst + Formats ``args`` according to specifications in ``fmt``, writes up to ``n`` + characters of the result to the output iterator ``out`` and returns the total + (not truncated) output size and the iterator past the end of the output range. + `format_to_n` does not append a terminating null character. + \endrst + */ +template ::value)> +FMT_INLINE auto format_to_n(OutputIt out, size_t n, format_string fmt, + T&&... args) -> format_to_n_result { + return vformat_to_n(out, n, fmt, fmt::make_format_args(args...)); +} + +/** Returns the number of chars in the output of ``format(fmt, args...)``. */ +template +FMT_NODISCARD FMT_INLINE auto formatted_size(format_string fmt, + T&&... args) -> size_t { + auto buf = detail::counting_buffer<>(); + detail::vformat_to(buf, string_view(fmt), fmt::make_format_args(args...), {}); + return buf.count(); +} + +FMT_API void vprint(string_view fmt, format_args args); +FMT_API void vprint(std::FILE* f, string_view fmt, format_args args); + +/** + \rst + Formats ``args`` according to specifications in ``fmt`` and writes the output + to ``stdout``. + + **Example**:: + + fmt::print("Elapsed time: {0:.2f} seconds", 1.23); + \endrst + */ +template +FMT_INLINE void print(format_string fmt, T&&... args) { + const auto& vargs = fmt::make_format_args(args...); + return detail::is_utf8() ? vprint(fmt, vargs) + : detail::vprint_mojibake(stdout, fmt, vargs); +} + +/** + \rst + Formats ``args`` according to specifications in ``fmt`` and writes the + output to the file ``f``. + + **Example**:: + + fmt::print(stderr, "Don't {}!", "panic"); + \endrst + */ +template +FMT_INLINE void print(std::FILE* f, format_string fmt, T&&... args) { + const auto& vargs = fmt::make_format_args(args...); + return detail::is_utf8() ? vprint(f, fmt, vargs) + : detail::vprint_mojibake(f, fmt, vargs); +} + +FMT_MODULE_EXPORT_END +FMT_GCC_PRAGMA("GCC pop_options") +FMT_END_NAMESPACE + +#ifdef FMT_HEADER_ONLY +# include "format.h" +#endif +#endif // FMT_CORE_H_ diff --git a/external/fmt/include/fmt/format-inl.h b/external/fmt/include/fmt/format-inl.h new file mode 100644 index 00000000..feb7815d --- /dev/null +++ b/external/fmt/include/fmt/format-inl.h @@ -0,0 +1,2594 @@ +// Formatting library for C++ - implementation +// +// Copyright (c) 2012 - 2016, Victor Zverovich +// All rights reserved. +// +// For the license information refer to format.h. 
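Taken together, the public entry points defined in core.h above reduce to a few calls; a minimal smoke-test sketch before the implementation file continues:

    #include <fmt/core.h>

    int main() {
      fmt::print("Elapsed: {:.2f}s\n", 1.23);  // to stdout
      auto s = fmt::format("The answer is {}.", 42);
      char buf[8];
      auto r = fmt::format_to_n(buf, sizeof(buf), "{}", 123456789);
      // r.size is the untruncated length (9); buf holds the first 8 chars.
      fmt::print("{} ({} chars needed)\n", s, r.size);
    }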
+
+#ifndef FMT_FORMAT_INL_H_
+#define FMT_FORMAT_INL_H_
+
+#include <algorithm>
+#include <cctype>
+#include <cerrno>  // errno
+#include <climits>
+#include <cmath>
+#include <cstdarg>
+#include <cstring>  // std::memmove
+#include <cwchar>
+#include <exception>
+
+#ifndef FMT_STATIC_THOUSANDS_SEPARATOR
+#  include <locale>
+#endif
+
+#ifdef _WIN32
+#  include <io.h>  // _isatty
+#endif
+
+#include "format.h"
+
+FMT_BEGIN_NAMESPACE
+namespace detail {
+
+FMT_FUNC void assert_fail(const char* file, int line, const char* message) {
+  // Use unchecked std::fprintf to avoid triggering another assertion when
+  // writing to stderr fails
+  std::fprintf(stderr, "%s:%d: assertion failed: %s", file, line, message);
+  // Chosen instead of std::abort to satisfy Clang in CUDA mode during device
+  // code pass.
+  std::terminate();
+}
+
+FMT_FUNC void throw_format_error(const char* message) {
+  FMT_THROW(format_error(message));
+}
+
+FMT_FUNC void format_error_code(detail::buffer<char>& out, int error_code,
+                                string_view message) noexcept {
+  // Report error code making sure that the output fits into
+  // inline_buffer_size to avoid dynamic memory allocation and potential
+  // bad_alloc.
+  out.try_resize(0);
+  static const char SEP[] = ": ";
+  static const char ERROR_STR[] = "error ";
+  // Subtract 2 to account for terminating null characters in SEP and ERROR_STR.
+  size_t error_code_size = sizeof(SEP) + sizeof(ERROR_STR) - 2;
+  auto abs_value = static_cast<uint32_or_64_or_128_t<int>>(error_code);
+  if (detail::is_negative(error_code)) {
+    abs_value = 0 - abs_value;
+    ++error_code_size;
+  }
+  error_code_size += detail::to_unsigned(detail::count_digits(abs_value));
+  auto it = buffer_appender<char>(out);
+  if (message.size() <= inline_buffer_size - error_code_size)
+    format_to(it, FMT_STRING("{}{}"), message, SEP);
+  format_to(it, FMT_STRING("{}{}"), ERROR_STR, error_code);
+  FMT_ASSERT(out.size() <= inline_buffer_size, "");
+}
+
+FMT_FUNC void report_error(format_func func, int error_code,
+                           const char* message) noexcept {
+  memory_buffer full_message;
+  func(full_message, error_code, message);
+  // Don't use fwrite_fully because the latter may throw.
+  if (std::fwrite(full_message.data(), full_message.size(), 1, stderr) > 0)
+    std::fputc('\n', stderr);
+}
+
+// A wrapper around fwrite that throws on error.
+inline void fwrite_fully(const void* ptr, size_t size, size_t count,
+                         FILE* stream) {
+  size_t written = std::fwrite(ptr, size, count, stream);
+  if (written < count) FMT_THROW(system_error(errno, "cannot write to file"));
+}
+
+#ifndef FMT_STATIC_THOUSANDS_SEPARATOR
+template <typename Locale>
+locale_ref::locale_ref(const Locale& loc) : locale_(&loc) {
+  static_assert(std::is_same<Locale, std::locale>::value, "");
+}
+
+template <typename Locale> Locale locale_ref::get() const {
+  static_assert(std::is_same<Locale, std::locale>::value, "");
+  return locale_ ? *static_cast<const std::locale*>(locale_) : std::locale();
+}
+
+template <typename Char>
+FMT_FUNC auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result<Char> {
+  auto& facet = std::use_facet<std::numpunct<Char>>(loc.get());
+  auto grouping = facet.grouping();
+  auto thousands_sep = grouping.empty() ?
Char() : facet.thousands_sep(); + return {std::move(grouping), thousands_sep}; +} +template FMT_FUNC Char decimal_point_impl(locale_ref loc) { + return std::use_facet>(loc.get()) + .decimal_point(); +} +#else +template +FMT_FUNC auto thousands_sep_impl(locale_ref) -> thousands_sep_result { + return {"\03", FMT_STATIC_THOUSANDS_SEPARATOR}; +} +template FMT_FUNC Char decimal_point_impl(locale_ref) { + return '.'; +} +#endif +} // namespace detail + +#if !FMT_MSC_VER +FMT_API FMT_FUNC format_error::~format_error() noexcept = default; +#endif + +FMT_FUNC std::system_error vsystem_error(int error_code, string_view format_str, + format_args args) { + auto ec = std::error_code(error_code, std::generic_category()); + return std::system_error(ec, vformat(format_str, args)); +} + +namespace detail { + +template struct basic_impl_data { + // Normalized 64-bit significands of pow(10, k), for k = -348, -340, ..., 340. + // These are generated by support/compute-powers.py. + static constexpr uint64_t pow10_significands[87] = { + 0xfa8fd5a0081c0288, 0xbaaee17fa23ebf76, 0x8b16fb203055ac76, + 0xcf42894a5dce35ea, 0x9a6bb0aa55653b2d, 0xe61acf033d1a45df, + 0xab70fe17c79ac6ca, 0xff77b1fcbebcdc4f, 0xbe5691ef416bd60c, + 0x8dd01fad907ffc3c, 0xd3515c2831559a83, 0x9d71ac8fada6c9b5, + 0xea9c227723ee8bcb, 0xaecc49914078536d, 0x823c12795db6ce57, + 0xc21094364dfb5637, 0x9096ea6f3848984f, 0xd77485cb25823ac7, + 0xa086cfcd97bf97f4, 0xef340a98172aace5, 0xb23867fb2a35b28e, + 0x84c8d4dfd2c63f3b, 0xc5dd44271ad3cdba, 0x936b9fcebb25c996, + 0xdbac6c247d62a584, 0xa3ab66580d5fdaf6, 0xf3e2f893dec3f126, + 0xb5b5ada8aaff80b8, 0x87625f056c7c4a8b, 0xc9bcff6034c13053, + 0x964e858c91ba2655, 0xdff9772470297ebd, 0xa6dfbd9fb8e5b88f, + 0xf8a95fcf88747d94, 0xb94470938fa89bcf, 0x8a08f0f8bf0f156b, + 0xcdb02555653131b6, 0x993fe2c6d07b7fac, 0xe45c10c42a2b3b06, + 0xaa242499697392d3, 0xfd87b5f28300ca0e, 0xbce5086492111aeb, + 0x8cbccc096f5088cc, 0xd1b71758e219652c, 0x9c40000000000000, + 0xe8d4a51000000000, 0xad78ebc5ac620000, 0x813f3978f8940984, + 0xc097ce7bc90715b3, 0x8f7e32ce7bea5c70, 0xd5d238a4abe98068, + 0x9f4f2726179a2245, 0xed63a231d4c4fb27, 0xb0de65388cc8ada8, + 0x83c7088e1aab65db, 0xc45d1df942711d9a, 0x924d692ca61be758, + 0xda01ee641a708dea, 0xa26da3999aef774a, 0xf209787bb47d6b85, + 0xb454e4a179dd1877, 0x865b86925b9bc5c2, 0xc83553c5c8965d3d, + 0x952ab45cfa97a0b3, 0xde469fbd99a05fe3, 0xa59bc234db398c25, + 0xf6c69a72a3989f5c, 0xb7dcbf5354e9bece, 0x88fcf317f22241e2, + 0xcc20ce9bd35c78a5, 0x98165af37b2153df, 0xe2a0b5dc971f303a, + 0xa8d9d1535ce3b396, 0xfb9b7cd9a4a7443c, 0xbb764c4ca7a44410, + 0x8bab8eefb6409c1a, 0xd01fef10a657842c, 0x9b10a4e5e9913129, + 0xe7109bfba19c0c9d, 0xac2820d9623bf429, 0x80444b5e7aa7cf85, + 0xbf21e44003acdd2d, 0x8e679c2f5e44ff8f, 0xd433179d9c8cb841, + 0x9e19db92b4e31ba9, 0xeb96bf6ebadf77d9, 0xaf87023b9bf0ee6b, + }; + +#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409 +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wnarrowing" +#endif + // Binary exponents of pow(10, k), for k = -348, -340, ..., 340, corresponding + // to significands above. 
+  static constexpr int16_t pow10_exponents[87] = {
+      -1220, -1193, -1166, -1140, -1113, -1087, -1060, -1034, -1007, -980, -954,
+      -927,  -901,  -874,  -847,  -821,  -794,  -768,  -741,  -715,  -688, -661,
+      -635,  -608,  -582,  -555,  -529,  -502,  -475,  -449,  -422,  -396, -369,
+      -343,  -316,  -289,  -263,  -236,  -210,  -183,  -157,  -130,  -103, -77,
+      -50,   -24,   3,     30,    56,    83,    109,   136,   162,   189,  216,
+      242,   269,   295,   322,   348,   375,   402,   428,   455,   481,  508,
+      534,   561,   588,   614,   641,   667,   694,   720,   747,   774,  800,
+      827,   853,   880,   907,   933,   960,   986,   1013,  1039,  1066};
+#if FMT_GCC_VERSION && FMT_GCC_VERSION < 409
+#  pragma GCC diagnostic pop
+#endif
+
+  static constexpr uint64_t power_of_10_64[20] = {
+      1, FMT_POWERS_OF_10(1ULL), FMT_POWERS_OF_10(1000000000ULL),
+      10000000000000000000ULL};
+};
+
+// This is a struct rather than an alias to avoid shadowing warnings in gcc.
+struct impl_data : basic_impl_data<> {};
+
+#if __cplusplus < 201703L
+template <typename T>
+constexpr uint64_t basic_impl_data<T>::pow10_significands[];
+template <typename T> constexpr int16_t basic_impl_data<T>::pow10_exponents[];
+template <typename T> constexpr uint64_t basic_impl_data<T>::power_of_10_64[];
+#endif
+
+template <typename T> struct bits {
+  static FMT_CONSTEXPR_DECL const int value =
+      static_cast<int>(sizeof(T) * std::numeric_limits<unsigned char>::digits);
+};
+
+// A floating-point number f * pow(2, e) where F is an unsigned type.
+template <typename F> struct basic_fp {
+  F f;
+  int e;
+
+  static constexpr const int num_significand_bits = bits<F>::value;
+
+  constexpr basic_fp() : f(0), e(0) {}
+  constexpr basic_fp(uint64_t f_val, int e_val) : f(f_val), e(e_val) {}
+
+  // Constructs fp from an IEEE754 floating-point number.
+  template <typename Float> FMT_CONSTEXPR basic_fp(Float n) { assign(n); }
+
+  // Assigns n to this and return true iff predecessor is closer than successor.
+  template <typename Float> FMT_CONSTEXPR auto assign(Float n) -> bool {
+    static_assert((std::numeric_limits<Float>::is_iec559 &&
+                   std::numeric_limits<Float>::digits <= 113) ||
+                      is_float128<Float>::value,
+                  "unsupported FP");
+    // Assume Float is in the format [sign][exponent][significand].
+    using carrier_uint = typename dragonbox::float_info<Float>::carrier_uint;
+    const auto num_float_significand_bits =
+        detail::num_significand_bits<Float>();
+    const auto implicit_bit = carrier_uint(1) << num_float_significand_bits;
+    const auto significand_mask = implicit_bit - 1;
+    auto u = bit_cast<carrier_uint>(n);
+    f = static_cast<F>(u & significand_mask);
+    auto biased_e = static_cast<int>((u & exponent_mask<Float>()) >>
+                                     num_float_significand_bits);
+    // The predecessor is closer if n is a normalized power of 2 (f == 0) other
+    // than the smallest normalized number (biased_e > 1).
+    auto is_predecessor_closer = f == 0 && biased_e > 1;
+    if (biased_e == 0)
+      biased_e = 1;  // Subnormals use biased exponent 1 (min exponent).
+    else if (has_implicit_bit<Float>())
+      f += static_cast<F>(implicit_bit);
+    e = biased_e - exponent_bias<Float>() - num_float_significand_bits;
+    if (!has_implicit_bit<Float>()) ++e;
+    return is_predecessor_closer;
+  }
+};
+
+using fp = basic_fp<unsigned long long>;
+
+// Normalizes the value converted from double and multiplied by (1 << SHIFT).
+template <int SHIFT = 0, typename F>
+FMT_CONSTEXPR basic_fp<F> normalize(basic_fp<F> value) {
+  // Handle subnormals.
+  const auto implicit_bit = F(1) << num_significand_bits<double>();
+  const auto shifted_implicit_bit = implicit_bit << SHIFT;
+  while ((value.f & shifted_implicit_bit) == 0) {
+    value.f <<= 1;
+    --value.e;
+  }
+  // Subtract 1 to account for hidden bit.
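+  // E.g. for double (52 stored significand bits) with SHIFT = 0 the offset is
+  // 64 - 52 - 0 - 1 = 11, moving the 53-bit significand (hidden bit included)
+  // into the top bits of the 64-bit carrier.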
+  const auto offset =
+      fp::num_significand_bits - num_significand_bits<double>() - SHIFT - 1;
+  value.f <<= offset;
+  value.e -= offset;
+  return value;
+}
+
+template <typename F> inline bool operator==(basic_fp<F> x, basic_fp<F> y) {
+  return x.f == y.f && x.e == y.e;
+}
+
+// Computes lhs * rhs / pow(2, 64) rounded to nearest with half-up tie breaking.
+FMT_CONSTEXPR inline uint64_t multiply(uint64_t lhs, uint64_t rhs) {
+#if FMT_USE_INT128
+  auto product = static_cast<__uint128_t>(lhs) * rhs;
+  auto f = static_cast<uint64_t>(product >> 64);
+  return (static_cast<uint64_t>(product) & (1ULL << 63)) != 0 ? f + 1 : f;
+#else
+  // Multiply 32-bit parts of significands.
+  uint64_t mask = (1ULL << 32) - 1;
+  uint64_t a = lhs >> 32, b = lhs & mask;
+  uint64_t c = rhs >> 32, d = rhs & mask;
+  uint64_t ac = a * c, bc = b * c, ad = a * d, bd = b * d;
+  // Compute mid 64-bit of result and round.
+  uint64_t mid = (bd >> 32) + (ad & mask) + (bc & mask) + (1U << 31);
+  return ac + (ad >> 32) + (bc >> 32) + (mid >> 32);
+#endif
+}
+
+FMT_CONSTEXPR inline fp operator*(fp x, fp y) {
+  return {multiply(x.f, y.f), x.e + y.e + 64};
+}
+
+// Returns a cached power of 10 `c_k = c_k.f * pow(2, c_k.e)` such that its
+// (binary) exponent satisfies `min_exponent <= c_k.e <= min_exponent + 28`.
+FMT_CONSTEXPR inline fp get_cached_power(int min_exponent,
+                                         int& pow10_exponent) {
+  const int shift = 32;
+  // log10(2) = 0x0.4d104d427de7fbcc...
+  const int64_t significand = 0x4d104d427de7fbcc;
+  int index = static_cast<int>(
+      ((min_exponent + fp::num_significand_bits - 1) * (significand >> shift) +
+       ((int64_t(1) << shift) - 1))  // ceil
+      >> 32                          // arithmetic shift
+  );
+  // Decimal exponent of the first (smallest) cached power of 10.
+  const int first_dec_exp = -348;
+  // Difference between 2 consecutive decimal exponents in cached powers of 10.
+  const int dec_exp_step = 8;
+  index = (index - first_dec_exp - 1) / dec_exp_step + 1;
+  pow10_exponent = first_dec_exp + index * dec_exp_step;
+  return {impl_data::pow10_significands[index],
+          impl_data::pow10_exponents[index]};
+}
+
+class bigint {
+ private:
+  // A bigint is stored as an array of bigits (big digits), with bigit at index
+  // 0 being the least significant one.
+  using bigit = uint32_t;
+  using double_bigit = uint64_t;
+  enum { bigits_capacity = 32 };
+  basic_memory_buffer<bigit, bigits_capacity> bigits_;
+  int exp_;
+
+  FMT_CONSTEXPR20 bigit operator[](int index) const {
+    return bigits_[to_unsigned(index)];
+  }
+  FMT_CONSTEXPR20 bigit& operator[](int index) {
+    return bigits_[to_unsigned(index)];
+  }
+
+  static FMT_CONSTEXPR_DECL const int bigit_bits = bits<bigit>::value;
+
+  friend struct formatter<bigint>;
+
+  FMT_CONSTEXPR20 void subtract_bigits(int index, bigit other, bigit& borrow) {
+    auto result = static_cast<double_bigit>((*this)[index]) - other - borrow;
+    (*this)[index] = static_cast<bigit>(result);
+    borrow = static_cast<bigit>(result >> (bigit_bits * 2 - 1));
+  }
+
+  FMT_CONSTEXPR20 void remove_leading_zeros() {
+    int num_bigits = static_cast<int>(bigits_.size()) - 1;
+    while (num_bigits > 0 && (*this)[num_bigits] == 0) --num_bigits;
+    bigits_.resize(to_unsigned(num_bigits + 1));
+  }
+
+  // Computes *this -= other assuming aligned bigints and *this >= other.
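+  // "Aligned" means other.exp_ >= exp_, so other's bigits line up with this
+  // bigint starting at index other.exp_ - exp_; e.g. subtracting
+  // {bigits = [b0], exp = 1} from {bigits = [a0, a1, a2], exp = 0} only
+  // touches a1 directly (and a2 via the borrow).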
+  FMT_CONSTEXPR20 void subtract_aligned(const bigint& other) {
+    FMT_ASSERT(other.exp_ >= exp_, "unaligned bigints");
+    FMT_ASSERT(compare(*this, other) >= 0, "");
+    bigit borrow = 0;
+    int i = other.exp_ - exp_;
+    for (size_t j = 0, n = other.bigits_.size(); j != n; ++i, ++j)
+      subtract_bigits(i, other.bigits_[j], borrow);
+    while (borrow > 0) subtract_bigits(i, 0, borrow);
+    remove_leading_zeros();
+  }
+
+  FMT_CONSTEXPR20 void multiply(uint32_t value) {
+    const double_bigit wide_value = value;
+    bigit carry = 0;
+    for (size_t i = 0, n = bigits_.size(); i < n; ++i) {
+      double_bigit result = bigits_[i] * wide_value + carry;
+      bigits_[i] = static_cast<bigit>(result);
+      carry = static_cast<bigit>(result >> bigit_bits);
+    }
+    if (carry != 0) bigits_.push_back(carry);
+  }
+
+  template <typename UInt, FMT_ENABLE_IF(std::is_same<UInt, uint64_t>::value ||
+                                         std::is_same<UInt, uint128_t>::value)>
+  FMT_CONSTEXPR20 void multiply(UInt value) {
+    using half_uint =
+        conditional_t<std::is_same<UInt, uint128_t>::value, uint64_t, uint32_t>;
+    const int shift = num_bits<half_uint>() - bigit_bits;
+    const UInt lower = static_cast<half_uint>(value);
+    const UInt upper = value >> num_bits<half_uint>();
+    UInt carry = 0;
+    for (size_t i = 0, n = bigits_.size(); i < n; ++i) {
+      UInt result = lower * bigits_[i] + static_cast<bigit>(carry);
+      carry = (upper * bigits_[i] << shift) + (result >> bigit_bits) +
+              (carry >> bigit_bits);
+      bigits_[i] = static_cast<bigit>(result);
+    }
+    while (carry != 0) {
+      bigits_.push_back(static_cast<bigit>(carry));
+      carry >>= bigit_bits;
+    }
+  }
+
+  template <typename UInt, FMT_ENABLE_IF(std::is_same<UInt, uint64_t>::value ||
+                                         std::is_same<UInt, uint128_t>::value)>
+  FMT_CONSTEXPR20 void assign(UInt n) {
+    size_t num_bigits = 0;
+    do {
+      bigits_[num_bigits++] = static_cast<bigit>(n);
+      n >>= bigit_bits;
+    } while (n != 0);
+    bigits_.resize(num_bigits);
+    exp_ = 0;
+  }
+
+ public:
+  FMT_CONSTEXPR20 bigint() : exp_(0) {}
+  explicit bigint(uint64_t n) { assign(n); }
+
+  bigint(const bigint&) = delete;
+  void operator=(const bigint&) = delete;
+
+  FMT_CONSTEXPR20 void assign(const bigint& other) {
+    auto size = other.bigits_.size();
+    bigits_.resize(size);
+    auto data = other.bigits_.data();
+    std::copy(data, data + size, make_checked(bigits_.data(), size));
+    exp_ = other.exp_;
+  }
+
+  template <typename Int> FMT_CONSTEXPR20 void operator=(Int n) {
+    FMT_ASSERT(n > 0, "");
+    assign(uint64_or_128_t<Int>(n));
+  }
+
+  FMT_CONSTEXPR20 int num_bigits() const {
+    return static_cast<int>(bigits_.size()) + exp_;
+  }
+
+  FMT_NOINLINE FMT_CONSTEXPR20 bigint& operator<<=(int shift) {
+    FMT_ASSERT(shift >= 0, "");
+    exp_ += shift / bigit_bits;
+    shift %= bigit_bits;
+    if (shift == 0) return *this;
+    bigit carry = 0;
+    for (size_t i = 0, n = bigits_.size(); i < n; ++i) {
+      bigit c = bigits_[i] >> (bigit_bits - shift);
+      bigits_[i] = (bigits_[i] << shift) + carry;
+      carry = c;
+    }
+    if (carry != 0) bigits_.push_back(carry);
+    return *this;
+  }
+
+  template <typename Int> FMT_CONSTEXPR20 bigint& operator*=(Int value) {
+    FMT_ASSERT(value > 0, "");
+    multiply(uint32_or_64_or_128_t<Int>(value));
+    return *this;
+  }
+
+  friend FMT_CONSTEXPR20 int compare(const bigint& lhs, const bigint& rhs) {
+    int num_lhs_bigits = lhs.num_bigits(), num_rhs_bigits = rhs.num_bigits();
+    if (num_lhs_bigits != num_rhs_bigits)
+      return num_lhs_bigits > num_rhs_bigits ? 1 : -1;
+    int i = static_cast<int>(lhs.bigits_.size()) - 1;
+    int j = static_cast<int>(rhs.bigits_.size()) - 1;
+    int end = i - j;
+    if (end < 0) end = 0;
+    for (; i >= end; --i, --j) {
+      bigit lhs_bigit = lhs[i], rhs_bigit = rhs[j];
+      if (lhs_bigit != rhs_bigit) return lhs_bigit > rhs_bigit ? 1 : -1;
+    }
+    if (i != j) return i > j ? 1 : -1;
+    return 0;
+  }
+
+  // Returns compare(lhs1 + lhs2, rhs).
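+  // The sum is never materialized: bigits are compared from the most
+  // significant position down while carrying a scaled borrow, so the result
+  // is usually decided after inspecting only the top few bigits.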
+  friend FMT_CONSTEXPR20 int add_compare(const bigint& lhs1, const bigint& lhs2,
+                                         const bigint& rhs) {
+    int max_lhs_bigits = (std::max)(lhs1.num_bigits(), lhs2.num_bigits());
+    int num_rhs_bigits = rhs.num_bigits();
+    if (max_lhs_bigits + 1 < num_rhs_bigits) return -1;
+    if (max_lhs_bigits > num_rhs_bigits) return 1;
+    auto get_bigit = [](const bigint& n, int i) -> bigit {
+      return i >= n.exp_ && i < n.num_bigits() ? n[i - n.exp_] : 0;
+    };
+    double_bigit borrow = 0;
+    int min_exp = (std::min)((std::min)(lhs1.exp_, lhs2.exp_), rhs.exp_);
+    for (int i = num_rhs_bigits - 1; i >= min_exp; --i) {
+      double_bigit sum =
+          static_cast<double_bigit>(get_bigit(lhs1, i)) + get_bigit(lhs2, i);
+      bigit rhs_bigit = get_bigit(rhs, i);
+      if (sum > rhs_bigit + borrow) return 1;
+      borrow = rhs_bigit + borrow - sum;
+      if (borrow > 1) return -1;
+      borrow <<= bigit_bits;
+    }
+    return borrow != 0 ? -1 : 0;
+  }
+
+  // Assigns pow(10, exp) to this bigint.
+  FMT_CONSTEXPR20 void assign_pow10(int exp) {
+    FMT_ASSERT(exp >= 0, "");
+    if (exp == 0) return *this = 1;
+    // Find the top bit.
+    int bitmask = 1;
+    while (exp >= bitmask) bitmask <<= 1;
+    bitmask >>= 1;
+    // pow(10, exp) = pow(5, exp) * pow(2, exp). First compute pow(5, exp) by
+    // repeated squaring and multiplication.
+    *this = 5;
+    bitmask >>= 1;
+    while (bitmask != 0) {
+      square();
+      if ((exp & bitmask) != 0) *this *= 5;
+      bitmask >>= 1;
+    }
+    *this <<= exp;  // Multiply by pow(2, exp) by shifting.
+  }
+
+  FMT_CONSTEXPR20 void square() {
+    int num_bigits = static_cast<int>(bigits_.size());
+    int num_result_bigits = 2 * num_bigits;
+    basic_memory_buffer<bigit, bigits_capacity> n(std::move(bigits_));
+    bigits_.resize(to_unsigned(num_result_bigits));
+    auto sum = uint128_t();
+    for (int bigit_index = 0; bigit_index < num_bigits; ++bigit_index) {
+      // Compute bigit at position bigit_index of the result by adding
+      // cross-product terms n[i] * n[j] such that i + j == bigit_index.
+      for (int i = 0, j = bigit_index; j >= 0; ++i, --j) {
+        // Most terms are multiplied twice which can be optimized in the future.
+        sum += static_cast<double_bigit>(n[i]) * n[j];
+      }
+      (*this)[bigit_index] = static_cast<bigit>(sum);
+      sum >>= bits<bigit>::value;  // Compute the carry.
+    }
+    // Do the same for the top half.
+    for (int bigit_index = num_bigits; bigit_index < num_result_bigits;
+         ++bigit_index) {
+      for (int j = num_bigits - 1, i = bigit_index - j; i < num_bigits;)
+        sum += static_cast<double_bigit>(n[i++]) * n[j--];
+      (*this)[bigit_index] = static_cast<bigit>(sum);
+      sum >>= bits<bigit>::value;
+    }
+    remove_leading_zeros();
+    exp_ *= 2;
+  }
+
+  // If this bigint has a bigger exponent than other, adds trailing zero to make
+  // exponents equal. This simplifies some operations such as subtraction.
+  FMT_CONSTEXPR20 void align(const bigint& other) {
+    int exp_difference = exp_ - other.exp_;
+    if (exp_difference <= 0) return;
+    int num_bigits = static_cast<int>(bigits_.size());
+    bigits_.resize(to_unsigned(num_bigits + exp_difference));
+    for (int i = num_bigits - 1, j = i + exp_difference; i >= 0; --i, --j)
+      bigits_[j] = bigits_[i];
+    std::uninitialized_fill_n(bigits_.data(), exp_difference, 0);
+    exp_ -= exp_difference;
+  }
+
+  // Divides this bignum by divisor, assigning the remainder to this and
+  // returning the quotient.
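+  // Division is done by repeated subtraction; the quotient is expected to be
+  // a single decimal digit in the digit-by-digit fallback that uses it, so
+  // the loop body runs at most a handful of times.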
+  FMT_CONSTEXPR20 int divmod_assign(const bigint& divisor) {
+    FMT_ASSERT(this != &divisor, "");
+    if (compare(*this, divisor) < 0) return 0;
+    FMT_ASSERT(divisor.bigits_[divisor.bigits_.size() - 1u] != 0, "");
+    align(divisor);
+    int quotient = 0;
+    do {
+      subtract_aligned(divisor);
+      ++quotient;
+    } while (compare(*this, divisor) >= 0);
+    return quotient;
+  }
+};
+
+enum class round_direction { unknown, up, down };
+
+// Given the divisor (normally a power of 10), the remainder = v % divisor for
+// some number v and the error, returns whether v should be rounded up, down, or
+// whether the rounding direction can't be determined due to error.
+// error should be less than divisor / 2.
+FMT_CONSTEXPR inline round_direction get_round_direction(uint64_t divisor,
+                                                         uint64_t remainder,
+                                                         uint64_t error) {
+  FMT_ASSERT(remainder < divisor, "");      // divisor - remainder won't overflow.
+  FMT_ASSERT(error < divisor, "");          // divisor - error won't overflow.
+  FMT_ASSERT(error < divisor - error, "");  // error * 2 won't overflow.
+  // Round down if (remainder + error) * 2 <= divisor.
+  if (remainder <= divisor - remainder && error * 2 <= divisor - remainder * 2)
+    return round_direction::down;
+  // Round up if (remainder - error) * 2 >= divisor.
+  if (remainder >= error &&
+      remainder - error >= divisor - (remainder - error)) {
+    return round_direction::up;
+  }
+  return round_direction::unknown;
+}
+
+namespace digits {
+enum result {
+  more,  // Generate more digits.
+  done,  // Done generating digits.
+  error  // Digit generation cancelled due to an error.
+};
+}  // namespace digits
+
+struct gen_digits_handler {
+  char* buf;
+  int size;
+  int precision;
+  int exp10;
+  bool fixed;
+
+  FMT_CONSTEXPR digits::result on_digit(char digit, uint64_t divisor,
+                                        uint64_t remainder, uint64_t error,
+                                        bool integral) {
+    FMT_ASSERT(remainder < divisor, "");
+    buf[size++] = digit;
+    if (!integral && error >= remainder) return digits::error;
+    if (size < precision) return digits::more;
+    if (!integral) {
+      // Check if error * 2 < divisor with overflow prevention.
+      // The check is not needed for the integral part because error = 1
+      // and divisor > (1 << 32) there.
+      if (error >= divisor || error >= divisor - error) return digits::error;
+    } else {
+      FMT_ASSERT(error == 1 && divisor > 2, "");
+    }
+    auto dir = get_round_direction(divisor, remainder, error);
+    if (dir != round_direction::up)
+      return dir == round_direction::down ? digits::done : digits::error;
+    ++buf[size - 1];
+    for (int i = size - 1; i > 0 && buf[i] > '9'; --i) {
+      buf[i] = '0';
+      ++buf[i - 1];
+    }
+    if (buf[0] > '9') {
+      buf[0] = '1';
+      if (fixed)
+        buf[size++] = '0';
+      else
+        ++exp10;
+    }
+    return digits::done;
+  }
+};
+
+inline FMT_CONSTEXPR20 void adjust_precision(int& precision, int exp10) {
+  // Adjust fixed precision by exponent because it is relative to decimal
+  // point.
+  if (exp10 > 0 && precision > max_value<int>() - exp10)
+    FMT_THROW(format_error("number is too big"));
+  precision += exp10;
+}
+
+// Generates output using the Grisu digit-gen algorithm.
+// error: the size of the region (lower, upper) outside of which numbers
+// definitely do not round to value (Delta in Grisu3).
+FMT_INLINE FMT_CONSTEXPR20 digits::result grisu_gen_digits(
+    fp value, uint64_t error, int& exp, gen_digits_handler& handler) {
+  const fp one(1ULL << -value.e, value.e);
+  // The integral part of scaled value (p1 in Grisu) = value / one. It cannot be
+  // zero because it contains a product of two 64-bit numbers with MSB set (due
+  // to normalization) - 1, shifted right by at most 60 bits.
+  auto integral = static_cast<uint32_t>(value.f >> -one.e);
+  FMT_ASSERT(integral != 0, "");
+  FMT_ASSERT(integral == value.f >> -one.e, "");
+  // The fractional part of scaled value (p2 in Grisu) c = value % one.
+  uint64_t fractional = value.f & (one.f - 1);
+  exp = count_digits(integral);  // kappa in Grisu.
+  // Non-fixed formats require at least one digit and no precision adjustment.
+  if (handler.fixed) {
+    adjust_precision(handler.precision, exp + handler.exp10);
+    // Check if precision is satisfied just by leading zeros, e.g.
+    // format("{:.2f}", 0.001) gives "0.00" without generating any digits.
+    if (handler.precision <= 0) {
+      if (handler.precision < 0) return digits::done;
+      // Divide by 10 to prevent overflow.
+      uint64_t divisor = impl_data::power_of_10_64[exp - 1] << -one.e;
+      auto dir = get_round_direction(divisor, value.f / 10, error * 10);
+      if (dir == round_direction::unknown) return digits::error;
+      handler.buf[handler.size++] = dir == round_direction::up ? '1' : '0';
+      return digits::done;
+    }
+  }
+  // Generate digits for the integral part. This can produce up to 10 digits.
+  do {
+    uint32_t digit = 0;
+    auto divmod_integral = [&](uint32_t divisor) {
+      digit = integral / divisor;
+      integral %= divisor;
+    };
+    // This optimization by Milo Yip reduces the number of integer divisions by
+    // one per iteration.
+    switch (exp) {
+    case 10:
+      divmod_integral(1000000000);
+      break;
+    case 9:
+      divmod_integral(100000000);
+      break;
+    case 8:
+      divmod_integral(10000000);
+      break;
+    case 7:
+      divmod_integral(1000000);
+      break;
+    case 6:
+      divmod_integral(100000);
+      break;
+    case 5:
+      divmod_integral(10000);
+      break;
+    case 4:
+      divmod_integral(1000);
+      break;
+    case 3:
+      divmod_integral(100);
+      break;
+    case 2:
+      divmod_integral(10);
+      break;
+    case 1:
+      digit = integral;
+      integral = 0;
+      break;
+    default:
+      FMT_ASSERT(false, "invalid number of digits");
+    }
+    --exp;
+    auto remainder = (static_cast<uint64_t>(integral) << -one.e) + fractional;
+    auto result = handler.on_digit(static_cast<char>('0' + digit),
+                                   impl_data::power_of_10_64[exp] << -one.e,
+                                   remainder, error, true);
+    if (result != digits::more) return result;
+  } while (exp > 0);
+  // Generate digits for the fractional part.
+  for (;;) {
+    fractional *= 10;
+    error *= 10;
+    char digit = static_cast<char>('0' + (fractional >> -one.e));
+    fractional &= one.f - 1;
+    --exp;
+    auto result = handler.on_digit(digit, one.f, fractional, error, false);
+    if (result != digits::more) return result;
+  }
+}
+
+inline FMT_CONSTEXPR20 uint128_fallback& uint128_fallback::operator+=(
+    uint64_t n) noexcept {
+  if (is_constant_evaluated()) {
+    lo_ += n;
+    hi_ += (lo_ < n ? 1 : 0);
+    return *this;
+  }
+#if FMT_HAS_BUILTIN(__builtin_addcll)
+  unsigned long long carry;
+  lo_ = __builtin_addcll(lo_, n, 0, &carry);
+  hi_ += carry;
+#elif FMT_HAS_BUILTIN(__builtin_ia32_addcarryx_u64)
+  unsigned long long result;
+  auto carry = __builtin_ia32_addcarryx_u64(0, lo_, n, &result);
+  lo_ = result;
+  hi_ += carry;
+#elif defined(_MSC_VER) && defined(_M_X64)
+  auto carry = _addcarry_u64(0, lo_, n, &lo_);
+  _addcarry_u64(carry, hi_, 0, &hi_);
+#else
+  lo_ += n;
+  hi_ += (lo_ < n ? 1 : 0);
+#endif
+  return *this;
+}
+
+// Compilers should be able to optimize this into the ror instruction.
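+// For example, rotr(0x12345678u, 8) == 0x78123456u.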
+FMT_CONSTEXPR inline uint32_t rotr(uint32_t n, uint32_t r) noexcept {
+  r &= 31;
+  return (n >> r) | (n << (32 - r));
+}
+FMT_CONSTEXPR inline uint64_t rotr(uint64_t n, uint32_t r) noexcept {
+  r &= 63;
+  return (n >> r) | (n << (64 - r));
+}
+
+// Computes 128-bit result of multiplication of two 64-bit unsigned integers.
+inline uint128_fallback umul128(uint64_t x, uint64_t y) noexcept {
+#if FMT_USE_INT128
+  auto p = static_cast<uint128_t>(x) * static_cast<uint128_t>(y);
+  return {static_cast<uint64_t>(p >> 64), static_cast<uint64_t>(p)};
+#elif defined(_MSC_VER) && defined(_M_X64)
+  auto result = uint128_fallback();
+  result.lo_ = _umul128(x, y, &result.hi_);
+  return result;
+#else
+  const uint64_t mask = static_cast<uint64_t>(max_value<uint32_t>());
+
+  uint64_t a = x >> 32;
+  uint64_t b = x & mask;
+  uint64_t c = y >> 32;
+  uint64_t d = y & mask;
+
+  uint64_t ac = a * c;
+  uint64_t bc = b * c;
+  uint64_t ad = a * d;
+  uint64_t bd = b * d;
+
+  uint64_t intermediate = (bd >> 32) + (ad & mask) + (bc & mask);
+
+  return {ac + (intermediate >> 32) + (ad >> 32) + (bc >> 32),
+          (intermediate << 32) + (bd & mask)};
+#endif
+}
+
+// Implementation of Dragonbox algorithm: https://github.com/jk-jeon/dragonbox.
+namespace dragonbox {
+// Computes upper 64 bits of multiplication of two 64-bit unsigned integers.
+inline uint64_t umul128_upper64(uint64_t x, uint64_t y) noexcept {
+#if FMT_USE_INT128
+  auto p = static_cast<uint128_t>(x) * static_cast<uint128_t>(y);
+  return static_cast<uint64_t>(p >> 64);
+#elif defined(_MSC_VER) && defined(_M_X64)
+  return __umulh(x, y);
+#else
+  return umul128(x, y).high();
+#endif
+}
+
+// Computes upper 128 bits of multiplication of a 64-bit unsigned integer and a
+// 128-bit unsigned integer.
+inline uint128_fallback umul192_upper128(uint64_t x,
+                                         uint128_fallback y) noexcept {
+  uint128_fallback r = umul128(x, y.high());
+  r += umul128_upper64(x, y.low());
+  return r;
+}
+
+// Computes upper 64 bits of multiplication of a 32-bit unsigned integer and a
+// 64-bit unsigned integer.
+inline uint64_t umul96_upper64(uint32_t x, uint64_t y) noexcept {
+  return umul128_upper64(static_cast<uint64_t>(x) << 32, y);
+}
+
+// Computes lower 128 bits of multiplication of a 64-bit unsigned integer and a
+// 128-bit unsigned integer.
+inline uint128_fallback umul192_lower128(uint64_t x,
+                                         uint128_fallback y) noexcept {
+  uint64_t high = x * y.high();
+  uint128_fallback high_low = umul128(x, y.low());
+  return {high + high_low.high(), high_low.low()};
+}
+
+// Computes lower 64 bits of multiplication of a 32-bit unsigned integer and a
+// 64-bit unsigned integer.
+inline uint64_t umul96_lower64(uint32_t x, uint64_t y) noexcept {
+  return x * y;
+}
+
+// Computes floor(log10(pow(2, e))) for e in [-2620, 2620] using the method from
+// https://fmt.dev/papers/Dragonbox.pdf#page=28, section 6.1.
+inline int floor_log10_pow2(int e) noexcept {
+  FMT_ASSERT(e <= 2620 && e >= -2620, "too large exponent");
+  static_assert((-1 >> 1) == -1, "right shift is not arithmetic");
+  return (e * 315653) >> 20;
+}
+
+// Various fast log computations.
+inline int floor_log2_pow10(int e) noexcept {
+  FMT_ASSERT(e <= 1233 && e >= -1233, "too large exponent");
+  return (e * 1741647) >> 19;
+}
+inline int floor_log10_pow2_minus_log10_4_over_3(int e) noexcept {
+  FMT_ASSERT(e <= 2936 && e >= -2985, "too large exponent");
+  return (e * 631305 - 261663) >> 21;
+}
+
+static constexpr struct {
+  uint32_t divisor;
+  int shift_amount;
+} div_small_pow10_infos[] = {{10, 16}, {100, 16}};
+
+// Replaces n by floor(n / pow(10, N)) returning true if and only if n is
+// divisible by pow(10, N).
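+// For N = 1 this maps n = 70 to 7 and returns true, while n = 73 also maps
+// to 7 but returns false.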
+// Precondition: n <= pow(10, N + 1).
+template <int N>
+bool check_divisibility_and_divide_by_pow10(uint32_t& n) noexcept {
+  // The numbers below are chosen such that:
+  //   1. floor(n/d) = floor(nm / 2^k) where d=10 or d=100,
+  //   2. nm mod 2^k < m if and only if n is divisible by d,
+  // where m is magic_number, k is shift_amount
+  // and d is divisor.
+  //
+  // Item 1 is a common technique of replacing division by a constant with
+  // multiplication, see e.g. "Division by Invariant Integers Using
+  // Multiplication" by Granlund and Montgomery (1994). magic_number (m) is set
+  // to ceil(2^k/d) for large enough k.
+  // The idea for item 2 originates from Schubfach.
+  constexpr auto info = div_small_pow10_infos[N - 1];
+  FMT_ASSERT(n <= info.divisor * 10, "n is too large");
+  constexpr uint32_t magic_number =
+      (1u << info.shift_amount) / info.divisor + 1;
+  n *= magic_number;
+  const uint32_t comparison_mask = (1u << info.shift_amount) - 1;
+  bool result = (n & comparison_mask) < magic_number;
+  n >>= info.shift_amount;
+  return result;
+}
+
+// Computes floor(n / pow(10, N)) for small n and N.
+// Precondition: n <= pow(10, N + 1).
+template <int N> uint32_t small_division_by_pow10(uint32_t n) noexcept {
+  constexpr auto info = div_small_pow10_infos[N - 1];
+  FMT_ASSERT(n <= info.divisor * 10, "n is too large");
+  constexpr uint32_t magic_number =
+      (1u << info.shift_amount) / info.divisor + 1;
+  return (n * magic_number) >> info.shift_amount;
+}
+
+// Computes floor(n / 10^(kappa + 1)) (float)
+inline uint32_t divide_by_10_to_kappa_plus_1(uint32_t n) noexcept {
+  // 1374389535 = ceil(2^37/100)
+  return static_cast<uint32_t>((static_cast<uint64_t>(n) * 1374389535) >> 37);
+}
+// Computes floor(n / 10^(kappa + 1)) (double)
+inline uint64_t divide_by_10_to_kappa_plus_1(uint64_t n) noexcept {
+  // 2361183241434822607 = ceil(2^(64+7)/1000)
+  return umul128_upper64(n, 2361183241434822607ull) >> 7;
+}
+
+// Various subroutines using pow10 cache
+template <class T> struct cache_accessor;
+
+template <> struct cache_accessor<float> {
+  using carrier_uint = float_info<float>::carrier_uint;
+  using cache_entry_type = uint64_t;
+
+  static uint64_t get_cached_power(int k) noexcept {
+    FMT_ASSERT(k >= float_info<float>::min_k && k <= float_info<float>::max_k,
+               "k is out of range");
+    static constexpr const uint64_t pow10_significands[] = {
+        0x81ceb32c4b43fcf5, 0xa2425ff75e14fc32, 0xcad2f7f5359a3b3f,
+        0xfd87b5f28300ca0e, 0x9e74d1b791e07e49, 0xc612062576589ddb,
+        0xf79687aed3eec552, 0x9abe14cd44753b53, 0xc16d9a0095928a28,
+        0xf1c90080baf72cb2, 0x971da05074da7bef, 0xbce5086492111aeb,
+        0xec1e4a7db69561a6, 0x9392ee8e921d5d08, 0xb877aa3236a4b44a,
+        0xe69594bec44de15c, 0x901d7cf73ab0acda, 0xb424dc35095cd810,
+        0xe12e13424bb40e14, 0x8cbccc096f5088cc, 0xafebff0bcb24aaff,
+        0xdbe6fecebdedd5bf, 0x89705f4136b4a598, 0xabcc77118461cefd,
+        0xd6bf94d5e57a42bd, 0x8637bd05af6c69b6, 0xa7c5ac471b478424,
+        0xd1b71758e219652c, 0x83126e978d4fdf3c, 0xa3d70a3d70a3d70b,
+        0xcccccccccccccccd, 0x8000000000000000, 0xa000000000000000,
+        0xc800000000000000, 0xfa00000000000000, 0x9c40000000000000,
+        0xc350000000000000, 0xf424000000000000, 0x9896800000000000,
+        0xbebc200000000000, 0xee6b280000000000, 0x9502f90000000000,
+        0xba43b74000000000, 0xe8d4a51000000000, 0x9184e72a00000000,
+        0xb5e620f480000000, 0xe35fa931a0000000, 0x8e1bc9bf04000000,
+        0xb1a2bc2ec5000000, 0xde0b6b3a76400000, 0x8ac7230489e80000,
+        0xad78ebc5ac620000, 0xd8d726b7177a8000, 0x878678326eac9000,
+        0xa968163f0a57b400, 0xd3c21bcecceda100, 0x84595161401484a0,
+        0xa56fa5b99019a5c8, 0xcecb8f27f4200f3a, 0x813f3978f8940985,
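+        // (78 entries in total, one per decimal exponent k in
+        // [float_info<float>::min_k, float_info<float>::max_k] = [-31, 46])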
+        0xa18f07d736b90be6, 0xc9f2c9cd04674edf, 0xfc6f7c4045812297,
+        0x9dc5ada82b70b59e, 0xc5371912364ce306, 0xf684df56c3e01bc7,
+        0x9a130b963a6c115d, 0xc097ce7bc90715b4, 0xf0bdc21abb48db21,
+        0x96769950b50d88f5, 0xbc143fa4e250eb32, 0xeb194f8e1ae525fe,
+        0x92efd1b8d0cf37bf, 0xb7abc627050305ae, 0xe596b7b0c643c71a,
+        0x8f7e32ce7bea5c70, 0xb35dbf821ae4f38c, 0xe0352f62a19e306f};
+    return pow10_significands[k - float_info<float>::min_k];
+  }
+
+  struct compute_mul_result {
+    carrier_uint result;
+    bool is_integer;
+  };
+  struct compute_mul_parity_result {
+    bool parity;
+    bool is_integer;
+  };
+
+  static compute_mul_result compute_mul(
+      carrier_uint u, const cache_entry_type& cache) noexcept {
+    auto r = umul96_upper64(u, cache);
+    return {static_cast<carrier_uint>(r >> 32),
+            static_cast<carrier_uint>(r) == 0};
+  }
+
+  static uint32_t compute_delta(const cache_entry_type& cache,
+                                int beta) noexcept {
+    return static_cast<uint32_t>(cache >> (64 - 1 - beta));
+  }
+
+  static compute_mul_parity_result compute_mul_parity(
+      carrier_uint two_f, const cache_entry_type& cache, int beta) noexcept {
+    FMT_ASSERT(beta >= 1, "");
+    FMT_ASSERT(beta < 64, "");
+
+    auto r = umul96_lower64(two_f, cache);
+    return {((r >> (64 - beta)) & 1) != 0,
+            static_cast<uint32_t>(r >> (32 - beta)) == 0};
+  }
+
+  static carrier_uint compute_left_endpoint_for_shorter_interval_case(
+      const cache_entry_type& cache, int beta) noexcept {
+    return static_cast<carrier_uint>(
+        (cache - (cache >> (num_significand_bits<float>() + 2))) >>
+        (64 - num_significand_bits<float>() - 1 - beta));
+  }
+
+  static carrier_uint compute_right_endpoint_for_shorter_interval_case(
+      const cache_entry_type& cache, int beta) noexcept {
+    return static_cast<carrier_uint>(
+        (cache + (cache >> (num_significand_bits<float>() + 1))) >>
+        (64 - num_significand_bits<float>() - 1 - beta));
+  }
+
+  static carrier_uint compute_round_up_for_shorter_interval_case(
+      const cache_entry_type& cache, int beta) noexcept {
+    return (static_cast<carrier_uint>(
+                cache >> (64 - num_significand_bits<float>() - 2 - beta)) +
+            1) /
+           2;
+  }
+};
+
+template <> struct cache_accessor<double> {
+  using carrier_uint = float_info<double>::carrier_uint;
+  using cache_entry_type = uint128_fallback;
+
+  static uint128_fallback get_cached_power(int k) noexcept {
+    FMT_ASSERT(k >= float_info<double>::min_k && k <= float_info<double>::max_k,
+               "k is out of range");
+
+    static constexpr const uint128_fallback pow10_significands[] = {
+#if FMT_USE_FULL_CACHE_DRAGONBOX
+        {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b},
+        {0x9faacf3df73609b1, 0x77b191618c54e9ad},
+        {0xc795830d75038c1d, 0xd59df5b9ef6a2418},
+        {0xf97ae3d0d2446f25, 0x4b0573286b44ad1e},
+        {0x9becce62836ac577, 0x4ee367f9430aec33},
+        {0xc2e801fb244576d5, 0x229c41f793cda740},
+        {0xf3a20279ed56d48a, 0x6b43527578c11110},
+        {0x9845418c345644d6, 0x830a13896b78aaaa},
+        {0xbe5691ef416bd60c, 0x23cc986bc656d554},
+        {0xedec366b11c6cb8f, 0x2cbfbe86b7ec8aa9},
+        {0x94b3a202eb1c3f39, 0x7bf7d71432f3d6aa},
+        {0xb9e08a83a5e34f07, 0xdaf5ccd93fb0cc54},
+        {0xe858ad248f5c22c9, 0xd1b3400f8f9cff69},
+        {0x91376c36d99995be, 0x23100809b9c21fa2},
+        {0xb58547448ffffb2d, 0xabd40a0c2832a78b},
+        {0xe2e69915b3fff9f9, 0x16c90c8f323f516d},
+        {0x8dd01fad907ffc3b, 0xae3da7d97f6792e4},
+        {0xb1442798f49ffb4a, 0x99cd11cfdf41779d},
+        {0xdd95317f31c7fa1d, 0x40405643d711d584},
+        {0x8a7d3eef7f1cfc52, 0x482835ea666b2573},
+        {0xad1c8eab5ee43b66, 0xda3243650005eed0},
+        {0xd863b256369d4a40, 0x90bed43e40076a83},
+        {0x873e4f75e2224e68, 0x5a7744a6e804a292},
+        {0xa90de3535aaae202, 0x711515d0a205cb37},
+        {0xd3515c2831559a83, 0x0d5a5b44ca873e04},
+        {0x8412d9991ed58091, 0xe858790afe9486c3},
+        {0xa5178fff668ae0b6, 0x626e974dbe39a873},
{0xce5d73ff402d98e3, 0xfb0a3d212dc81290}, + {0x80fa687f881c7f8e, 0x7ce66634bc9d0b9a}, + {0xa139029f6a239f72, 0x1c1fffc1ebc44e81}, + {0xc987434744ac874e, 0xa327ffb266b56221}, + {0xfbe9141915d7a922, 0x4bf1ff9f0062baa9}, + {0x9d71ac8fada6c9b5, 0x6f773fc3603db4aa}, + {0xc4ce17b399107c22, 0xcb550fb4384d21d4}, + {0xf6019da07f549b2b, 0x7e2a53a146606a49}, + {0x99c102844f94e0fb, 0x2eda7444cbfc426e}, + {0xc0314325637a1939, 0xfa911155fefb5309}, + {0xf03d93eebc589f88, 0x793555ab7eba27cb}, + {0x96267c7535b763b5, 0x4bc1558b2f3458df}, + {0xbbb01b9283253ca2, 0x9eb1aaedfb016f17}, + {0xea9c227723ee8bcb, 0x465e15a979c1cadd}, + {0x92a1958a7675175f, 0x0bfacd89ec191eca}, + {0xb749faed14125d36, 0xcef980ec671f667c}, + {0xe51c79a85916f484, 0x82b7e12780e7401b}, + {0x8f31cc0937ae58d2, 0xd1b2ecb8b0908811}, + {0xb2fe3f0b8599ef07, 0x861fa7e6dcb4aa16}, + {0xdfbdcece67006ac9, 0x67a791e093e1d49b}, + {0x8bd6a141006042bd, 0xe0c8bb2c5c6d24e1}, + {0xaecc49914078536d, 0x58fae9f773886e19}, + {0xda7f5bf590966848, 0xaf39a475506a899f}, + {0x888f99797a5e012d, 0x6d8406c952429604}, + {0xaab37fd7d8f58178, 0xc8e5087ba6d33b84}, + {0xd5605fcdcf32e1d6, 0xfb1e4a9a90880a65}, + {0x855c3be0a17fcd26, 0x5cf2eea09a550680}, + {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f}, + {0xd0601d8efc57b08b, 0xf13b94daf124da27}, + {0x823c12795db6ce57, 0x76c53d08d6b70859}, + {0xa2cb1717b52481ed, 0x54768c4b0c64ca6f}, + {0xcb7ddcdda26da268, 0xa9942f5dcf7dfd0a}, + {0xfe5d54150b090b02, 0xd3f93b35435d7c4d}, + {0x9efa548d26e5a6e1, 0xc47bc5014a1a6db0}, + {0xc6b8e9b0709f109a, 0x359ab6419ca1091c}, + {0xf867241c8cc6d4c0, 0xc30163d203c94b63}, + {0x9b407691d7fc44f8, 0x79e0de63425dcf1e}, + {0xc21094364dfb5636, 0x985915fc12f542e5}, + {0xf294b943e17a2bc4, 0x3e6f5b7b17b2939e}, + {0x979cf3ca6cec5b5a, 0xa705992ceecf9c43}, + {0xbd8430bd08277231, 0x50c6ff782a838354}, + {0xece53cec4a314ebd, 0xa4f8bf5635246429}, + {0x940f4613ae5ed136, 0x871b7795e136be9a}, + {0xb913179899f68584, 0x28e2557b59846e40}, + {0xe757dd7ec07426e5, 0x331aeada2fe589d0}, + {0x9096ea6f3848984f, 0x3ff0d2c85def7622}, + {0xb4bca50b065abe63, 0x0fed077a756b53aa}, + {0xe1ebce4dc7f16dfb, 0xd3e8495912c62895}, + {0x8d3360f09cf6e4bd, 0x64712dd7abbbd95d}, + {0xb080392cc4349dec, 0xbd8d794d96aacfb4}, + {0xdca04777f541c567, 0xecf0d7a0fc5583a1}, + {0x89e42caaf9491b60, 0xf41686c49db57245}, + {0xac5d37d5b79b6239, 0x311c2875c522ced6}, + {0xd77485cb25823ac7, 0x7d633293366b828c}, + {0x86a8d39ef77164bc, 0xae5dff9c02033198}, + {0xa8530886b54dbdeb, 0xd9f57f830283fdfd}, + {0xd267caa862a12d66, 0xd072df63c324fd7c}, + {0x8380dea93da4bc60, 0x4247cb9e59f71e6e}, + {0xa46116538d0deb78, 0x52d9be85f074e609}, + {0xcd795be870516656, 0x67902e276c921f8c}, + {0x806bd9714632dff6, 0x00ba1cd8a3db53b7}, + {0xa086cfcd97bf97f3, 0x80e8a40eccd228a5}, + {0xc8a883c0fdaf7df0, 0x6122cd128006b2ce}, + {0xfad2a4b13d1b5d6c, 0x796b805720085f82}, + {0x9cc3a6eec6311a63, 0xcbe3303674053bb1}, + {0xc3f490aa77bd60fc, 0xbedbfc4411068a9d}, + {0xf4f1b4d515acb93b, 0xee92fb5515482d45}, + {0x991711052d8bf3c5, 0x751bdd152d4d1c4b}, + {0xbf5cd54678eef0b6, 0xd262d45a78a0635e}, + {0xef340a98172aace4, 0x86fb897116c87c35}, + {0x9580869f0e7aac0e, 0xd45d35e6ae3d4da1}, + {0xbae0a846d2195712, 0x8974836059cca10a}, + {0xe998d258869facd7, 0x2bd1a438703fc94c}, + {0x91ff83775423cc06, 0x7b6306a34627ddd0}, + {0xb67f6455292cbf08, 0x1a3bc84c17b1d543}, + {0xe41f3d6a7377eeca, 0x20caba5f1d9e4a94}, + {0x8e938662882af53e, 0x547eb47b7282ee9d}, + {0xb23867fb2a35b28d, 0xe99e619a4f23aa44}, + {0xdec681f9f4c31f31, 0x6405fa00e2ec94d5}, + {0x8b3c113c38f9f37e, 0xde83bc408dd3dd05}, + {0xae0b158b4738705e, 
0x9624ab50b148d446}, + {0xd98ddaee19068c76, 0x3badd624dd9b0958}, + {0x87f8a8d4cfa417c9, 0xe54ca5d70a80e5d7}, + {0xa9f6d30a038d1dbc, 0x5e9fcf4ccd211f4d}, + {0xd47487cc8470652b, 0x7647c32000696720}, + {0x84c8d4dfd2c63f3b, 0x29ecd9f40041e074}, + {0xa5fb0a17c777cf09, 0xf468107100525891}, + {0xcf79cc9db955c2cc, 0x7182148d4066eeb5}, + {0x81ac1fe293d599bf, 0xc6f14cd848405531}, + {0xa21727db38cb002f, 0xb8ada00e5a506a7d}, + {0xca9cf1d206fdc03b, 0xa6d90811f0e4851d}, + {0xfd442e4688bd304a, 0x908f4a166d1da664}, + {0x9e4a9cec15763e2e, 0x9a598e4e043287ff}, + {0xc5dd44271ad3cdba, 0x40eff1e1853f29fe}, + {0xf7549530e188c128, 0xd12bee59e68ef47d}, + {0x9a94dd3e8cf578b9, 0x82bb74f8301958cf}, + {0xc13a148e3032d6e7, 0xe36a52363c1faf02}, + {0xf18899b1bc3f8ca1, 0xdc44e6c3cb279ac2}, + {0x96f5600f15a7b7e5, 0x29ab103a5ef8c0ba}, + {0xbcb2b812db11a5de, 0x7415d448f6b6f0e8}, + {0xebdf661791d60f56, 0x111b495b3464ad22}, + {0x936b9fcebb25c995, 0xcab10dd900beec35}, + {0xb84687c269ef3bfb, 0x3d5d514f40eea743}, + {0xe65829b3046b0afa, 0x0cb4a5a3112a5113}, + {0x8ff71a0fe2c2e6dc, 0x47f0e785eaba72ac}, + {0xb3f4e093db73a093, 0x59ed216765690f57}, + {0xe0f218b8d25088b8, 0x306869c13ec3532d}, + {0x8c974f7383725573, 0x1e414218c73a13fc}, + {0xafbd2350644eeacf, 0xe5d1929ef90898fb}, + {0xdbac6c247d62a583, 0xdf45f746b74abf3a}, + {0x894bc396ce5da772, 0x6b8bba8c328eb784}, + {0xab9eb47c81f5114f, 0x066ea92f3f326565}, + {0xd686619ba27255a2, 0xc80a537b0efefebe}, + {0x8613fd0145877585, 0xbd06742ce95f5f37}, + {0xa798fc4196e952e7, 0x2c48113823b73705}, + {0xd17f3b51fca3a7a0, 0xf75a15862ca504c6}, + {0x82ef85133de648c4, 0x9a984d73dbe722fc}, + {0xa3ab66580d5fdaf5, 0xc13e60d0d2e0ebbb}, + {0xcc963fee10b7d1b3, 0x318df905079926a9}, + {0xffbbcfe994e5c61f, 0xfdf17746497f7053}, + {0x9fd561f1fd0f9bd3, 0xfeb6ea8bedefa634}, + {0xc7caba6e7c5382c8, 0xfe64a52ee96b8fc1}, + {0xf9bd690a1b68637b, 0x3dfdce7aa3c673b1}, + {0x9c1661a651213e2d, 0x06bea10ca65c084f}, + {0xc31bfa0fe5698db8, 0x486e494fcff30a63}, + {0xf3e2f893dec3f126, 0x5a89dba3c3efccfb}, + {0x986ddb5c6b3a76b7, 0xf89629465a75e01d}, + {0xbe89523386091465, 0xf6bbb397f1135824}, + {0xee2ba6c0678b597f, 0x746aa07ded582e2d}, + {0x94db483840b717ef, 0xa8c2a44eb4571cdd}, + {0xba121a4650e4ddeb, 0x92f34d62616ce414}, + {0xe896a0d7e51e1566, 0x77b020baf9c81d18}, + {0x915e2486ef32cd60, 0x0ace1474dc1d122f}, + {0xb5b5ada8aaff80b8, 0x0d819992132456bb}, + {0xe3231912d5bf60e6, 0x10e1fff697ed6c6a}, + {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2}, + {0xb1736b96b6fd83b3, 0xbd308ff8a6b17cb3}, + {0xddd0467c64bce4a0, 0xac7cb3f6d05ddbdf}, + {0x8aa22c0dbef60ee4, 0x6bcdf07a423aa96c}, + {0xad4ab7112eb3929d, 0x86c16c98d2c953c7}, + {0xd89d64d57a607744, 0xe871c7bf077ba8b8}, + {0x87625f056c7c4a8b, 0x11471cd764ad4973}, + {0xa93af6c6c79b5d2d, 0xd598e40d3dd89bd0}, + {0xd389b47879823479, 0x4aff1d108d4ec2c4}, + {0x843610cb4bf160cb, 0xcedf722a585139bb}, + {0xa54394fe1eedb8fe, 0xc2974eb4ee658829}, + {0xce947a3da6a9273e, 0x733d226229feea33}, + {0x811ccc668829b887, 0x0806357d5a3f5260}, + {0xa163ff802a3426a8, 0xca07c2dcb0cf26f8}, + {0xc9bcff6034c13052, 0xfc89b393dd02f0b6}, + {0xfc2c3f3841f17c67, 0xbbac2078d443ace3}, + {0x9d9ba7832936edc0, 0xd54b944b84aa4c0e}, + {0xc5029163f384a931, 0x0a9e795e65d4df12}, + {0xf64335bcf065d37d, 0x4d4617b5ff4a16d6}, + {0x99ea0196163fa42e, 0x504bced1bf8e4e46}, + {0xc06481fb9bcf8d39, 0xe45ec2862f71e1d7}, + {0xf07da27a82c37088, 0x5d767327bb4e5a4d}, + {0x964e858c91ba2655, 0x3a6a07f8d510f870}, + {0xbbe226efb628afea, 0x890489f70a55368c}, + {0xeadab0aba3b2dbe5, 0x2b45ac74ccea842f}, + {0x92c8ae6b464fc96f, 0x3b0b8bc90012929e}, + 
{0xb77ada0617e3bbcb, 0x09ce6ebb40173745}, + {0xe55990879ddcaabd, 0xcc420a6a101d0516}, + {0x8f57fa54c2a9eab6, 0x9fa946824a12232e}, + {0xb32df8e9f3546564, 0x47939822dc96abfa}, + {0xdff9772470297ebd, 0x59787e2b93bc56f8}, + {0x8bfbea76c619ef36, 0x57eb4edb3c55b65b}, + {0xaefae51477a06b03, 0xede622920b6b23f2}, + {0xdab99e59958885c4, 0xe95fab368e45ecee}, + {0x88b402f7fd75539b, 0x11dbcb0218ebb415}, + {0xaae103b5fcd2a881, 0xd652bdc29f26a11a}, + {0xd59944a37c0752a2, 0x4be76d3346f04960}, + {0x857fcae62d8493a5, 0x6f70a4400c562ddc}, + {0xa6dfbd9fb8e5b88e, 0xcb4ccd500f6bb953}, + {0xd097ad07a71f26b2, 0x7e2000a41346a7a8}, + {0x825ecc24c873782f, 0x8ed400668c0c28c9}, + {0xa2f67f2dfa90563b, 0x728900802f0f32fb}, + {0xcbb41ef979346bca, 0x4f2b40a03ad2ffba}, + {0xfea126b7d78186bc, 0xe2f610c84987bfa9}, + {0x9f24b832e6b0f436, 0x0dd9ca7d2df4d7ca}, + {0xc6ede63fa05d3143, 0x91503d1c79720dbc}, + {0xf8a95fcf88747d94, 0x75a44c6397ce912b}, + {0x9b69dbe1b548ce7c, 0xc986afbe3ee11abb}, + {0xc24452da229b021b, 0xfbe85badce996169}, + {0xf2d56790ab41c2a2, 0xfae27299423fb9c4}, + {0x97c560ba6b0919a5, 0xdccd879fc967d41b}, + {0xbdb6b8e905cb600f, 0x5400e987bbc1c921}, + {0xed246723473e3813, 0x290123e9aab23b69}, + {0x9436c0760c86e30b, 0xf9a0b6720aaf6522}, + {0xb94470938fa89bce, 0xf808e40e8d5b3e6a}, + {0xe7958cb87392c2c2, 0xb60b1d1230b20e05}, + {0x90bd77f3483bb9b9, 0xb1c6f22b5e6f48c3}, + {0xb4ecd5f01a4aa828, 0x1e38aeb6360b1af4}, + {0xe2280b6c20dd5232, 0x25c6da63c38de1b1}, + {0x8d590723948a535f, 0x579c487e5a38ad0f}, + {0xb0af48ec79ace837, 0x2d835a9df0c6d852}, + {0xdcdb1b2798182244, 0xf8e431456cf88e66}, + {0x8a08f0f8bf0f156b, 0x1b8e9ecb641b5900}, + {0xac8b2d36eed2dac5, 0xe272467e3d222f40}, + {0xd7adf884aa879177, 0x5b0ed81dcc6abb10}, + {0x86ccbb52ea94baea, 0x98e947129fc2b4ea}, + {0xa87fea27a539e9a5, 0x3f2398d747b36225}, + {0xd29fe4b18e88640e, 0x8eec7f0d19a03aae}, + {0x83a3eeeef9153e89, 0x1953cf68300424ad}, + {0xa48ceaaab75a8e2b, 0x5fa8c3423c052dd8}, + {0xcdb02555653131b6, 0x3792f412cb06794e}, + {0x808e17555f3ebf11, 0xe2bbd88bbee40bd1}, + {0xa0b19d2ab70e6ed6, 0x5b6aceaeae9d0ec5}, + {0xc8de047564d20a8b, 0xf245825a5a445276}, + {0xfb158592be068d2e, 0xeed6e2f0f0d56713}, + {0x9ced737bb6c4183d, 0x55464dd69685606c}, + {0xc428d05aa4751e4c, 0xaa97e14c3c26b887}, + {0xf53304714d9265df, 0xd53dd99f4b3066a9}, + {0x993fe2c6d07b7fab, 0xe546a8038efe402a}, + {0xbf8fdb78849a5f96, 0xde98520472bdd034}, + {0xef73d256a5c0f77c, 0x963e66858f6d4441}, + {0x95a8637627989aad, 0xdde7001379a44aa9}, + {0xbb127c53b17ec159, 0x5560c018580d5d53}, + {0xe9d71b689dde71af, 0xaab8f01e6e10b4a7}, + {0x9226712162ab070d, 0xcab3961304ca70e9}, + {0xb6b00d69bb55c8d1, 0x3d607b97c5fd0d23}, + {0xe45c10c42a2b3b05, 0x8cb89a7db77c506b}, + {0x8eb98a7a9a5b04e3, 0x77f3608e92adb243}, + {0xb267ed1940f1c61c, 0x55f038b237591ed4}, + {0xdf01e85f912e37a3, 0x6b6c46dec52f6689}, + {0x8b61313bbabce2c6, 0x2323ac4b3b3da016}, + {0xae397d8aa96c1b77, 0xabec975e0a0d081b}, + {0xd9c7dced53c72255, 0x96e7bd358c904a22}, + {0x881cea14545c7575, 0x7e50d64177da2e55}, + {0xaa242499697392d2, 0xdde50bd1d5d0b9ea}, + {0xd4ad2dbfc3d07787, 0x955e4ec64b44e865}, + {0x84ec3c97da624ab4, 0xbd5af13bef0b113f}, + {0xa6274bbdd0fadd61, 0xecb1ad8aeacdd58f}, + {0xcfb11ead453994ba, 0x67de18eda5814af3}, + {0x81ceb32c4b43fcf4, 0x80eacf948770ced8}, + {0xa2425ff75e14fc31, 0xa1258379a94d028e}, + {0xcad2f7f5359a3b3e, 0x096ee45813a04331}, + {0xfd87b5f28300ca0d, 0x8bca9d6e188853fd}, + {0x9e74d1b791e07e48, 0x775ea264cf55347e}, + {0xc612062576589dda, 0x95364afe032a819e}, + {0xf79687aed3eec551, 0x3a83ddbd83f52205}, + {0x9abe14cd44753b52, 
0xc4926a9672793543}, + {0xc16d9a0095928a27, 0x75b7053c0f178294}, + {0xf1c90080baf72cb1, 0x5324c68b12dd6339}, + {0x971da05074da7bee, 0xd3f6fc16ebca5e04}, + {0xbce5086492111aea, 0x88f4bb1ca6bcf585}, + {0xec1e4a7db69561a5, 0x2b31e9e3d06c32e6}, + {0x9392ee8e921d5d07, 0x3aff322e62439fd0}, + {0xb877aa3236a4b449, 0x09befeb9fad487c3}, + {0xe69594bec44de15b, 0x4c2ebe687989a9b4}, + {0x901d7cf73ab0acd9, 0x0f9d37014bf60a11}, + {0xb424dc35095cd80f, 0x538484c19ef38c95}, + {0xe12e13424bb40e13, 0x2865a5f206b06fba}, + {0x8cbccc096f5088cb, 0xf93f87b7442e45d4}, + {0xafebff0bcb24aafe, 0xf78f69a51539d749}, + {0xdbe6fecebdedd5be, 0xb573440e5a884d1c}, + {0x89705f4136b4a597, 0x31680a88f8953031}, + {0xabcc77118461cefc, 0xfdc20d2b36ba7c3e}, + {0xd6bf94d5e57a42bc, 0x3d32907604691b4d}, + {0x8637bd05af6c69b5, 0xa63f9a49c2c1b110}, + {0xa7c5ac471b478423, 0x0fcf80dc33721d54}, + {0xd1b71758e219652b, 0xd3c36113404ea4a9}, + {0x83126e978d4fdf3b, 0x645a1cac083126ea}, + {0xa3d70a3d70a3d70a, 0x3d70a3d70a3d70a4}, + {0xcccccccccccccccc, 0xcccccccccccccccd}, + {0x8000000000000000, 0x0000000000000000}, + {0xa000000000000000, 0x0000000000000000}, + {0xc800000000000000, 0x0000000000000000}, + {0xfa00000000000000, 0x0000000000000000}, + {0x9c40000000000000, 0x0000000000000000}, + {0xc350000000000000, 0x0000000000000000}, + {0xf424000000000000, 0x0000000000000000}, + {0x9896800000000000, 0x0000000000000000}, + {0xbebc200000000000, 0x0000000000000000}, + {0xee6b280000000000, 0x0000000000000000}, + {0x9502f90000000000, 0x0000000000000000}, + {0xba43b74000000000, 0x0000000000000000}, + {0xe8d4a51000000000, 0x0000000000000000}, + {0x9184e72a00000000, 0x0000000000000000}, + {0xb5e620f480000000, 0x0000000000000000}, + {0xe35fa931a0000000, 0x0000000000000000}, + {0x8e1bc9bf04000000, 0x0000000000000000}, + {0xb1a2bc2ec5000000, 0x0000000000000000}, + {0xde0b6b3a76400000, 0x0000000000000000}, + {0x8ac7230489e80000, 0x0000000000000000}, + {0xad78ebc5ac620000, 0x0000000000000000}, + {0xd8d726b7177a8000, 0x0000000000000000}, + {0x878678326eac9000, 0x0000000000000000}, + {0xa968163f0a57b400, 0x0000000000000000}, + {0xd3c21bcecceda100, 0x0000000000000000}, + {0x84595161401484a0, 0x0000000000000000}, + {0xa56fa5b99019a5c8, 0x0000000000000000}, + {0xcecb8f27f4200f3a, 0x0000000000000000}, + {0x813f3978f8940984, 0x4000000000000000}, + {0xa18f07d736b90be5, 0x5000000000000000}, + {0xc9f2c9cd04674ede, 0xa400000000000000}, + {0xfc6f7c4045812296, 0x4d00000000000000}, + {0x9dc5ada82b70b59d, 0xf020000000000000}, + {0xc5371912364ce305, 0x6c28000000000000}, + {0xf684df56c3e01bc6, 0xc732000000000000}, + {0x9a130b963a6c115c, 0x3c7f400000000000}, + {0xc097ce7bc90715b3, 0x4b9f100000000000}, + {0xf0bdc21abb48db20, 0x1e86d40000000000}, + {0x96769950b50d88f4, 0x1314448000000000}, + {0xbc143fa4e250eb31, 0x17d955a000000000}, + {0xeb194f8e1ae525fd, 0x5dcfab0800000000}, + {0x92efd1b8d0cf37be, 0x5aa1cae500000000}, + {0xb7abc627050305ad, 0xf14a3d9e40000000}, + {0xe596b7b0c643c719, 0x6d9ccd05d0000000}, + {0x8f7e32ce7bea5c6f, 0xe4820023a2000000}, + {0xb35dbf821ae4f38b, 0xdda2802c8a800000}, + {0xe0352f62a19e306e, 0xd50b2037ad200000}, + {0x8c213d9da502de45, 0x4526f422cc340000}, + {0xaf298d050e4395d6, 0x9670b12b7f410000}, + {0xdaf3f04651d47b4c, 0x3c0cdd765f114000}, + {0x88d8762bf324cd0f, 0xa5880a69fb6ac800}, + {0xab0e93b6efee0053, 0x8eea0d047a457a00}, + {0xd5d238a4abe98068, 0x72a4904598d6d880}, + {0x85a36366eb71f041, 0x47a6da2b7f864750}, + {0xa70c3c40a64e6c51, 0x999090b65f67d924}, + {0xd0cf4b50cfe20765, 0xfff4b4e3f741cf6d}, + {0x82818f1281ed449f, 0xbff8f10e7a8921a5}, + 
{0xa321f2d7226895c7, 0xaff72d52192b6a0e}, + {0xcbea6f8ceb02bb39, 0x9bf4f8a69f764491}, + {0xfee50b7025c36a08, 0x02f236d04753d5b5}, + {0x9f4f2726179a2245, 0x01d762422c946591}, + {0xc722f0ef9d80aad6, 0x424d3ad2b7b97ef6}, + {0xf8ebad2b84e0d58b, 0xd2e0898765a7deb3}, + {0x9b934c3b330c8577, 0x63cc55f49f88eb30}, + {0xc2781f49ffcfa6d5, 0x3cbf6b71c76b25fc}, + {0xf316271c7fc3908a, 0x8bef464e3945ef7b}, + {0x97edd871cfda3a56, 0x97758bf0e3cbb5ad}, + {0xbde94e8e43d0c8ec, 0x3d52eeed1cbea318}, + {0xed63a231d4c4fb27, 0x4ca7aaa863ee4bde}, + {0x945e455f24fb1cf8, 0x8fe8caa93e74ef6b}, + {0xb975d6b6ee39e436, 0xb3e2fd538e122b45}, + {0xe7d34c64a9c85d44, 0x60dbbca87196b617}, + {0x90e40fbeea1d3a4a, 0xbc8955e946fe31ce}, + {0xb51d13aea4a488dd, 0x6babab6398bdbe42}, + {0xe264589a4dcdab14, 0xc696963c7eed2dd2}, + {0x8d7eb76070a08aec, 0xfc1e1de5cf543ca3}, + {0xb0de65388cc8ada8, 0x3b25a55f43294bcc}, + {0xdd15fe86affad912, 0x49ef0eb713f39ebf}, + {0x8a2dbf142dfcc7ab, 0x6e3569326c784338}, + {0xacb92ed9397bf996, 0x49c2c37f07965405}, + {0xd7e77a8f87daf7fb, 0xdc33745ec97be907}, + {0x86f0ac99b4e8dafd, 0x69a028bb3ded71a4}, + {0xa8acd7c0222311bc, 0xc40832ea0d68ce0d}, + {0xd2d80db02aabd62b, 0xf50a3fa490c30191}, + {0x83c7088e1aab65db, 0x792667c6da79e0fb}, + {0xa4b8cab1a1563f52, 0x577001b891185939}, + {0xcde6fd5e09abcf26, 0xed4c0226b55e6f87}, + {0x80b05e5ac60b6178, 0x544f8158315b05b5}, + {0xa0dc75f1778e39d6, 0x696361ae3db1c722}, + {0xc913936dd571c84c, 0x03bc3a19cd1e38ea}, + {0xfb5878494ace3a5f, 0x04ab48a04065c724}, + {0x9d174b2dcec0e47b, 0x62eb0d64283f9c77}, + {0xc45d1df942711d9a, 0x3ba5d0bd324f8395}, + {0xf5746577930d6500, 0xca8f44ec7ee3647a}, + {0x9968bf6abbe85f20, 0x7e998b13cf4e1ecc}, + {0xbfc2ef456ae276e8, 0x9e3fedd8c321a67f}, + {0xefb3ab16c59b14a2, 0xc5cfe94ef3ea101f}, + {0x95d04aee3b80ece5, 0xbba1f1d158724a13}, + {0xbb445da9ca61281f, 0x2a8a6e45ae8edc98}, + {0xea1575143cf97226, 0xf52d09d71a3293be}, + {0x924d692ca61be758, 0x593c2626705f9c57}, + {0xb6e0c377cfa2e12e, 0x6f8b2fb00c77836d}, + {0xe498f455c38b997a, 0x0b6dfb9c0f956448}, + {0x8edf98b59a373fec, 0x4724bd4189bd5ead}, + {0xb2977ee300c50fe7, 0x58edec91ec2cb658}, + {0xdf3d5e9bc0f653e1, 0x2f2967b66737e3ee}, + {0x8b865b215899f46c, 0xbd79e0d20082ee75}, + {0xae67f1e9aec07187, 0xecd8590680a3aa12}, + {0xda01ee641a708de9, 0xe80e6f4820cc9496}, + {0x884134fe908658b2, 0x3109058d147fdcde}, + {0xaa51823e34a7eede, 0xbd4b46f0599fd416}, + {0xd4e5e2cdc1d1ea96, 0x6c9e18ac7007c91b}, + {0x850fadc09923329e, 0x03e2cf6bc604ddb1}, + {0xa6539930bf6bff45, 0x84db8346b786151d}, + {0xcfe87f7cef46ff16, 0xe612641865679a64}, + {0x81f14fae158c5f6e, 0x4fcb7e8f3f60c07f}, + {0xa26da3999aef7749, 0xe3be5e330f38f09e}, + {0xcb090c8001ab551c, 0x5cadf5bfd3072cc6}, + {0xfdcb4fa002162a63, 0x73d9732fc7c8f7f7}, + {0x9e9f11c4014dda7e, 0x2867e7fddcdd9afb}, + {0xc646d63501a1511d, 0xb281e1fd541501b9}, + {0xf7d88bc24209a565, 0x1f225a7ca91a4227}, + {0x9ae757596946075f, 0x3375788de9b06959}, + {0xc1a12d2fc3978937, 0x0052d6b1641c83af}, + {0xf209787bb47d6b84, 0xc0678c5dbd23a49b}, + {0x9745eb4d50ce6332, 0xf840b7ba963646e1}, + {0xbd176620a501fbff, 0xb650e5a93bc3d899}, + {0xec5d3fa8ce427aff, 0xa3e51f138ab4cebf}, + {0x93ba47c980e98cdf, 0xc66f336c36b10138}, + {0xb8a8d9bbe123f017, 0xb80b0047445d4185}, + {0xe6d3102ad96cec1d, 0xa60dc059157491e6}, + {0x9043ea1ac7e41392, 0x87c89837ad68db30}, + {0xb454e4a179dd1877, 0x29babe4598c311fc}, + {0xe16a1dc9d8545e94, 0xf4296dd6fef3d67b}, + {0x8ce2529e2734bb1d, 0x1899e4a65f58660d}, + {0xb01ae745b101e9e4, 0x5ec05dcff72e7f90}, + {0xdc21a1171d42645d, 0x76707543f4fa1f74}, + {0x899504ae72497eba, 
0x6a06494a791c53a9}, + {0xabfa45da0edbde69, 0x0487db9d17636893}, + {0xd6f8d7509292d603, 0x45a9d2845d3c42b7}, + {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3}, + {0xa7f26836f282b732, 0x8e6cac7768d7141f}, + {0xd1ef0244af2364ff, 0x3207d795430cd927}, + {0x8335616aed761f1f, 0x7f44e6bd49e807b9}, + {0xa402b9c5a8d3a6e7, 0x5f16206c9c6209a7}, + {0xcd036837130890a1, 0x36dba887c37a8c10}, + {0x802221226be55a64, 0xc2494954da2c978a}, + {0xa02aa96b06deb0fd, 0xf2db9baa10b7bd6d}, + {0xc83553c5c8965d3d, 0x6f92829494e5acc8}, + {0xfa42a8b73abbf48c, 0xcb772339ba1f17fa}, + {0x9c69a97284b578d7, 0xff2a760414536efc}, + {0xc38413cf25e2d70d, 0xfef5138519684abb}, + {0xf46518c2ef5b8cd1, 0x7eb258665fc25d6a}, + {0x98bf2f79d5993802, 0xef2f773ffbd97a62}, + {0xbeeefb584aff8603, 0xaafb550ffacfd8fb}, + {0xeeaaba2e5dbf6784, 0x95ba2a53f983cf39}, + {0x952ab45cfa97a0b2, 0xdd945a747bf26184}, + {0xba756174393d88df, 0x94f971119aeef9e5}, + {0xe912b9d1478ceb17, 0x7a37cd5601aab85e}, + {0x91abb422ccb812ee, 0xac62e055c10ab33b}, + {0xb616a12b7fe617aa, 0x577b986b314d600a}, + {0xe39c49765fdf9d94, 0xed5a7e85fda0b80c}, + {0x8e41ade9fbebc27d, 0x14588f13be847308}, + {0xb1d219647ae6b31c, 0x596eb2d8ae258fc9}, + {0xde469fbd99a05fe3, 0x6fca5f8ed9aef3bc}, + {0x8aec23d680043bee, 0x25de7bb9480d5855}, + {0xada72ccc20054ae9, 0xaf561aa79a10ae6b}, + {0xd910f7ff28069da4, 0x1b2ba1518094da05}, + {0x87aa9aff79042286, 0x90fb44d2f05d0843}, + {0xa99541bf57452b28, 0x353a1607ac744a54}, + {0xd3fa922f2d1675f2, 0x42889b8997915ce9}, + {0x847c9b5d7c2e09b7, 0x69956135febada12}, + {0xa59bc234db398c25, 0x43fab9837e699096}, + {0xcf02b2c21207ef2e, 0x94f967e45e03f4bc}, + {0x8161afb94b44f57d, 0x1d1be0eebac278f6}, + {0xa1ba1ba79e1632dc, 0x6462d92a69731733}, + {0xca28a291859bbf93, 0x7d7b8f7503cfdcff}, + {0xfcb2cb35e702af78, 0x5cda735244c3d43f}, + {0x9defbf01b061adab, 0x3a0888136afa64a8}, + {0xc56baec21c7a1916, 0x088aaa1845b8fdd1}, + {0xf6c69a72a3989f5b, 0x8aad549e57273d46}, + {0x9a3c2087a63f6399, 0x36ac54e2f678864c}, + {0xc0cb28a98fcf3c7f, 0x84576a1bb416a7de}, + {0xf0fdf2d3f3c30b9f, 0x656d44a2a11c51d6}, + {0x969eb7c47859e743, 0x9f644ae5a4b1b326}, + {0xbc4665b596706114, 0x873d5d9f0dde1fef}, + {0xeb57ff22fc0c7959, 0xa90cb506d155a7eb}, + {0x9316ff75dd87cbd8, 0x09a7f12442d588f3}, + {0xb7dcbf5354e9bece, 0x0c11ed6d538aeb30}, + {0xe5d3ef282a242e81, 0x8f1668c8a86da5fb}, + {0x8fa475791a569d10, 0xf96e017d694487bd}, + {0xb38d92d760ec4455, 0x37c981dcc395a9ad}, + {0xe070f78d3927556a, 0x85bbe253f47b1418}, + {0x8c469ab843b89562, 0x93956d7478ccec8f}, + {0xaf58416654a6babb, 0x387ac8d1970027b3}, + {0xdb2e51bfe9d0696a, 0x06997b05fcc0319f}, + {0x88fcf317f22241e2, 0x441fece3bdf81f04}, + {0xab3c2fddeeaad25a, 0xd527e81cad7626c4}, + {0xd60b3bd56a5586f1, 0x8a71e223d8d3b075}, + {0x85c7056562757456, 0xf6872d5667844e4a}, + {0xa738c6bebb12d16c, 0xb428f8ac016561dc}, + {0xd106f86e69d785c7, 0xe13336d701beba53}, + {0x82a45b450226b39c, 0xecc0024661173474}, + {0xa34d721642b06084, 0x27f002d7f95d0191}, + {0xcc20ce9bd35c78a5, 0x31ec038df7b441f5}, + {0xff290242c83396ce, 0x7e67047175a15272}, + {0x9f79a169bd203e41, 0x0f0062c6e984d387}, + {0xc75809c42c684dd1, 0x52c07b78a3e60869}, + {0xf92e0c3537826145, 0xa7709a56ccdf8a83}, + {0x9bbcc7a142b17ccb, 0x88a66076400bb692}, + {0xc2abf989935ddbfe, 0x6acff893d00ea436}, + {0xf356f7ebf83552fe, 0x0583f6b8c4124d44}, + {0x98165af37b2153de, 0xc3727a337a8b704b}, + {0xbe1bf1b059e9a8d6, 0x744f18c0592e4c5d}, + {0xeda2ee1c7064130c, 0x1162def06f79df74}, + {0x9485d4d1c63e8be7, 0x8addcb5645ac2ba9}, + {0xb9a74a0637ce2ee1, 0x6d953e2bd7173693}, + {0xe8111c87c5c1ba99, 0xc8fa8db6ccdd0438}, + 
{0x910ab1d4db9914a0, 0x1d9c9892400a22a3}, + {0xb54d5e4a127f59c8, 0x2503beb6d00cab4c}, + {0xe2a0b5dc971f303a, 0x2e44ae64840fd61e}, + {0x8da471a9de737e24, 0x5ceaecfed289e5d3}, + {0xb10d8e1456105dad, 0x7425a83e872c5f48}, + {0xdd50f1996b947518, 0xd12f124e28f7771a}, + {0x8a5296ffe33cc92f, 0x82bd6b70d99aaa70}, + {0xace73cbfdc0bfb7b, 0x636cc64d1001550c}, + {0xd8210befd30efa5a, 0x3c47f7e05401aa4f}, + {0x8714a775e3e95c78, 0x65acfaec34810a72}, + {0xa8d9d1535ce3b396, 0x7f1839a741a14d0e}, + {0xd31045a8341ca07c, 0x1ede48111209a051}, + {0x83ea2b892091e44d, 0x934aed0aab460433}, + {0xa4e4b66b68b65d60, 0xf81da84d56178540}, + {0xce1de40642e3f4b9, 0x36251260ab9d668f}, + {0x80d2ae83e9ce78f3, 0xc1d72b7c6b42601a}, + {0xa1075a24e4421730, 0xb24cf65b8612f820}, + {0xc94930ae1d529cfc, 0xdee033f26797b628}, + {0xfb9b7cd9a4a7443c, 0x169840ef017da3b2}, + {0x9d412e0806e88aa5, 0x8e1f289560ee864f}, + {0xc491798a08a2ad4e, 0xf1a6f2bab92a27e3}, + {0xf5b5d7ec8acb58a2, 0xae10af696774b1dc}, + {0x9991a6f3d6bf1765, 0xacca6da1e0a8ef2a}, + {0xbff610b0cc6edd3f, 0x17fd090a58d32af4}, + {0xeff394dcff8a948e, 0xddfc4b4cef07f5b1}, + {0x95f83d0a1fb69cd9, 0x4abdaf101564f98f}, + {0xbb764c4ca7a4440f, 0x9d6d1ad41abe37f2}, + {0xea53df5fd18d5513, 0x84c86189216dc5ee}, + {0x92746b9be2f8552c, 0x32fd3cf5b4e49bb5}, + {0xb7118682dbb66a77, 0x3fbc8c33221dc2a2}, + {0xe4d5e82392a40515, 0x0fabaf3feaa5334b}, + {0x8f05b1163ba6832d, 0x29cb4d87f2a7400f}, + {0xb2c71d5bca9023f8, 0x743e20e9ef511013}, + {0xdf78e4b2bd342cf6, 0x914da9246b255417}, + {0x8bab8eefb6409c1a, 0x1ad089b6c2f7548f}, + {0xae9672aba3d0c320, 0xa184ac2473b529b2}, + {0xda3c0f568cc4f3e8, 0xc9e5d72d90a2741f}, + {0x8865899617fb1871, 0x7e2fa67c7a658893}, + {0xaa7eebfb9df9de8d, 0xddbb901b98feeab8}, + {0xd51ea6fa85785631, 0x552a74227f3ea566}, + {0x8533285c936b35de, 0xd53a88958f872760}, + {0xa67ff273b8460356, 0x8a892abaf368f138}, + {0xd01fef10a657842c, 0x2d2b7569b0432d86}, + {0x8213f56a67f6b29b, 0x9c3b29620e29fc74}, + {0xa298f2c501f45f42, 0x8349f3ba91b47b90}, + {0xcb3f2f7642717713, 0x241c70a936219a74}, + {0xfe0efb53d30dd4d7, 0xed238cd383aa0111}, + {0x9ec95d1463e8a506, 0xf4363804324a40ab}, + {0xc67bb4597ce2ce48, 0xb143c6053edcd0d6}, + {0xf81aa16fdc1b81da, 0xdd94b7868e94050b}, + {0x9b10a4e5e9913128, 0xca7cf2b4191c8327}, + {0xc1d4ce1f63f57d72, 0xfd1c2f611f63a3f1}, + {0xf24a01a73cf2dccf, 0xbc633b39673c8ced}, + {0x976e41088617ca01, 0xd5be0503e085d814}, + {0xbd49d14aa79dbc82, 0x4b2d8644d8a74e19}, + {0xec9c459d51852ba2, 0xddf8e7d60ed1219f}, + {0x93e1ab8252f33b45, 0xcabb90e5c942b504}, + {0xb8da1662e7b00a17, 0x3d6a751f3b936244}, + {0xe7109bfba19c0c9d, 0x0cc512670a783ad5}, + {0x906a617d450187e2, 0x27fb2b80668b24c6}, + {0xb484f9dc9641e9da, 0xb1f9f660802dedf7}, + {0xe1a63853bbd26451, 0x5e7873f8a0396974}, + {0x8d07e33455637eb2, 0xdb0b487b6423e1e9}, + {0xb049dc016abc5e5f, 0x91ce1a9a3d2cda63}, + {0xdc5c5301c56b75f7, 0x7641a140cc7810fc}, + {0x89b9b3e11b6329ba, 0xa9e904c87fcb0a9e}, + {0xac2820d9623bf429, 0x546345fa9fbdcd45}, + {0xd732290fbacaf133, 0xa97c177947ad4096}, + {0x867f59a9d4bed6c0, 0x49ed8eabcccc485e}, + {0xa81f301449ee8c70, 0x5c68f256bfff5a75}, + {0xd226fc195c6a2f8c, 0x73832eec6fff3112}, + {0x83585d8fd9c25db7, 0xc831fd53c5ff7eac}, + {0xa42e74f3d032f525, 0xba3e7ca8b77f5e56}, + {0xcd3a1230c43fb26f, 0x28ce1bd2e55f35ec}, + {0x80444b5e7aa7cf85, 0x7980d163cf5b81b4}, + {0xa0555e361951c366, 0xd7e105bcc3326220}, + {0xc86ab5c39fa63440, 0x8dd9472bf3fefaa8}, + {0xfa856334878fc150, 0xb14f98f6f0feb952}, + {0x9c935e00d4b9d8d2, 0x6ed1bf9a569f33d4}, + {0xc3b8358109e84f07, 0x0a862f80ec4700c9}, + {0xf4a642e14c6262c8, 
+          0xcd27bb612758c0fb},
+        {0x98e7e9cccfbd7dbd, 0x8038d51cb897789d},
+        {0xbf21e44003acdd2c, 0xe0470a63e6bd56c4},
+        {0xeeea5d5004981478, 0x1858ccfce06cac75},
+        {0x95527a5202df0ccb, 0x0f37801e0c43ebc9},
+        {0xbaa718e68396cffd, 0xd30560258f54e6bb},
+        {0xe950df20247c83fd, 0x47c6b82ef32a206a},
+        {0x91d28b7416cdd27e, 0x4cdc331d57fa5442},
+        {0xb6472e511c81471d, 0xe0133fe4adf8e953},
+        {0xe3d8f9e563a198e5, 0x58180fddd97723a7},
+        {0x8e679c2f5e44ff8f, 0x570f09eaa7ea7649},
+        {0xb201833b35d63f73, 0x2cd2cc6551e513db},
+        {0xde81e40a034bcf4f, 0xf8077f7ea65e58d2},
+        {0x8b112e86420f6191, 0xfb04afaf27faf783},
+        {0xadd57a27d29339f6, 0x79c5db9af1f9b564},
+        {0xd94ad8b1c7380874, 0x18375281ae7822bd},
+        {0x87cec76f1c830548, 0x8f2293910d0b15b6},
+        {0xa9c2794ae3a3c69a, 0xb2eb3875504ddb23},
+        {0xd433179d9c8cb841, 0x5fa60692a46151ec},
+        {0x849feec281d7f328, 0xdbc7c41ba6bcd334},
+        {0xa5c7ea73224deff3, 0x12b9b522906c0801},
+        {0xcf39e50feae16bef, 0xd768226b34870a01},
+        {0x81842f29f2cce375, 0xe6a1158300d46641},
+        {0xa1e53af46f801c53, 0x60495ae3c1097fd1},
+        {0xca5e89b18b602368, 0x385bb19cb14bdfc5},
+        {0xfcf62c1dee382c42, 0x46729e03dd9ed7b6},
+        {0x9e19db92b4e31ba9, 0x6c07a2c26a8346d2},
+        {0xc5a05277621be293, 0xc7098b7305241886},
+        { 0xf70867153aa2db38,
+          0xb8cbee4fc66d1ea8 }
+#else
+        {0xff77b1fcbebcdc4f, 0x25e8e89c13bb0f7b},
+        {0xce5d73ff402d98e3, 0xfb0a3d212dc81290},
+        {0xa6b34ad8c9dfc06f, 0xf42faa48c0ea481f},
+        {0x86a8d39ef77164bc, 0xae5dff9c02033198},
+        {0xd98ddaee19068c76, 0x3badd624dd9b0958},
+        {0xafbd2350644eeacf, 0xe5d1929ef90898fb},
+        {0x8df5efabc5979c8f, 0xca8d3ffa1ef463c2},
+        {0xe55990879ddcaabd, 0xcc420a6a101d0516},
+        {0xb94470938fa89bce, 0xf808e40e8d5b3e6a},
+        {0x95a8637627989aad, 0xdde7001379a44aa9},
+        {0xf1c90080baf72cb1, 0x5324c68b12dd6339},
+        {0xc350000000000000, 0x0000000000000000},
+        {0x9dc5ada82b70b59d, 0xf020000000000000},
+        {0xfee50b7025c36a08, 0x02f236d04753d5b5},
+        {0xcde6fd5e09abcf26, 0xed4c0226b55e6f87},
+        {0xa6539930bf6bff45, 0x84db8346b786151d},
+        {0x865b86925b9bc5c2, 0x0b8a2392ba45a9b3},
+        {0xd910f7ff28069da4, 0x1b2ba1518094da05},
+        {0xaf58416654a6babb, 0x387ac8d1970027b3},
+        {0x8da471a9de737e24, 0x5ceaecfed289e5d3},
+        {0xe4d5e82392a40515, 0x0fabaf3feaa5334b},
+        {0xb8da1662e7b00a17, 0x3d6a751f3b936244},
+        { 0x95527a5202df0ccb,
+          0x0f37801e0c43ebc9 }
+#endif
+    };
+
+#if FMT_USE_FULL_CACHE_DRAGONBOX
+    return pow10_significands[k - float_info<double>::min_k];
+#else
+    static constexpr const uint64_t powers_of_5_64[] = {
+        0x0000000000000001, 0x0000000000000005, 0x0000000000000019,
+        0x000000000000007d, 0x0000000000000271, 0x0000000000000c35,
+        0x0000000000003d09, 0x000000000001312d, 0x000000000005f5e1,
+        0x00000000001dcd65, 0x00000000009502f9, 0x0000000002e90edd,
+        0x000000000e8d4a51, 0x0000000048c27395, 0x000000016bcc41e9,
+        0x000000071afd498d, 0x0000002386f26fc1, 0x000000b1a2bc2ec5,
+        0x000003782dace9d9, 0x00001158e460913d, 0x000056bc75e2d631,
+        0x0001b1ae4d6e2ef5, 0x000878678326eac9, 0x002a5a058fc295ed,
+        0x00d3c21bcecceda1, 0x0422ca8b0a00a425, 0x14adf4b7320334b9};
+
+    static const int compression_ratio = 27;
+
+    // Compute base index.
+    int cache_index = (k - float_info<double>::min_k) / compression_ratio;
+    int kb = cache_index * compression_ratio + float_info<double>::min_k;
+    int offset = k - kb;
+
+    // Get base cache.
+    uint128_fallback base_cache = pow10_significands[cache_index];
+    if (offset == 0) return base_cache;
+
+    // Compute the required amount of bit-shift.
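+    // alpha is the number of extra significand bits gained by going from
+    // pow(10, kb) to pow(10, kb + offset) beyond the offset powers of 2,
+    // i.e. roughly log2(pow(5, offset)).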
+ int alpha = floor_log2_pow10(kb + offset) - floor_log2_pow10(kb) - offset; + FMT_ASSERT(alpha > 0 && alpha < 64, "shifting error detected"); + + // Try to recover the real cache. + uint64_t pow5 = powers_of_5_64[offset]; + uint128_fallback recovered_cache = umul128(base_cache.high(), pow5); + uint128_fallback middle_low = umul128(base_cache.low(), pow5); + + recovered_cache += middle_low.high(); + + uint64_t high_to_middle = recovered_cache.high() << (64 - alpha); + uint64_t middle_to_low = recovered_cache.low() << (64 - alpha); + + recovered_cache = + uint128_fallback{(recovered_cache.low() >> alpha) | high_to_middle, + ((middle_low.low() >> alpha) | middle_to_low)}; + FMT_ASSERT(recovered_cache.low() + 1 != 0, ""); + return {recovered_cache.high(), recovered_cache.low() + 1}; +#endif + } + + struct compute_mul_result { + carrier_uint result; + bool is_integer; + }; + struct compute_mul_parity_result { + bool parity; + bool is_integer; + }; + + static compute_mul_result compute_mul( + carrier_uint u, const cache_entry_type& cache) noexcept { + auto r = umul192_upper128(u, cache); + return {r.high(), r.low() == 0}; + } + + static uint32_t compute_delta(cache_entry_type const& cache, + int beta) noexcept { + return static_cast(cache.high() >> (64 - 1 - beta)); + } + + static compute_mul_parity_result compute_mul_parity( + carrier_uint two_f, const cache_entry_type& cache, int beta) noexcept { + FMT_ASSERT(beta >= 1, ""); + FMT_ASSERT(beta < 64, ""); + + auto r = umul192_lower128(two_f, cache); + return {((r.high() >> (64 - beta)) & 1) != 0, + ((r.high() << beta) | (r.low() >> (64 - beta))) == 0}; + } + + static carrier_uint compute_left_endpoint_for_shorter_interval_case( + const cache_entry_type& cache, int beta) noexcept { + return (cache.high() - + (cache.high() >> (num_significand_bits() + 2))) >> + (64 - num_significand_bits() - 1 - beta); + } + + static carrier_uint compute_right_endpoint_for_shorter_interval_case( + const cache_entry_type& cache, int beta) noexcept { + return (cache.high() + + (cache.high() >> (num_significand_bits() + 1))) >> + (64 - num_significand_bits() - 1 - beta); + } + + static carrier_uint compute_round_up_for_shorter_interval_case( + const cache_entry_type& cache, int beta) noexcept { + return ((cache.high() >> (64 - num_significand_bits() - 2 - beta)) + + 1) / + 2; + } +}; + +// Various integer checks +template +bool is_left_endpoint_integer_shorter_interval(int exponent) noexcept { + const int case_shorter_interval_left_endpoint_lower_threshold = 2; + const int case_shorter_interval_left_endpoint_upper_threshold = 3; + return exponent >= case_shorter_interval_left_endpoint_lower_threshold && + exponent <= case_shorter_interval_left_endpoint_upper_threshold; +} + +// Remove trailing zeros from n and return the number of zeros removed (float) +FMT_INLINE int remove_trailing_zeros(uint32_t& n) noexcept { + FMT_ASSERT(n != 0, ""); + const uint32_t mod_inv_5 = 0xcccccccd; + const uint32_t mod_inv_25 = mod_inv_5 * mod_inv_5; + + int s = 0; + while (true) { + auto q = rotr(n * mod_inv_25, 2); + if (q > max_value() / 100) break; + n = q; + s += 2; + } + auto q = rotr(n * mod_inv_5, 1); + if (q <= max_value() / 10) { + n = q; + s |= 1; + } + + return s; +} + +// Removes trailing zeros and returns the number of zeros removed (double) +FMT_INLINE int remove_trailing_zeros(uint64_t& n) noexcept { + FMT_ASSERT(n != 0, ""); + + // This magic number is ceil(2^90 / 10^8). 
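Both remove_trailing_zeros overloads replace trial division with a multiply-by-modular-inverse test, and the 64-bit overload that follows first peels off eight zeros at once using the ceil(2^90 / 10^8) constant named above. A standalone sketch of the core trick: for n divisible by 5, n * inv5 equals n / 5 modulo 2^32, and rotating right by one folds in the factor-of-2 check, so divisibility by 10 becomes a single compare.

    #include <cstdint>
    #include <cstdio>

    static uint32_t rotr32(uint32_t x, int r) {  // r must be in (0, 32)
      return (x >> r) | (x << (32 - r));
    }

    int main() {
      const uint32_t mod_inv_5 = 0xcccccccd;  // inverse of 5 modulo 2^32
      for (uint32_t n : {100u, 123u, 50u}) {
        // n is divisible by 10 iff rotr(n * inv5, 1) <= (2^32 - 1) / 10;
        // when it is, q is already the quotient n / 10.
        uint32_t q = rotr32(n * mod_inv_5, 1);
        bool div10 = q <= 0xffffffffu / 10;
        std::printf("%u %s divisible by 10 (q=%u)\n",
                    n, div10 ? "is" : "is not", q);
      }
    }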
+  constexpr uint64_t magic_number = 12379400392853802749ull;
+  auto nm = umul128(n, magic_number);
+
+  // Is n divisible by 10^8?
+  if ((nm.high() & ((1ull << (90 - 64)) - 1)) == 0 && nm.low() < magic_number) {
+    // If yes, work with the quotient.
+    auto n32 = static_cast<uint32_t>(nm.high() >> (90 - 64));
+
+    const uint32_t mod_inv_5 = 0xcccccccd;
+    const uint32_t mod_inv_25 = mod_inv_5 * mod_inv_5;
+
+    int s = 8;
+    while (true) {
+      auto q = rotr(n32 * mod_inv_25, 2);
+      if (q > max_value<uint32_t>() / 100) break;
+      n32 = q;
+      s += 2;
+    }
+    auto q = rotr(n32 * mod_inv_5, 1);
+    if (q <= max_value<uint32_t>() / 10) {
+      n32 = q;
+      s |= 1;
+    }
+
+    n = n32;
+    return s;
+  }
+
+  // If n is not divisible by 10^8, work with n itself.
+  const uint64_t mod_inv_5 = 0xcccccccccccccccd;
+  const uint64_t mod_inv_25 = mod_inv_5 * mod_inv_5;
+
+  int s = 0;
+  while (true) {
+    auto q = rotr(n * mod_inv_25, 2);
+    if (q > max_value<uint64_t>() / 100) break;
+    n = q;
+    s += 2;
+  }
+  auto q = rotr(n * mod_inv_5, 1);
+  if (q <= max_value<uint64_t>() / 10) {
+    n = q;
+    s |= 1;
+  }
+
+  return s;
+}
+
+// The main algorithm for the shorter interval case
+template <class T>
+FMT_INLINE decimal_fp<T> shorter_interval_case(int exponent) noexcept {
+  decimal_fp<T> ret_value;
+  // Compute k and beta
+  const int minus_k = floor_log10_pow2_minus_log10_4_over_3(exponent);
+  const int beta = exponent + floor_log2_pow10(-minus_k);
+
+  // Compute xi and zi
+  using cache_entry_type = typename cache_accessor<T>::cache_entry_type;
+  const cache_entry_type cache = cache_accessor<T>::get_cached_power(-minus_k);
+
+  auto xi = cache_accessor<T>::compute_left_endpoint_for_shorter_interval_case(
+      cache, beta);
+  auto zi = cache_accessor<T>::compute_right_endpoint_for_shorter_interval_case(
+      cache, beta);
+
+  // If the left endpoint is not an integer, increase it
+  if (!is_left_endpoint_integer_shorter_interval<T>(exponent)) ++xi;
+
+  // Try the bigger divisor
+  ret_value.significand = zi / 10;
+
+  // If that succeeds, remove trailing zeros if necessary and return
+  if (ret_value.significand * 10 >= xi) {
+    ret_value.exponent = minus_k + 1;
+    ret_value.exponent += remove_trailing_zeros(ret_value.significand);
+    return ret_value;
+  }
+
+  // Otherwise, compute the round-up of y
+  ret_value.significand =
+      cache_accessor<T>::compute_round_up_for_shorter_interval_case(cache,
+                                                                    beta);
+  ret_value.exponent = minus_k;
+
+  // When a tie occurs, choose one of them according to the rule
+  if (exponent >= float_info<T>::shorter_interval_tie_lower_threshold &&
+      exponent <= float_info<T>::shorter_interval_tie_upper_threshold) {
+    ret_value.significand = ret_value.significand % 2 == 0
+                                ? ret_value.significand
+                                : ret_value.significand - 1;
+  } else if (ret_value.significand < xi) {
+    ++ret_value.significand;
+  }
+  return ret_value;
+}
+
+template <typename T> decimal_fp<T> to_decimal(T x) noexcept {
+  // Step 1: integer promotion & Schubfach multiplier calculation.
+
+  using carrier_uint = typename float_info<T>::carrier_uint;
+  using cache_entry_type = typename cache_accessor<T>::cache_entry_type;
+  auto br = bit_cast<carrier_uint>(x);
+
+  // Extract significand bits and exponent bits.
+  const carrier_uint significand_mask =
+      (static_cast<carrier_uint>(1) << num_significand_bits<T>()) - 1;
+  carrier_uint significand = (br & significand_mask);
+  int exponent =
+      static_cast<int>((br & exponent_mask<T>()) >> num_significand_bits<T>());
+
+  if (exponent != 0) {  // Check if normal.
+    exponent -= exponent_bias<T>() + num_significand_bits<T>();
+
+    // Shorter interval case; proceed like Schubfach.
+    // In fact, when exponent == 1 and significand == 0, the interval is
+    // regular.
However, it can be shown that the end-results are anyway same. + if (significand == 0) return shorter_interval_case(exponent); + + significand |= (static_cast(1) << num_significand_bits()); + } else { + // Subnormal case; the interval is always regular. + if (significand == 0) return {0, 0}; + exponent = + std::numeric_limits::min_exponent - num_significand_bits() - 1; + } + + const bool include_left_endpoint = (significand % 2 == 0); + const bool include_right_endpoint = include_left_endpoint; + + // Compute k and beta. + const int minus_k = floor_log10_pow2(exponent) - float_info::kappa; + const cache_entry_type cache = cache_accessor::get_cached_power(-minus_k); + const int beta = exponent + floor_log2_pow10(-minus_k); + + // Compute zi and deltai. + // 10^kappa <= deltai < 10^(kappa + 1) + const uint32_t deltai = cache_accessor::compute_delta(cache, beta); + const carrier_uint two_fc = significand << 1; + + // For the case of binary32, the result of integer check is not correct for + // 29711844 * 2^-82 + // = 6.1442653300000000008655037797566933477355632930994033813476... * 10^-18 + // and 29711844 * 2^-81 + // = 1.2288530660000000001731007559513386695471126586198806762695... * 10^-17, + // and they are the unique counterexamples. However, since 29711844 is even, + // this does not cause any problem for the endpoints calculations; it can only + // cause a problem when we need to perform integer check for the center. + // Fortunately, with these inputs, that branch is never executed, so we are + // fine. + const typename cache_accessor::compute_mul_result z_mul = + cache_accessor::compute_mul((two_fc | 1) << beta, cache); + + // Step 2: Try larger divisor; remove trailing zeros if necessary. + + // Using an upper bound on zi, we might be able to optimize the division + // better than the compiler; we are computing zi / big_divisor here. + decimal_fp ret_value; + ret_value.significand = divide_by_10_to_kappa_plus_1(z_mul.result); + uint32_t r = static_cast(z_mul.result - float_info::big_divisor * + ret_value.significand); + + if (r < deltai) { + // Exclude the right endpoint if necessary. + if (r == 0 && z_mul.is_integer && !include_right_endpoint) { + --ret_value.significand; + r = float_info::big_divisor; + goto small_divisor_case_label; + } + } else if (r > deltai) { + goto small_divisor_case_label; + } else { + // r == deltai; compare fractional parts. + const carrier_uint two_fl = two_fc - 1; + + if (!include_left_endpoint || + exponent < float_info::case_fc_pm_half_lower_threshold || + exponent > float_info::divisibility_check_by_5_threshold) { + // If the left endpoint is not included, the condition for + // success is z^(f) < delta^(f) (odd parity). + // Otherwise, the inequalities on exponent ensure that + // x is not an integer, so if z^(f) >= delta^(f) (even parity), we in fact + // have strict inequality. + if (!cache_accessor::compute_mul_parity(two_fl, cache, beta).parity) { + goto small_divisor_case_label; + } + } else { + const typename cache_accessor::compute_mul_parity_result x_mul = + cache_accessor::compute_mul_parity(two_fl, cache, beta); + if (!x_mul.parity && !x_mul.is_integer) { + goto small_divisor_case_label; + } + } + } + ret_value.exponent = minus_k + float_info::kappa + 1; + + // We may need to remove trailing zeros. + ret_value.exponent += remove_trailing_zeros(ret_value.significand); + return ret_value; + + // Step 3: Find the significand with the smaller divisor. 
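Before the small-divisor branch below, it may help to see step 2's arithmetic in isolation: the candidate keeps all but the last kappa + 1 digits of the scaled endpoint zi, and the remainder r against deltai decides which branch runs. A toy sketch with made-up values, assuming kappa = 2 as fmt uses for double:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t big_divisor = 1000;  // 10^(kappa + 1) with assumed kappa = 2
      const uint32_t deltai = 420;        // made-up stand-in for compute_delta()
      uint64_t zi = 123456789;            // made-up stand-in for z_mul.result
      uint64_t sig = zi / big_divisor;    // shorter candidate: 123456
      uint32_t r = uint32_t(zi - big_divisor * sig);  // remainder: 789
      // r < deltai: the whole interval maps to this candidate; step 2 succeeds.
      // r > deltai: fall through to the small-divisor case (789 > 420 here).
      // r == deltai: compare fractional parts via compute_mul_parity.
      std::printf("sig=%llu r=%u\n", (unsigned long long)sig, r);
    }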
+ +small_divisor_case_label: + ret_value.significand *= 10; + ret_value.exponent = minus_k + float_info::kappa; + + uint32_t dist = r - (deltai / 2) + (float_info::small_divisor / 2); + const bool approx_y_parity = + ((dist ^ (float_info::small_divisor / 2)) & 1) != 0; + + // Is dist divisible by 10^kappa? + const bool divisible_by_small_divisor = + check_divisibility_and_divide_by_pow10::kappa>(dist); + + // Add dist / 10^kappa to the significand. + ret_value.significand += dist; + + if (!divisible_by_small_divisor) return ret_value; + + // Check z^(f) >= epsilon^(f). + // We have either yi == zi - epsiloni or yi == (zi - epsiloni) - 1, + // where yi == zi - epsiloni if and only if z^(f) >= epsilon^(f). + // Since there are only 2 possibilities, we only need to care about the + // parity. Also, zi and r should have the same parity since the divisor + // is an even number. + const auto y_mul = cache_accessor::compute_mul_parity(two_fc, cache, beta); + + // If z^(f) >= epsilon^(f), we might have a tie when z^(f) == epsilon^(f), + // or equivalently, when y is an integer. + if (y_mul.parity != approx_y_parity) + --ret_value.significand; + else if (y_mul.is_integer && ret_value.significand % 2 != 0) + --ret_value.significand; + return ret_value; +} +} // namespace dragonbox + +// format_dragon flags. +enum dragon { + predecessor_closer = 1, + fixup = 2, // Run fixup to correct exp10 which can be off by one. + fixed = 4, +}; + +// Formats a floating-point number using a variation of the Fixed-Precision +// Positive Floating-Point Printout ((FPP)^2) algorithm by Steele & White: +// https://fmt.dev/papers/p372-steele.pdf. +FMT_CONSTEXPR20 inline void format_dragon(basic_fp value, + unsigned flags, int num_digits, + buffer& buf, int& exp10) { + bigint numerator; // 2 * R in (FPP)^2. + bigint denominator; // 2 * S in (FPP)^2. + // lower and upper are differences between value and corresponding boundaries. + bigint lower; // (M^- in (FPP)^2). + bigint upper_store; // upper's value if different from lower. + bigint* upper = nullptr; // (M^+ in (FPP)^2). + // Shift numerator and denominator by an extra bit or two (if lower boundary + // is closer) to make lower and upper integers. This eliminates multiplication + // by 2 during later computations. + bool is_predecessor_closer = (flags & dragon::predecessor_closer) != 0; + int shift = is_predecessor_closer ? 
2 : 1; + if (value.e >= 0) { + numerator = value.f; + numerator <<= value.e + shift; + lower = 1; + lower <<= value.e; + if (is_predecessor_closer) { + upper_store = 1; + upper_store <<= value.e + 1; + upper = &upper_store; + } + denominator.assign_pow10(exp10); + denominator <<= shift; + } else if (exp10 < 0) { + numerator.assign_pow10(-exp10); + lower.assign(numerator); + if (is_predecessor_closer) { + upper_store.assign(numerator); + upper_store <<= 1; + upper = &upper_store; + } + numerator *= value.f; + numerator <<= shift; + denominator = 1; + denominator <<= shift - value.e; + } else { + numerator = value.f; + numerator <<= shift; + denominator.assign_pow10(exp10); + denominator <<= shift - value.e; + lower = 1; + if (is_predecessor_closer) { + upper_store = 1ULL << 1; + upper = &upper_store; + } + } + bool even = (value.f & 1) == 0; + if (!upper) upper = &lower; + if ((flags & dragon::fixup) != 0) { + if (add_compare(numerator, *upper, denominator) + even <= 0) { + --exp10; + numerator *= 10; + if (num_digits < 0) { + lower *= 10; + if (upper != &lower) *upper *= 10; + } + } + if ((flags & dragon::fixed) != 0) adjust_precision(num_digits, exp10 + 1); + } + // Invariant: value == (numerator / denominator) * pow(10, exp10). + if (num_digits < 0) { + // Generate the shortest representation. + num_digits = 0; + char* data = buf.data(); + for (;;) { + int digit = numerator.divmod_assign(denominator); + bool low = compare(numerator, lower) - even < 0; // numerator <[=] lower. + // numerator + upper >[=] pow10: + bool high = add_compare(numerator, *upper, denominator) + even > 0; + data[num_digits++] = static_cast('0' + digit); + if (low || high) { + if (!low) { + ++data[num_digits - 1]; + } else if (high) { + int result = add_compare(numerator, numerator, denominator); + // Round half to even. + if (result > 0 || (result == 0 && (digit % 2) != 0)) + ++data[num_digits - 1]; + } + buf.try_resize(to_unsigned(num_digits)); + exp10 -= num_digits - 1; + return; + } + numerator *= 10; + lower *= 10; + if (upper != &lower) *upper *= 10; + } + } + // Generate the given number of digits. + exp10 -= num_digits - 1; + if (num_digits == 0) { + denominator *= 10; + auto digit = add_compare(numerator, numerator, denominator) > 0 ? '1' : '0'; + buf.push_back(digit); + return; + } + buf.try_resize(to_unsigned(num_digits)); + for (int i = 0; i < num_digits - 1; ++i) { + int digit = numerator.divmod_assign(denominator); + buf[i] = static_cast('0' + digit); + numerator *= 10; + } + int digit = numerator.divmod_assign(denominator); + auto result = add_compare(numerator, numerator, denominator); + if (result > 0 || (result == 0 && (digit % 2) != 0)) { + if (digit == 9) { + const auto overflow = '0' + 10; + buf[num_digits - 1] = overflow; + // Propagate the carry. + for (int i = num_digits - 1; i > 0 && buf[i] == overflow; --i) { + buf[i] = '0'; + ++buf[i - 1]; + } + if (buf[0] == overflow) { + buf[0] = '1'; + ++exp10; + } + return; + } + ++digit; + } + buf[num_digits - 1] = static_cast('0' + digit); +} + +#ifdef _MSC_VER +FMT_FUNC auto fmt_snprintf(char* buf, size_t size, const char* fmt, ...) + -> int { + auto args = va_list(); + va_start(args, fmt); + int result = vsnprintf_s(buf, size, _TRUNCATE, fmt, args); + va_end(args); + return result; +} +#endif + +template +FMT_HEADER_ONLY_CONSTEXPR20 int format_float(Float value, int precision, + float_specs specs, + buffer& buf) { + // float is passed as double to reduce the number of instantiations. 
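The comment above is worth unpacking: routing float through double is safe because every binary32 value converts to binary64 exactly (the widening introduces no rounding), so a single template instantiation serves both types. A small demonstration:

    #include <cstdio>

    int main() {
      float f = 0.1f;  // nearest binary32 to 0.1
      double d = f;    // widening conversion is exact
      // Prints the float's exact stored value, e.g. 0.10000000149011612.
      std::printf("%.17g\n", d);
    }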
+ static_assert(!std::is_same::value, ""); + FMT_ASSERT(value >= 0, "value is negative"); + auto converted_value = convert_float(value); + + const bool fixed = specs.format == float_format::fixed; + if (value <= 0) { // <= instead of == to silence a warning. + if (precision <= 0 || !fixed) { + buf.push_back('0'); + return 0; + } + buf.try_resize(to_unsigned(precision)); + fill_n(buf.data(), precision, '0'); + return -precision; + } + + int exp = 0; + bool use_dragon = true; + unsigned dragon_flags = 0; + if (!is_fast_float()) { + const auto inv_log2_10 = 0.3010299956639812; // 1 / log2(10) + using info = dragonbox::float_info; + const auto f = basic_fp(converted_value); + // Compute exp, an approximate power of 10, such that + // 10^(exp - 1) <= value < 10^exp or 10^exp <= value < 10^(exp + 1). + // This is based on log10(value) == log2(value) / log2(10) and approximation + // of log2(value) by e + num_fraction_bits idea from double-conversion. + exp = static_cast( + std::ceil((f.e + count_digits<1>(f.f) - 1) * inv_log2_10 - 1e-10)); + dragon_flags = dragon::fixup; + } else if (!is_constant_evaluated() && precision < 0) { + // Use Dragonbox for the shortest format. + if (specs.binary32) { + auto dec = dragonbox::to_decimal(static_cast(value)); + write(buffer_appender(buf), dec.significand); + return dec.exponent; + } + auto dec = dragonbox::to_decimal(static_cast(value)); + write(buffer_appender(buf), dec.significand); + return dec.exponent; + } else { + // Use Grisu + Dragon4 for the given precision: + // https://www.cs.tufts.edu/~nr/cs257/archive/florian-loitsch/printf.pdf. + const int min_exp = -60; // alpha in Grisu. + int cached_exp10 = 0; // K in Grisu. + fp normalized = normalize(fp(converted_value)); + const auto cached_pow = get_cached_power( + min_exp - (normalized.e + fp::num_significand_bits), cached_exp10); + normalized = normalized * cached_pow; + gen_digits_handler handler{buf.data(), 0, precision, -cached_exp10, fixed}; + if (grisu_gen_digits(normalized, 1, exp, handler) != digits::error && + !is_constant_evaluated()) { + exp += handler.exp10; + buf.try_resize(to_unsigned(handler.size)); + use_dragon = false; + } else { + exp += handler.size - cached_exp10 - 1; + precision = handler.precision; + } + } + if (use_dragon) { + auto f = basic_fp(); + bool is_predecessor_closer = specs.binary32 + ? f.assign(static_cast(value)) + : f.assign(converted_value); + if (is_predecessor_closer) dragon_flags |= dragon::predecessor_closer; + if (fixed) dragon_flags |= dragon::fixed; + // Limit precision to the maximum possible number of significant digits in + // an IEEE754 double because we don't need to generate zeros. + const int max_double_digits = 767; + if (precision > max_double_digits) precision = max_double_digits; + format_dragon(f, dragon_flags, precision, buf, exp); + } + if (!fixed && !specs.showpoint) { + // Remove trailing zeros. 
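The loop that follows strips trailing zeros; the invariant to keep in mind is that digits * 10^exp must not change, so every dropped '0' is compensated by incrementing the exponent. A sketch of the same bookkeeping:

    #include <cstdio>
    #include <string>

    int main() {
      std::string digits = "2500";  // 2500 * 10^-2 == 25
      int exp = -2;
      while (digits.size() > 1 && digits.back() == '0') {
        digits.pop_back();  // drop a zero...
        ++exp;              // ...and compensate in the exponent
      }
      std::printf("%s * 10^%d\n", digits.c_str(), exp);  // 25 * 10^0
    }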
+    auto num_digits = buf.size();
+    while (num_digits > 0 && buf[num_digits - 1] == '0') {
+      --num_digits;
+      ++exp;
+    }
+    buf.try_resize(num_digits);
+  }
+  return exp;
+}
+}  // namespace detail
+
+template <> struct formatter<detail::bigint> {
+  FMT_CONSTEXPR auto parse(format_parse_context& ctx)
+      -> format_parse_context::iterator {
+    return ctx.begin();
+  }
+
+  template <typename FormatContext>
+  auto format(const detail::bigint& n, FormatContext& ctx) const ->
+      typename FormatContext::iterator {
+    auto out = ctx.out();
+    bool first = true;
+    for (auto i = n.bigits_.size(); i > 0; --i) {
+      auto value = n.bigits_[i - 1u];
+      if (first) {
+        out = format_to(out, FMT_STRING("{:x}"), value);
+        first = false;
+        continue;
+      }
+      out = format_to(out, FMT_STRING("{:08x}"), value);
+    }
+    if (n.exp_ > 0)
+      out = format_to(out, FMT_STRING("p{}"),
+                      n.exp_ * detail::bigint::bigit_bits);
+    return out;
+  }
+};
+
+FMT_FUNC detail::utf8_to_utf16::utf8_to_utf16(string_view s) {
+  for_each_codepoint(s, [this](uint32_t cp, string_view) {
+    if (cp == invalid_code_point) FMT_THROW(std::runtime_error("invalid utf8"));
+    if (cp <= 0xFFFF) {
+      buffer_.push_back(static_cast<wchar_t>(cp));
+    } else {
+      cp -= 0x10000;
+      buffer_.push_back(static_cast<wchar_t>(0xD800 + (cp >> 10)));
+      buffer_.push_back(static_cast<wchar_t>(0xDC00 + (cp & 0x3FF)));
+    }
+    return true;
+  });
+  buffer_.push_back(0);
+}
+
+FMT_FUNC void format_system_error(detail::buffer<char>& out, int error_code,
+                                  const char* message) noexcept {
+  FMT_TRY {
+    auto ec = std::error_code(error_code, std::generic_category());
+    write(std::back_inserter(out), std::system_error(ec, message).what());
+    return;
+  }
+  FMT_CATCH(...) {}
+  format_error_code(out, error_code, message);
+}
+
+FMT_FUNC void report_system_error(int error_code,
+                                  const char* message) noexcept {
+  report_error(format_system_error, error_code, message);
+}
+
+// DEPRECATED!
+// This function is defined here and not inline for ABI compatibility.
+FMT_FUNC void detail::error_handler::on_error(const char* message) {
+  throw_format_error(message);
+}
+
+FMT_FUNC std::string vformat(string_view fmt, format_args args) {
+  // Don't optimize the "{}" case to keep the binary size small and because it
+  // can be better optimized in fmt::format anyway.
+  auto buffer = memory_buffer();
+  detail::vformat_to(buffer, fmt, args);
+  return to_string(buffer);
+}
+
+#ifdef _WIN32
+namespace detail {
+using dword = conditional_t<sizeof(long) == 4, unsigned long, unsigned>;
+extern "C" __declspec(dllimport) int __stdcall WriteConsoleW(  //
+    void*, const void*, dword, dword*, void*);
+}  // namespace detail
+#endif
+
+namespace detail {
+FMT_FUNC void print(std::FILE* f, string_view text) {
+#ifdef _WIN32
+  auto fd = _fileno(f);
+  if (_isatty(fd)) {
+    detail::utf8_to_utf16 u16(string_view(text.data(), text.size()));
+    auto written = detail::dword();
+    if (detail::WriteConsoleW(reinterpret_cast<void*>(_get_osfhandle(fd)),
+                              u16.c_str(), static_cast<uint32_t>(u16.size()),
+                              &written, nullptr)) {
+      return;
+    }
+    // Fallback to fwrite on failure. It can happen if the output has been
+    // redirected to NUL.
+  }
+#endif
+  detail::fwrite_fully(text.data(), 1, text.size(), f);
+}
+}  // namespace detail
+
+FMT_FUNC void vprint(std::FILE* f, string_view format_str, format_args args) {
+  memory_buffer buffer;
+  detail::vformat_to(buffer, format_str, args);
+  detail::print(f, {buffer.data(), buffer.size()});
+}
+
+#ifdef _WIN32
+// Print assuming legacy (non-Unicode) encoding.
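A worked example of the surrogate-pair split performed by utf8_to_utf16 earlier in this hunk: code points beyond the Basic Multilingual Plane are biased by 0x10000 and the remaining 20 bits are split into a high and a low surrogate.

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t cp = 0x1F600;  // U+1F600, outside the BMP
      cp -= 0x10000;          // 20 payload bits remain
      uint16_t high = uint16_t(0xD800 + (cp >> 10));   // top 10 bits
      uint16_t low = uint16_t(0xDC00 + (cp & 0x3FF));  // bottom 10 bits
      std::printf("0x%04X 0x%04X\n", high, low);       // 0xD83D 0xDE00
    }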
+FMT_FUNC void detail::vprint_mojibake(std::FILE* f, string_view format_str, + format_args args) { + memory_buffer buffer; + detail::vformat_to(buffer, format_str, + basic_format_args>(args)); + fwrite_fully(buffer.data(), 1, buffer.size(), f); +} +#endif + +FMT_FUNC void vprint(string_view format_str, format_args args) { + vprint(stdout, format_str, args); +} + +namespace detail { + +struct singleton { + unsigned char upper; + unsigned char lower_count; +}; + +inline auto is_printable(uint16_t x, const singleton* singletons, + size_t singletons_size, + const unsigned char* singleton_lowers, + const unsigned char* normal, size_t normal_size) + -> bool { + auto upper = x >> 8; + auto lower_start = 0; + for (size_t i = 0; i < singletons_size; ++i) { + auto s = singletons[i]; + auto lower_end = lower_start + s.lower_count; + if (upper < s.upper) break; + if (upper == s.upper) { + for (auto j = lower_start; j < lower_end; ++j) { + if (singleton_lowers[j] == (x & 0xff)) return false; + } + } + lower_start = lower_end; + } + + auto xsigned = static_cast(x); + auto current = true; + for (size_t i = 0; i < normal_size; ++i) { + auto v = static_cast(normal[i]); + auto len = (v & 0x80) != 0 ? (v & 0x7f) << 8 | normal[++i] : v; + xsigned -= len; + if (xsigned < 0) break; + current = !current; + } + return current; +} + +// This code is generated by support/printable.py. +FMT_FUNC auto is_printable(uint32_t cp) -> bool { + static constexpr singleton singletons0[] = { + {0x00, 1}, {0x03, 5}, {0x05, 6}, {0x06, 3}, {0x07, 6}, {0x08, 8}, + {0x09, 17}, {0x0a, 28}, {0x0b, 25}, {0x0c, 20}, {0x0d, 16}, {0x0e, 13}, + {0x0f, 4}, {0x10, 3}, {0x12, 18}, {0x13, 9}, {0x16, 1}, {0x17, 5}, + {0x18, 2}, {0x19, 3}, {0x1a, 7}, {0x1c, 2}, {0x1d, 1}, {0x1f, 22}, + {0x20, 3}, {0x2b, 3}, {0x2c, 2}, {0x2d, 11}, {0x2e, 1}, {0x30, 3}, + {0x31, 2}, {0x32, 1}, {0xa7, 2}, {0xa9, 2}, {0xaa, 4}, {0xab, 8}, + {0xfa, 2}, {0xfb, 5}, {0xfd, 4}, {0xfe, 3}, {0xff, 9}, + }; + static constexpr unsigned char singletons0_lower[] = { + 0xad, 0x78, 0x79, 0x8b, 0x8d, 0xa2, 0x30, 0x57, 0x58, 0x8b, 0x8c, 0x90, + 0x1c, 0x1d, 0xdd, 0x0e, 0x0f, 0x4b, 0x4c, 0xfb, 0xfc, 0x2e, 0x2f, 0x3f, + 0x5c, 0x5d, 0x5f, 0xb5, 0xe2, 0x84, 0x8d, 0x8e, 0x91, 0x92, 0xa9, 0xb1, + 0xba, 0xbb, 0xc5, 0xc6, 0xc9, 0xca, 0xde, 0xe4, 0xe5, 0xff, 0x00, 0x04, + 0x11, 0x12, 0x29, 0x31, 0x34, 0x37, 0x3a, 0x3b, 0x3d, 0x49, 0x4a, 0x5d, + 0x84, 0x8e, 0x92, 0xa9, 0xb1, 0xb4, 0xba, 0xbb, 0xc6, 0xca, 0xce, 0xcf, + 0xe4, 0xe5, 0x00, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a, + 0x3b, 0x45, 0x46, 0x49, 0x4a, 0x5e, 0x64, 0x65, 0x84, 0x91, 0x9b, 0x9d, + 0xc9, 0xce, 0xcf, 0x0d, 0x11, 0x29, 0x45, 0x49, 0x57, 0x64, 0x65, 0x8d, + 0x91, 0xa9, 0xb4, 0xba, 0xbb, 0xc5, 0xc9, 0xdf, 0xe4, 0xe5, 0xf0, 0x0d, + 0x11, 0x45, 0x49, 0x64, 0x65, 0x80, 0x84, 0xb2, 0xbc, 0xbe, 0xbf, 0xd5, + 0xd7, 0xf0, 0xf1, 0x83, 0x85, 0x8b, 0xa4, 0xa6, 0xbe, 0xbf, 0xc5, 0xc7, + 0xce, 0xcf, 0xda, 0xdb, 0x48, 0x98, 0xbd, 0xcd, 0xc6, 0xce, 0xcf, 0x49, + 0x4e, 0x4f, 0x57, 0x59, 0x5e, 0x5f, 0x89, 0x8e, 0x8f, 0xb1, 0xb6, 0xb7, + 0xbf, 0xc1, 0xc6, 0xc7, 0xd7, 0x11, 0x16, 0x17, 0x5b, 0x5c, 0xf6, 0xf7, + 0xfe, 0xff, 0x80, 0x0d, 0x6d, 0x71, 0xde, 0xdf, 0x0e, 0x0f, 0x1f, 0x6e, + 0x6f, 0x1c, 0x1d, 0x5f, 0x7d, 0x7e, 0xae, 0xaf, 0xbb, 0xbc, 0xfa, 0x16, + 0x17, 0x1e, 0x1f, 0x46, 0x47, 0x4e, 0x4f, 0x58, 0x5a, 0x5c, 0x5e, 0x7e, + 0x7f, 0xb5, 0xc5, 0xd4, 0xd5, 0xdc, 0xf0, 0xf1, 0xf5, 0x72, 0x73, 0x8f, + 0x74, 0x75, 0x96, 0x2f, 0x5f, 0x26, 0x2e, 0x2f, 0xa7, 0xaf, 0xb7, 0xbf, + 0xc7, 0xcf, 0xd7, 0xdf, 0x9a, 0x40, 0x97, 0x98, 0x30, 
0x8f, 0x1f, 0xc0, + 0xc1, 0xce, 0xff, 0x4e, 0x4f, 0x5a, 0x5b, 0x07, 0x08, 0x0f, 0x10, 0x27, + 0x2f, 0xee, 0xef, 0x6e, 0x6f, 0x37, 0x3d, 0x3f, 0x42, 0x45, 0x90, 0x91, + 0xfe, 0xff, 0x53, 0x67, 0x75, 0xc8, 0xc9, 0xd0, 0xd1, 0xd8, 0xd9, 0xe7, + 0xfe, 0xff, + }; + static constexpr singleton singletons1[] = { + {0x00, 6}, {0x01, 1}, {0x03, 1}, {0x04, 2}, {0x08, 8}, {0x09, 2}, + {0x0a, 5}, {0x0b, 2}, {0x0e, 4}, {0x10, 1}, {0x11, 2}, {0x12, 5}, + {0x13, 17}, {0x14, 1}, {0x15, 2}, {0x17, 2}, {0x19, 13}, {0x1c, 5}, + {0x1d, 8}, {0x24, 1}, {0x6a, 3}, {0x6b, 2}, {0xbc, 2}, {0xd1, 2}, + {0xd4, 12}, {0xd5, 9}, {0xd6, 2}, {0xd7, 2}, {0xda, 1}, {0xe0, 5}, + {0xe1, 2}, {0xe8, 2}, {0xee, 32}, {0xf0, 4}, {0xf8, 2}, {0xf9, 2}, + {0xfa, 2}, {0xfb, 1}, + }; + static constexpr unsigned char singletons1_lower[] = { + 0x0c, 0x27, 0x3b, 0x3e, 0x4e, 0x4f, 0x8f, 0x9e, 0x9e, 0x9f, 0x06, 0x07, + 0x09, 0x36, 0x3d, 0x3e, 0x56, 0xf3, 0xd0, 0xd1, 0x04, 0x14, 0x18, 0x36, + 0x37, 0x56, 0x57, 0x7f, 0xaa, 0xae, 0xaf, 0xbd, 0x35, 0xe0, 0x12, 0x87, + 0x89, 0x8e, 0x9e, 0x04, 0x0d, 0x0e, 0x11, 0x12, 0x29, 0x31, 0x34, 0x3a, + 0x45, 0x46, 0x49, 0x4a, 0x4e, 0x4f, 0x64, 0x65, 0x5c, 0xb6, 0xb7, 0x1b, + 0x1c, 0x07, 0x08, 0x0a, 0x0b, 0x14, 0x17, 0x36, 0x39, 0x3a, 0xa8, 0xa9, + 0xd8, 0xd9, 0x09, 0x37, 0x90, 0x91, 0xa8, 0x07, 0x0a, 0x3b, 0x3e, 0x66, + 0x69, 0x8f, 0x92, 0x6f, 0x5f, 0xee, 0xef, 0x5a, 0x62, 0x9a, 0x9b, 0x27, + 0x28, 0x55, 0x9d, 0xa0, 0xa1, 0xa3, 0xa4, 0xa7, 0xa8, 0xad, 0xba, 0xbc, + 0xc4, 0x06, 0x0b, 0x0c, 0x15, 0x1d, 0x3a, 0x3f, 0x45, 0x51, 0xa6, 0xa7, + 0xcc, 0xcd, 0xa0, 0x07, 0x19, 0x1a, 0x22, 0x25, 0x3e, 0x3f, 0xc5, 0xc6, + 0x04, 0x20, 0x23, 0x25, 0x26, 0x28, 0x33, 0x38, 0x3a, 0x48, 0x4a, 0x4c, + 0x50, 0x53, 0x55, 0x56, 0x58, 0x5a, 0x5c, 0x5e, 0x60, 0x63, 0x65, 0x66, + 0x6b, 0x73, 0x78, 0x7d, 0x7f, 0x8a, 0xa4, 0xaa, 0xaf, 0xb0, 0xc0, 0xd0, + 0xae, 0xaf, 0x79, 0xcc, 0x6e, 0x6f, 0x93, + }; + static constexpr unsigned char normal0[] = { + 0x00, 0x20, 0x5f, 0x22, 0x82, 0xdf, 0x04, 0x82, 0x44, 0x08, 0x1b, 0x04, + 0x06, 0x11, 0x81, 0xac, 0x0e, 0x80, 0xab, 0x35, 0x28, 0x0b, 0x80, 0xe0, + 0x03, 0x19, 0x08, 0x01, 0x04, 0x2f, 0x04, 0x34, 0x04, 0x07, 0x03, 0x01, + 0x07, 0x06, 0x07, 0x11, 0x0a, 0x50, 0x0f, 0x12, 0x07, 0x55, 0x07, 0x03, + 0x04, 0x1c, 0x0a, 0x09, 0x03, 0x08, 0x03, 0x07, 0x03, 0x02, 0x03, 0x03, + 0x03, 0x0c, 0x04, 0x05, 0x03, 0x0b, 0x06, 0x01, 0x0e, 0x15, 0x05, 0x3a, + 0x03, 0x11, 0x07, 0x06, 0x05, 0x10, 0x07, 0x57, 0x07, 0x02, 0x07, 0x15, + 0x0d, 0x50, 0x04, 0x43, 0x03, 0x2d, 0x03, 0x01, 0x04, 0x11, 0x06, 0x0f, + 0x0c, 0x3a, 0x04, 0x1d, 0x25, 0x5f, 0x20, 0x6d, 0x04, 0x6a, 0x25, 0x80, + 0xc8, 0x05, 0x82, 0xb0, 0x03, 0x1a, 0x06, 0x82, 0xfd, 0x03, 0x59, 0x07, + 0x15, 0x0b, 0x17, 0x09, 0x14, 0x0c, 0x14, 0x0c, 0x6a, 0x06, 0x0a, 0x06, + 0x1a, 0x06, 0x59, 0x07, 0x2b, 0x05, 0x46, 0x0a, 0x2c, 0x04, 0x0c, 0x04, + 0x01, 0x03, 0x31, 0x0b, 0x2c, 0x04, 0x1a, 0x06, 0x0b, 0x03, 0x80, 0xac, + 0x06, 0x0a, 0x06, 0x21, 0x3f, 0x4c, 0x04, 0x2d, 0x03, 0x74, 0x08, 0x3c, + 0x03, 0x0f, 0x03, 0x3c, 0x07, 0x38, 0x08, 0x2b, 0x05, 0x82, 0xff, 0x11, + 0x18, 0x08, 0x2f, 0x11, 0x2d, 0x03, 0x20, 0x10, 0x21, 0x0f, 0x80, 0x8c, + 0x04, 0x82, 0x97, 0x19, 0x0b, 0x15, 0x88, 0x94, 0x05, 0x2f, 0x05, 0x3b, + 0x07, 0x02, 0x0e, 0x18, 0x09, 0x80, 0xb3, 0x2d, 0x74, 0x0c, 0x80, 0xd6, + 0x1a, 0x0c, 0x05, 0x80, 0xff, 0x05, 0x80, 0xdf, 0x0c, 0xee, 0x0d, 0x03, + 0x84, 0x8d, 0x03, 0x37, 0x09, 0x81, 0x5c, 0x14, 0x80, 0xb8, 0x08, 0x80, + 0xcb, 0x2a, 0x38, 0x03, 0x0a, 0x06, 0x38, 0x08, 0x46, 0x08, 0x0c, 0x06, + 0x74, 0x0b, 0x1e, 0x03, 0x5a, 0x04, 0x59, 0x09, 0x80, 0x83, 
0x18, 0x1c, + 0x0a, 0x16, 0x09, 0x4c, 0x04, 0x80, 0x8a, 0x06, 0xab, 0xa4, 0x0c, 0x17, + 0x04, 0x31, 0xa1, 0x04, 0x81, 0xda, 0x26, 0x07, 0x0c, 0x05, 0x05, 0x80, + 0xa5, 0x11, 0x81, 0x6d, 0x10, 0x78, 0x28, 0x2a, 0x06, 0x4c, 0x04, 0x80, + 0x8d, 0x04, 0x80, 0xbe, 0x03, 0x1b, 0x03, 0x0f, 0x0d, + }; + static constexpr unsigned char normal1[] = { + 0x5e, 0x22, 0x7b, 0x05, 0x03, 0x04, 0x2d, 0x03, 0x66, 0x03, 0x01, 0x2f, + 0x2e, 0x80, 0x82, 0x1d, 0x03, 0x31, 0x0f, 0x1c, 0x04, 0x24, 0x09, 0x1e, + 0x05, 0x2b, 0x05, 0x44, 0x04, 0x0e, 0x2a, 0x80, 0xaa, 0x06, 0x24, 0x04, + 0x24, 0x04, 0x28, 0x08, 0x34, 0x0b, 0x01, 0x80, 0x90, 0x81, 0x37, 0x09, + 0x16, 0x0a, 0x08, 0x80, 0x98, 0x39, 0x03, 0x63, 0x08, 0x09, 0x30, 0x16, + 0x05, 0x21, 0x03, 0x1b, 0x05, 0x01, 0x40, 0x38, 0x04, 0x4b, 0x05, 0x2f, + 0x04, 0x0a, 0x07, 0x09, 0x07, 0x40, 0x20, 0x27, 0x04, 0x0c, 0x09, 0x36, + 0x03, 0x3a, 0x05, 0x1a, 0x07, 0x04, 0x0c, 0x07, 0x50, 0x49, 0x37, 0x33, + 0x0d, 0x33, 0x07, 0x2e, 0x08, 0x0a, 0x81, 0x26, 0x52, 0x4e, 0x28, 0x08, + 0x2a, 0x56, 0x1c, 0x14, 0x17, 0x09, 0x4e, 0x04, 0x1e, 0x0f, 0x43, 0x0e, + 0x19, 0x07, 0x0a, 0x06, 0x48, 0x08, 0x27, 0x09, 0x75, 0x0b, 0x3f, 0x41, + 0x2a, 0x06, 0x3b, 0x05, 0x0a, 0x06, 0x51, 0x06, 0x01, 0x05, 0x10, 0x03, + 0x05, 0x80, 0x8b, 0x62, 0x1e, 0x48, 0x08, 0x0a, 0x80, 0xa6, 0x5e, 0x22, + 0x45, 0x0b, 0x0a, 0x06, 0x0d, 0x13, 0x39, 0x07, 0x0a, 0x36, 0x2c, 0x04, + 0x10, 0x80, 0xc0, 0x3c, 0x64, 0x53, 0x0c, 0x48, 0x09, 0x0a, 0x46, 0x45, + 0x1b, 0x48, 0x08, 0x53, 0x1d, 0x39, 0x81, 0x07, 0x46, 0x0a, 0x1d, 0x03, + 0x47, 0x49, 0x37, 0x03, 0x0e, 0x08, 0x0a, 0x06, 0x39, 0x07, 0x0a, 0x81, + 0x36, 0x19, 0x80, 0xb7, 0x01, 0x0f, 0x32, 0x0d, 0x83, 0x9b, 0x66, 0x75, + 0x0b, 0x80, 0xc4, 0x8a, 0xbc, 0x84, 0x2f, 0x8f, 0xd1, 0x82, 0x47, 0xa1, + 0xb9, 0x82, 0x39, 0x07, 0x2a, 0x04, 0x02, 0x60, 0x26, 0x0a, 0x46, 0x0a, + 0x28, 0x05, 0x13, 0x82, 0xb0, 0x5b, 0x65, 0x4b, 0x04, 0x39, 0x07, 0x11, + 0x40, 0x05, 0x0b, 0x02, 0x0e, 0x97, 0xf8, 0x08, 0x84, 0xd6, 0x2a, 0x09, + 0xa2, 0xf7, 0x81, 0x1f, 0x31, 0x03, 0x11, 0x04, 0x08, 0x81, 0x8c, 0x89, + 0x04, 0x6b, 0x05, 0x0d, 0x03, 0x09, 0x07, 0x10, 0x93, 0x60, 0x80, 0xf6, + 0x0a, 0x73, 0x08, 0x6e, 0x17, 0x46, 0x80, 0x9a, 0x14, 0x0c, 0x57, 0x09, + 0x19, 0x80, 0x87, 0x81, 0x47, 0x03, 0x85, 0x42, 0x0f, 0x15, 0x85, 0x50, + 0x2b, 0x80, 0xd5, 0x2d, 0x03, 0x1a, 0x04, 0x02, 0x81, 0x70, 0x3a, 0x05, + 0x01, 0x85, 0x00, 0x80, 0xd7, 0x29, 0x4c, 0x04, 0x0a, 0x04, 0x02, 0x83, + 0x11, 0x44, 0x4c, 0x3d, 0x80, 0xc2, 0x3c, 0x06, 0x01, 0x04, 0x55, 0x05, + 0x1b, 0x34, 0x02, 0x81, 0x0e, 0x2c, 0x04, 0x64, 0x0c, 0x56, 0x0a, 0x80, + 0xae, 0x38, 0x1d, 0x0d, 0x2c, 0x04, 0x09, 0x07, 0x02, 0x0e, 0x06, 0x80, + 0x9a, 0x83, 0xd8, 0x08, 0x0d, 0x03, 0x0d, 0x03, 0x74, 0x0c, 0x59, 0x07, + 0x0c, 0x14, 0x0c, 0x04, 0x38, 0x08, 0x0a, 0x06, 0x28, 0x08, 0x22, 0x4e, + 0x81, 0x54, 0x0c, 0x15, 0x03, 0x03, 0x05, 0x07, 0x09, 0x19, 0x07, 0x07, + 0x09, 0x03, 0x0d, 0x07, 0x29, 0x80, 0xcb, 0x25, 0x0a, 0x84, 0x06, + }; + auto lower = static_cast(cp); + if (cp < 0x10000) { + return is_printable(lower, singletons0, + sizeof(singletons0) / sizeof(*singletons0), + singletons0_lower, normal0, sizeof(normal0)); + } + if (cp < 0x20000) { + return is_printable(lower, singletons1, + sizeof(singletons1) / sizeof(*singletons1), + singletons1_lower, normal1, sizeof(normal1)); + } + if (0x2a6de <= cp && cp < 0x2a700) return false; + if (0x2b735 <= cp && cp < 0x2b740) return false; + if (0x2b81e <= cp && cp < 0x2b820) return false; + if (0x2cea2 <= cp && cp < 0x2ceb0) return false; + if (0x2ebe1 <= cp && cp < 0x2f800) return false; + if 
(0x2fa1e <= cp && cp < 0x30000) return false; + if (0x3134b <= cp && cp < 0xe0100) return false; + if (0xe01f0 <= cp && cp < 0x110000) return false; + return cp < 0x110000; +} + +} // namespace detail + +FMT_END_NAMESPACE + +#endif // FMT_FORMAT_INL_H_ diff --git a/external/fmt/include/fmt/format.h b/external/fmt/include/fmt/format.h new file mode 100644 index 00000000..d35207c8 --- /dev/null +++ b/external/fmt/include/fmt/format.h @@ -0,0 +1,3444 @@ +/* + Formatting library for C++ + + Copyright (c) 2012 - present, Victor Zverovich + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE + LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + --- Optional exception to the license --- + + As an exception, if, as a result of your compiling your source code, portions + of this Software are embedded into a machine-executable object form of such + source code, you may redistribute such embedded portions in such object form + without including the above copyright and permission notices. + */ + +#ifndef FMT_FORMAT_H_ +#define FMT_FORMAT_H_ + +#include // std::signbit +#include // uint32_t +#include // std::memcpy +#include // std::numeric_limits +#include // std::uninitialized_copy +#include // std::runtime_error +#include // std::system_error + +#ifdef __cpp_lib_bit_cast +# include // std::bitcast +#endif + +#include "core.h" + +#if FMT_GCC_VERSION +# define FMT_GCC_VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) +#else +# define FMT_GCC_VISIBILITY_HIDDEN +#endif + +#ifdef __NVCC__ +# define FMT_CUDA_VERSION (__CUDACC_VER_MAJOR__ * 100 + __CUDACC_VER_MINOR__) +#else +# define FMT_CUDA_VERSION 0 +#endif + +#ifdef __has_builtin +# define FMT_HAS_BUILTIN(x) __has_builtin(x) +#else +# define FMT_HAS_BUILTIN(x) 0 +#endif + +#if FMT_GCC_VERSION || FMT_CLANG_VERSION +# define FMT_NOINLINE __attribute__((noinline)) +#else +# define FMT_NOINLINE +#endif + +#if FMT_MSC_VER +# define FMT_MSC_DEFAULT = default +#else +# define FMT_MSC_DEFAULT +#endif + +#ifndef FMT_THROW +# if FMT_EXCEPTIONS +# if FMT_MSC_VER || FMT_NVCC +FMT_BEGIN_NAMESPACE +namespace detail { +template inline void do_throw(const Exception& x) { + // Silence unreachable code warnings in MSVC and NVCC because these + // are nearly impossible to fix in a generic code. 
+ volatile bool b = true; + if (b) throw x; +} +} // namespace detail +FMT_END_NAMESPACE +# define FMT_THROW(x) detail::do_throw(x) +# else +# define FMT_THROW(x) throw x +# endif +# else +# define FMT_THROW(x) \ + do { \ + FMT_ASSERT(false, (x).what()); \ + } while (false) +# endif +#endif + +#if FMT_EXCEPTIONS +# define FMT_TRY try +# define FMT_CATCH(x) catch (x) +#else +# define FMT_TRY if (true) +# define FMT_CATCH(x) if (false) +#endif + +#ifndef FMT_MAYBE_UNUSED +# if FMT_HAS_CPP17_ATTRIBUTE(maybe_unused) +# define FMT_MAYBE_UNUSED [[maybe_unused]] +# else +# define FMT_MAYBE_UNUSED +# endif +#endif + +// Workaround broken [[deprecated]] in the Intel, PGI and NVCC compilers. +#if FMT_ICC_VERSION || defined(__PGI) || FMT_NVCC +# define FMT_DEPRECATED_ALIAS +#else +# define FMT_DEPRECATED_ALIAS FMT_DEPRECATED +#endif + +#ifndef FMT_USE_USER_DEFINED_LITERALS +// EDG based compilers (Intel, NVIDIA, Elbrus, etc), GCC and MSVC support UDLs. +# if (FMT_HAS_FEATURE(cxx_user_literals) || FMT_GCC_VERSION >= 407 || \ + FMT_MSC_VER >= 1900) && \ + (!defined(__EDG_VERSION__) || __EDG_VERSION__ >= /* UDL feature */ 480) +# define FMT_USE_USER_DEFINED_LITERALS 1 +# else +# define FMT_USE_USER_DEFINED_LITERALS 0 +# endif +#endif + +// Defining FMT_REDUCE_INT_INSTANTIATIONS to 1, will reduce the number of +// integer formatter template instantiations to just one by only using the +// largest integer type. This results in a reduction in binary size but will +// cause a decrease in integer formatting performance. +#if !defined(FMT_REDUCE_INT_INSTANTIATIONS) +# define FMT_REDUCE_INT_INSTANTIATIONS 0 +#endif + +// __builtin_clz is broken in clang with Microsoft CodeGen: +// https://github.com/fmtlib/fmt/issues/519. +#if !FMT_MSC_VER +# if FMT_HAS_BUILTIN(__builtin_clz) || FMT_GCC_VERSION || FMT_ICC_VERSION +# define FMT_BUILTIN_CLZ(n) __builtin_clz(n) +# endif +# if FMT_HAS_BUILTIN(__builtin_clzll) || FMT_GCC_VERSION || FMT_ICC_VERSION +# define FMT_BUILTIN_CLZLL(n) __builtin_clzll(n) +# endif +#endif + +// __builtin_ctz is broken in Intel Compiler Classic on Windows: +// https://github.com/fmtlib/fmt/issues/2510. +#ifndef __ICL +# if FMT_HAS_BUILTIN(__builtin_ctz) || FMT_GCC_VERSION || FMT_ICC_VERSION || \ + FMT_NVCOMPILER_VERSION +# define FMT_BUILTIN_CTZ(n) __builtin_ctz(n) +# endif +# if FMT_HAS_BUILTIN(__builtin_ctzll) || FMT_GCC_VERSION || \ + FMT_ICC_VERSION || FMT_NVCOMPILER_VERSION +# define FMT_BUILTIN_CTZLL(n) __builtin_ctzll(n) +# endif +#endif + +#if FMT_MSC_VER +# include // _BitScanReverse[64], _BitScanForward[64], _umul128 +#endif + +// Some compilers masquerade as both MSVC and GCC-likes or otherwise support +// __builtin_clz and __builtin_clzll, so only define FMT_BUILTIN_CLZ using the +// MSVC intrinsics if the clz and clzll builtins are not available. +#if FMT_MSC_VER && !defined(FMT_BUILTIN_CLZLL) && !defined(FMT_BUILTIN_CTZLL) +FMT_BEGIN_NAMESPACE +namespace detail { +// Avoid Clang with Microsoft CodeGen's -Wunknown-pragmas warning. +# if !defined(__clang__) +# pragma intrinsic(_BitScanForward) +# pragma intrinsic(_BitScanReverse) +# if defined(_WIN64) +# pragma intrinsic(_BitScanForward64) +# pragma intrinsic(_BitScanReverse64) +# endif +# endif + +inline auto clz(uint32_t x) -> int { + unsigned long r = 0; + _BitScanReverse(&r, x); + FMT_ASSERT(x != 0, ""); + // Static analysis complains about using uninitialized data + // "r", but the only way that can happen is if "x" is 0, + // which the callers guarantee to not happen. 
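The 31 ^ static_cast<int>(r) in the return statement below is just 31 - r: for r in [0, 31] every set bit of r lies inside the 0b11111 mask, so XOR acts as subtraction and converts _BitScanReverse's highest-set-bit index into a leading-zero count. A quick exhaustive check:

    #include <cstdio>

    int main() {
      for (int r = 0; r <= 31; ++r) {
        if ((31 ^ r) != 31 - r) {
          std::printf("mismatch at %d\n", r);
          return 1;
        }
      }
      std::printf("31 ^ r == 31 - r holds for all r in [0, 31]\n");
    }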
+ FMT_MSC_WARNING(suppress : 6102) + return 31 ^ static_cast(r); +} +# define FMT_BUILTIN_CLZ(n) detail::clz(n) + +inline auto clzll(uint64_t x) -> int { + unsigned long r = 0; +# ifdef _WIN64 + _BitScanReverse64(&r, x); +# else + // Scan the high 32 bits. + if (_BitScanReverse(&r, static_cast(x >> 32))) return 63 ^ (r + 32); + // Scan the low 32 bits. + _BitScanReverse(&r, static_cast(x)); +# endif + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. + return 63 ^ static_cast(r); +} +# define FMT_BUILTIN_CLZLL(n) detail::clzll(n) + +inline auto ctz(uint32_t x) -> int { + unsigned long r = 0; + _BitScanForward(&r, x); + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. + return static_cast(r); +} +# define FMT_BUILTIN_CTZ(n) detail::ctz(n) + +inline auto ctzll(uint64_t x) -> int { + unsigned long r = 0; + FMT_ASSERT(x != 0, ""); + FMT_MSC_WARNING(suppress : 6102) // Suppress a bogus static analysis warning. +# ifdef _WIN64 + _BitScanForward64(&r, x); +# else + // Scan the low 32 bits. + if (_BitScanForward(&r, static_cast(x))) return static_cast(r); + // Scan the high 32 bits. + _BitScanForward(&r, static_cast(x >> 32)); + r += 32; +# endif + return static_cast(r); +} +# define FMT_BUILTIN_CTZLL(n) detail::ctzll(n) +} // namespace detail +FMT_END_NAMESPACE +#endif + +#ifdef FMT_HEADER_ONLY +# define FMT_HEADER_ONLY_CONSTEXPR20 FMT_CONSTEXPR20 +#else +# define FMT_HEADER_ONLY_CONSTEXPR20 +#endif + +FMT_BEGIN_NAMESPACE +namespace detail { + +FMT_CONSTEXPR inline void abort_fuzzing_if(bool condition) { + ignore_unused(condition); +#ifdef FMT_FUZZ + if (condition) throw std::runtime_error("fuzzing limit reached"); +#endif +} + +template class formatbuf : public Streambuf { + private: + using char_type = typename Streambuf::char_type; + using streamsize = decltype(std::declval().sputn(nullptr, 0)); + using int_type = typename Streambuf::int_type; + using traits_type = typename Streambuf::traits_type; + + buffer& buffer_; + + public: + explicit formatbuf(buffer& buf) : buffer_(buf) {} + + protected: + // The put area is always empty. This makes the implementation simpler and has + // the advantage that the streambuf and the buffer are always in sync and + // sputc never writes into uninitialized memory. A disadvantage is that each + // call to sputc always results in a (virtual) call to overflow. There is no + // disadvantage here for sputn since this always results in a call to xsputn. + + auto overflow(int_type ch) -> int_type override { + if (!traits_type::eq_int_type(ch, traits_type::eof())) + buffer_.push_back(static_cast(ch)); + return ch; + } + + auto xsputn(const char_type* s, streamsize count) -> streamsize override { + buffer_.append(s, s + count); + return count; + } +}; + +// Implementation of std::bit_cast for pre-C++20. 
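The fallback defined next relies on memcpy being well-defined between trivially copyable types of equal size; optimizing compilers lower it to a plain register move. A freestanding version of the same idea (bit_cast_sketch is a hypothetical name):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    template <typename To, typename From>
    To bit_cast_sketch(const From& from) {
      static_assert(sizeof(To) == sizeof(From), "sizes must match");
      To to;
      std::memcpy(&to, &from, sizeof(to));  // defined behavior, unlike a union pun
      return to;
    }

    int main() {
      float f = 1.0f;
      std::printf("0x%08X\n", static_cast<unsigned>(bit_cast_sketch<uint32_t>(f)));
      // Prints 0x3F800000, the IEEE-754 binary32 encoding of 1.0f.
    }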
+template +FMT_CONSTEXPR20 auto bit_cast(const From& from) -> To { +#ifdef __cpp_lib_bit_cast + if (is_constant_evaluated()) return std::bit_cast(from); +#endif + auto to = To(); + std::memcpy(&to, &from, sizeof(to)); + return to; +} + +inline auto is_big_endian() -> bool { +#ifdef _WIN32 + return false; +#elif defined(__BIG_ENDIAN__) + return true; +#elif defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) + return __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__; +#else + struct bytes { + char data[sizeof(int)]; + }; + return bit_cast(1).data[0] == 0; +#endif +} + +class uint128_fallback { + private: + uint64_t lo_, hi_; + + friend uint128_fallback umul128(uint64_t x, uint64_t y) noexcept; + + public: + constexpr uint128_fallback(uint64_t hi, uint64_t lo) : lo_(lo), hi_(hi) {} + constexpr uint128_fallback(uint64_t value = 0) : lo_(value), hi_(0) {} + + constexpr uint64_t high() const noexcept { return hi_; } + constexpr uint64_t low() const noexcept { return lo_; } + + template ::value)> + constexpr explicit operator T() const { + return static_cast(lo_); + } + + friend constexpr auto operator==(const uint128_fallback& lhs, + const uint128_fallback& rhs) -> bool { + return lhs.hi_ == rhs.hi_ && lhs.lo_ == rhs.lo_; + } + friend constexpr auto operator!=(const uint128_fallback& lhs, + const uint128_fallback& rhs) -> bool { + return !(lhs == rhs); + } + friend constexpr auto operator>(const uint128_fallback& lhs, + const uint128_fallback& rhs) -> bool { + return lhs.hi_ != rhs.hi_ ? lhs.hi_ > rhs.hi_ : lhs.lo_ > rhs.lo_; + } + friend constexpr auto operator|(const uint128_fallback& lhs, + const uint128_fallback& rhs) + -> uint128_fallback { + return {lhs.hi_ | rhs.hi_, lhs.lo_ | rhs.lo_}; + } + friend constexpr auto operator&(const uint128_fallback& lhs, + const uint128_fallback& rhs) + -> uint128_fallback { + return {lhs.hi_ & rhs.hi_, lhs.lo_ & rhs.lo_}; + } + friend auto operator+(const uint128_fallback& lhs, + const uint128_fallback& rhs) -> uint128_fallback { + auto result = uint128_fallback(lhs); + result += rhs; + return result; + } + friend auto operator*(const uint128_fallback& lhs, uint32_t rhs) + -> uint128_fallback { + FMT_ASSERT(lhs.hi_ == 0, ""); + uint64_t hi = (lhs.lo_ >> 32) * rhs; + uint64_t lo = (lhs.lo_ & ~uint32_t()) * rhs; + uint64_t new_lo = (hi << 32) + lo; + return {(hi >> 32) + (new_lo < lo ? 1 : 0), new_lo}; + } + friend auto operator-(const uint128_fallback& lhs, uint64_t rhs) + -> uint128_fallback { + return {lhs.hi_ - (lhs.lo_ < rhs ? 1 : 0), lhs.lo_ - rhs}; + } + FMT_CONSTEXPR auto operator>>(int shift) const -> uint128_fallback { + if (shift == 64) return {0, hi_}; + return {hi_ >> shift, (hi_ << (64 - shift)) | (lo_ >> shift)}; + } + FMT_CONSTEXPR auto operator<<(int shift) const -> uint128_fallback { + if (shift == 64) return {lo_, 0}; + return {hi_ << shift | (lo_ >> (64 - shift)), (lo_ << shift)}; + } + FMT_CONSTEXPR auto operator>>=(int shift) -> uint128_fallback& { + return *this = *this >> shift; + } + FMT_CONSTEXPR void operator+=(uint128_fallback n) { + uint64_t new_lo = lo_ + n.lo_; + uint64_t new_hi = hi_ + n.hi_ + (new_lo < lo_ ? 1 : 0); + FMT_ASSERT(new_hi >= hi_, ""); + lo_ = new_lo; + hi_ = new_hi; + } + FMT_CONSTEXPR20 uint128_fallback& operator+=(uint64_t n) noexcept; +}; + +using uint128_t = conditional_t; + +#ifdef UINTPTR_MAX +using uintptr_t = ::uintptr_t; +#else +using uintptr_t = uint128_t; +#endif + +// Returns the largest possible value for type T. Same as +// std::numeric_limits::max() but shorter and not affected by the max macro. 
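The parentheses the comment above refers to matter on Windows, where <windows.h> defines a function-like max macro unless NOMINMAX is set; a name enclosed in parentheses is never treated as a macro invocation. Demonstration with a stand-in macro:

    #include <cstdio>
    #include <limits>

    #define max(a, b) ((a) > (b) ? (a) : (b))  // stand-in for the windows.h macro

    int main() {
      // std::numeric_limits<int>::max() would expand the macro (with the
      // wrong arity) and fail to compile; the parenthesized form calls the
      // real member function.
      int m = (std::numeric_limits<int>::max)();
      std::printf("%d\n", m);
    }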
+template constexpr auto max_value() -> T { + return (std::numeric_limits::max)(); +} +template constexpr auto num_bits() -> int { + return std::numeric_limits::digits; +} +// std::numeric_limits::digits may return 0 for 128-bit ints. +template <> constexpr auto num_bits() -> int { return 128; } +template <> constexpr auto num_bits() -> int { return 128; } + +// A heterogeneous bit_cast used for converting 96-bit long double to uint128_t +// and 128-bit pointers to uint128_fallback. +template sizeof(From))> +inline auto bit_cast(const From& from) -> To { + constexpr auto size = static_cast(sizeof(From) / sizeof(unsigned)); + struct data_t { + unsigned value[static_cast(size)]; + } data = bit_cast(from); + auto result = To(); + if (const_check(is_big_endian())) { + for (int i = 0; i < size; ++i) + result = (result << num_bits()) | data.value[i]; + } else { + for (int i = size - 1; i >= 0; --i) + result = (result << num_bits()) | data.value[i]; + } + return result; +} + +FMT_INLINE void assume(bool condition) { + (void)condition; +#if FMT_HAS_BUILTIN(__builtin_assume) && !FMT_ICC_VERSION + __builtin_assume(condition); +#endif +} + +// An approximation of iterator_t for pre-C++20 systems. +template +using iterator_t = decltype(std::begin(std::declval())); +template using sentinel_t = decltype(std::end(std::declval())); + +// A workaround for std::string not having mutable data() until C++17. +template +inline auto get_data(std::basic_string& s) -> Char* { + return &s[0]; +} +template +inline auto get_data(Container& c) -> typename Container::value_type* { + return c.data(); +} + +#if defined(_SECURE_SCL) && _SECURE_SCL +// Make a checked iterator to avoid MSVC warnings. +template using checked_ptr = stdext::checked_array_iterator; +template +constexpr auto make_checked(T* p, size_t size) -> checked_ptr { + return {p, size}; +} +#else +template using checked_ptr = T*; +template constexpr auto make_checked(T* p, size_t) -> T* { + return p; +} +#endif + +// Attempts to reserve space for n extra characters in the output range. +// Returns a pointer to the reserved range or a reference to it. +template ::value)> +#if FMT_CLANG_VERSION >= 307 && !FMT_ICC_VERSION +__attribute__((no_sanitize("undefined"))) +#endif +inline auto +reserve(std::back_insert_iterator it, size_t n) + -> checked_ptr { + Container& c = get_container(it); + size_t size = c.size(); + c.resize(size + n); + return make_checked(get_data(c) + size, n); +} + +template +inline auto reserve(buffer_appender it, size_t n) -> buffer_appender { + buffer& buf = get_container(it); + buf.try_reserve(buf.size() + n); + return it; +} + +template +constexpr auto reserve(Iterator& it, size_t) -> Iterator& { + return it; +} + +template +using reserve_iterator = + remove_reference_t(), 0))>; + +template +constexpr auto to_pointer(OutputIt, size_t) -> T* { + return nullptr; +} +template auto to_pointer(buffer_appender it, size_t n) -> T* { + buffer& buf = get_container(it); + auto size = buf.size(); + if (buf.capacity() < size + n) return nullptr; + buf.try_resize(size + n); + return buf.data() + size; +} + +template ::value)> +inline auto base_iterator(std::back_insert_iterator& it, + checked_ptr) + -> std::back_insert_iterator { + return it; +} + +template +constexpr auto base_iterator(Iterator, Iterator it) -> Iterator { + return it; +} + +// is spectacularly slow to compile in C++20 so use a simple fill_n +// instead (#1998). 
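The fill_n defined next dispatches on evaluation context: a plain loop keeps it usable in constant expressions, while the runtime path may use memset. A minimal model of the same split, assuming C++20 for std::is_constant_evaluated (fill_sketch is a hypothetical name):

    #include <cstddef>
    #include <cstring>
    #include <type_traits>

    constexpr char* fill_sketch(char* out, std::size_t count, char value) {
      if (std::is_constant_evaluated()) {
        for (std::size_t i = 0; i < count; ++i) out[i] = value;  // constexpr-safe
      } else {
        std::memset(out, value, count);  // faster at run time
      }
      return out + count;
    }

    int main() {
      char buf[4];
      fill_sketch(buf, sizeof(buf), 'x');
    }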
+template +FMT_CONSTEXPR auto fill_n(OutputIt out, Size count, const T& value) + -> OutputIt { + for (Size i = 0; i < count; ++i) *out++ = value; + return out; +} +template +FMT_CONSTEXPR20 auto fill_n(T* out, Size count, char value) -> T* { + if (is_constant_evaluated()) { + return fill_n(out, count, value); + } + std::memset(out, value, to_unsigned(count)); + return out + count; +} + +#ifdef __cpp_char8_t +using char8_type = char8_t; +#else +enum char8_type : unsigned char {}; +#endif + +template +FMT_CONSTEXPR FMT_NOINLINE auto copy_str_noinline(InputIt begin, InputIt end, + OutputIt out) -> OutputIt { + return copy_str(begin, end, out); +} + +// A public domain branchless UTF-8 decoder by Christopher Wellons: +// https://github.com/skeeto/branchless-utf8 +/* Decode the next character, c, from s, reporting errors in e. + * + * Since this is a branchless decoder, four bytes will be read from the + * buffer regardless of the actual length of the next character. This + * means the buffer _must_ have at least three bytes of zero padding + * following the end of the data stream. + * + * Errors are reported in e, which will be non-zero if the parsed + * character was somehow invalid: invalid byte sequence, non-canonical + * encoding, or a surrogate half. + * + * The function returns a pointer to the next character. When an error + * occurs, this pointer will be a guess that depends on the particular + * error, but it will always advance at least one byte. + */ +FMT_CONSTEXPR inline auto utf8_decode(const char* s, uint32_t* c, int* e) + -> const char* { + constexpr const int masks[] = {0x00, 0x7f, 0x1f, 0x0f, 0x07}; + constexpr const uint32_t mins[] = {4194304, 0, 128, 2048, 65536}; + constexpr const int shiftc[] = {0, 18, 12, 6, 0}; + constexpr const int shifte[] = {0, 6, 4, 2, 0}; + + int len = code_point_length(s); + const char* next = s + len; + + // Assume a four-byte character and load four bytes. Unused bits are + // shifted out. + *c = uint32_t(s[0] & masks[len]) << 18; + *c |= uint32_t(s[1] & 0x3f) << 12; + *c |= uint32_t(s[2] & 0x3f) << 6; + *c |= uint32_t(s[3] & 0x3f) << 0; + *c >>= shiftc[len]; + + // Accumulate the various error conditions. + using uchar = unsigned char; + *e = (*c < mins[len]) << 6; // non-canonical encoding + *e |= ((*c >> 11) == 0x1b) << 7; // surrogate half? + *e |= (*c > 0x10FFFF) << 8; // out of range? + *e |= (uchar(s[1]) & 0xc0) >> 2; + *e |= (uchar(s[2]) & 0xc0) >> 4; + *e |= uchar(s[3]) >> 6; + *e ^= 0x2a; // top two bits of each tail byte correct? + *e >>= shifte[len]; + + return next; +} + +constexpr uint32_t invalid_code_point = ~uint32_t(); + +// Invokes f(cp, sv) for every code point cp in s with sv being the string view +// corresponding to the code point. cp is invalid_code_point on error. +template +FMT_CONSTEXPR void for_each_codepoint(string_view s, F f) { + auto decode = [f](const char* buf_ptr, const char* ptr) { + auto cp = uint32_t(); + auto error = 0; + auto end = utf8_decode(buf_ptr, &cp, &error); + bool result = f(error ? invalid_code_point : cp, + string_view(ptr, to_unsigned(end - buf_ptr))); + return result ? end : nullptr; + }; + auto p = s.data(); + const size_t block_size = 4; // utf8_decode always reads blocks of 4 chars. 
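Because utf8_decode unconditionally reads four bytes, the tail handling below copies the final chunk into a zero-padded local buffer before decoding it. For reference, the bit layout the decoder undoes, worked by hand for U+20AC (the euro sign, bytes E2 82 AC):

    #include <cstdint>
    #include <cstdio>

    int main() {
      unsigned char s[] = {0xE2, 0x82, 0xAC};   // 3-byte sequence for U+20AC
      uint32_t cp = uint32_t(s[0] & 0x0F) << 12  // lead byte: 4 payload bits
                  | uint32_t(s[1] & 0x3F) << 6   // continuation: 6 bits
                  | uint32_t(s[2] & 0x3F);       // continuation: 6 bits
      std::printf("U+%04X\n", cp);  // U+20AC
    }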
+ if (s.size() >= block_size) { + for (auto end = p + s.size() - block_size + 1; p < end;) { + p = decode(p, p); + if (!p) return; + } + } + if (auto num_chars_left = s.data() + s.size() - p) { + char buf[2 * block_size - 1] = {}; + copy_str(p, p + num_chars_left, buf); + const char* buf_ptr = buf; + do { + auto end = decode(buf_ptr, p); + if (!end) return; + p += end - buf_ptr; + buf_ptr = end; + } while (buf_ptr - buf < num_chars_left); + } +} + +template +inline auto compute_width(basic_string_view s) -> size_t { + return s.size(); +} + +// Computes approximate display width of a UTF-8 string. +FMT_CONSTEXPR inline size_t compute_width(string_view s) { + size_t num_code_points = 0; + // It is not a lambda for compatibility with C++14. + struct count_code_points { + size_t* count; + FMT_CONSTEXPR auto operator()(uint32_t cp, string_view) const -> bool { + *count += detail::to_unsigned( + 1 + + (cp >= 0x1100 && + (cp <= 0x115f || // Hangul Jamo init. consonants + cp == 0x2329 || // LEFT-POINTING ANGLE BRACKET + cp == 0x232a || // RIGHT-POINTING ANGLE BRACKET + // CJK ... Yi except IDEOGRAPHIC HALF FILL SPACE: + (cp >= 0x2e80 && cp <= 0xa4cf && cp != 0x303f) || + (cp >= 0xac00 && cp <= 0xd7a3) || // Hangul Syllables + (cp >= 0xf900 && cp <= 0xfaff) || // CJK Compatibility Ideographs + (cp >= 0xfe10 && cp <= 0xfe19) || // Vertical Forms + (cp >= 0xfe30 && cp <= 0xfe6f) || // CJK Compatibility Forms + (cp >= 0xff00 && cp <= 0xff60) || // Fullwidth Forms + (cp >= 0xffe0 && cp <= 0xffe6) || // Fullwidth Forms + (cp >= 0x20000 && cp <= 0x2fffd) || // CJK + (cp >= 0x30000 && cp <= 0x3fffd) || + // Miscellaneous Symbols and Pictographs + Emoticons: + (cp >= 0x1f300 && cp <= 0x1f64f) || + // Supplemental Symbols and Pictographs: + (cp >= 0x1f900 && cp <= 0x1f9ff)))); + return true; + } + }; + for_each_codepoint(s, count_code_points{&num_code_points}); + return num_code_points; +} + +inline auto compute_width(basic_string_view s) -> size_t { + return compute_width(basic_string_view( + reinterpret_cast(s.data()), s.size())); +} + +template +inline auto code_point_index(basic_string_view s, size_t n) -> size_t { + size_t size = s.size(); + return n < size ? n : size; +} + +// Calculates the index of the nth code point in a UTF-8 string. 
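code_point_index below (like compute_width above) exploits the fact that UTF-8 continuation bytes always match 10xxxxxx, so counting the bytes whose top two bits are not 10 counts code points:

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* s = "a\xC3\xA9\xE2\x82\xAC";  // "a", U+00E9, U+20AC: 1+2+3 bytes
      int n = 0;
      for (std::size_t i = 0, size = std::strlen(s); i != size; ++i)
        if ((s[i] & 0xC0) != 0x80) ++n;  // count every non-continuation byte
      std::printf("%d code points in %zu bytes\n", n, std::strlen(s));  // 3 in 6
    }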
+inline auto code_point_index(basic_string_view s, size_t n) + -> size_t { + const char8_type* data = s.data(); + size_t num_code_points = 0; + for (size_t i = 0, size = s.size(); i != size; ++i) { + if ((data[i] & 0xc0) != 0x80 && ++num_code_points > n) return i; + } + return s.size(); +} + +#ifndef FMT_USE_FLOAT128 +# ifdef __SIZEOF_FLOAT128__ +# define FMT_USE_FLOAT128 1 +# else +# define FMT_USE_FLOAT128 0 +# endif +#endif +#if FMT_USE_FLOAT128 +using float128 = __float128; +#else +using float128 = void; +#endif +template using is_float128 = std::is_same; + +template +using is_floating_point = + bool_constant::value || is_float128::value>; + +template ::value> +struct is_fast_float : bool_constant::is_iec559 && + sizeof(T) <= sizeof(double)> {}; +template struct is_fast_float : std::false_type {}; + +#ifndef FMT_USE_FULL_CACHE_DRAGONBOX +# define FMT_USE_FULL_CACHE_DRAGONBOX 0 +#endif + +template +template +void buffer::append(const U* begin, const U* end) { + while (begin != end) { + auto count = to_unsigned(end - begin); + try_reserve(size_ + count); + auto free_cap = capacity_ - size_; + if (free_cap < count) count = free_cap; + std::uninitialized_copy_n(begin, count, make_checked(ptr_ + size_, count)); + size_ += count; + begin += count; + } +} + +template +struct is_locale : std::false_type {}; +template +struct is_locale> : std::true_type {}; +} // namespace detail + +FMT_MODULE_EXPORT_BEGIN + +// The number of characters to store in the basic_memory_buffer object itself +// to avoid dynamic memory allocation. +enum { inline_buffer_size = 500 }; + +/** + \rst + A dynamically growing memory buffer for trivially copyable/constructible types + with the first ``SIZE`` elements stored in the object itself. + + You can use the ``memory_buffer`` type alias for ``char`` instead. + + **Example**:: + + auto out = fmt::memory_buffer(); + format_to(std::back_inserter(out), "The answer is {}.", 42); + + This will append the following output to the ``out`` object: + + .. code-block:: none + + The answer is 42. + + The output can be converted to an ``std::string`` with ``to_string(out)``. + \endrst + */ +template > +class basic_memory_buffer final : public detail::buffer { + private: + T store_[SIZE]; + + // Don't inherit from Allocator avoid generating type_info for it. + Allocator alloc_; + + // Deallocate memory allocated by the buffer. + FMT_CONSTEXPR20 void deallocate() { + T* data = this->data(); + if (data != store_) alloc_.deallocate(data, this->capacity()); + } + + protected: + FMT_CONSTEXPR20 void grow(size_t size) override; + + public: + using value_type = T; + using const_reference = const T&; + + FMT_CONSTEXPR20 explicit basic_memory_buffer( + const Allocator& alloc = Allocator()) + : alloc_(alloc) { + this->set(store_, SIZE); + if (detail::is_constant_evaluated()) detail::fill_n(store_, SIZE, T()); + } + FMT_CONSTEXPR20 ~basic_memory_buffer() { deallocate(); } + + private: + // Move data from other to this buffer. + FMT_CONSTEXPR20 void move(basic_memory_buffer& other) { + alloc_ = std::move(other.alloc_); + T* data = other.data(); + size_t size = other.size(), capacity = other.capacity(); + if (data == other.store_) { + this->set(store_, capacity); + detail::copy_str(other.store_, other.store_ + size, + detail::make_checked(store_, capacity)); + } else { + this->set(data, capacity); + // Set pointer to the inline array so that delete is not called + // when deallocating. 
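The comment above flags the subtle part of moving a buffer with inline storage: when the source's data pointer aims at its own member array, the pointer cannot be stolen (it would dangle once the source dies), and when heap memory is stolen, the source must be repointed at its inline array so its destructor frees nothing. A toy model of the same policy (tiny_buffer is a hypothetical type):

    #include <cstring>
    #include <utility>

    struct tiny_buffer {
      char store[8];  // inline storage
      char* data;     // points at store or at heap memory (heap path elided)
      tiny_buffer() : data(store) {}
      tiny_buffer(tiny_buffer&& other) {
        if (other.data == other.store) {
          std::memcpy(store, other.store, sizeof(store));
          data = store;              // copy contents; never alias the source
        } else {
          data = other.data;         // steal the heap allocation
          other.data = other.store;  // source's destructor must not free it
        }
      }
    };

    int main() {
      tiny_buffer a;
      tiny_buffer b(std::move(a));  // b.data points into b, not into a
      (void)b;
    }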
+ other.set(other.store_, 0); + other.clear(); + } + this->resize(size); + } + + public: + /** + \rst + Constructs a :class:`fmt::basic_memory_buffer` object moving the content + of the other object to it. + \endrst + */ + FMT_CONSTEXPR20 basic_memory_buffer(basic_memory_buffer&& other) noexcept { + move(other); + } + + /** + \rst + Moves the content of the other ``basic_memory_buffer`` object to this one. + \endrst + */ + auto operator=(basic_memory_buffer&& other) noexcept -> basic_memory_buffer& { + FMT_ASSERT(this != &other, ""); + deallocate(); + move(other); + return *this; + } + + // Returns a copy of the allocator associated with this buffer. + auto get_allocator() const -> Allocator { return alloc_; } + + /** + Resizes the buffer to contain *count* elements. If T is a POD type new + elements may not be initialized. + */ + FMT_CONSTEXPR20 void resize(size_t count) { this->try_resize(count); } + + /** Increases the buffer capacity to *new_capacity*. */ + void reserve(size_t new_capacity) { this->try_reserve(new_capacity); } + + // Directly append data into the buffer + using detail::buffer::append; + template + void append(const ContiguousRange& range) { + append(range.data(), range.data() + range.size()); + } +}; + +template +FMT_CONSTEXPR20 void basic_memory_buffer::grow( + size_t size) { + detail::abort_fuzzing_if(size > 5000); + const size_t max_size = std::allocator_traits::max_size(alloc_); + size_t old_capacity = this->capacity(); + size_t new_capacity = old_capacity + old_capacity / 2; + if (size > new_capacity) + new_capacity = size; + else if (new_capacity > max_size) + new_capacity = size > max_size ? size : max_size; + T* old_data = this->data(); + T* new_data = + std::allocator_traits::allocate(alloc_, new_capacity); + // The following code doesn't throw, so the raw pointer above doesn't leak. + std::uninitialized_copy(old_data, old_data + this->size(), + detail::make_checked(new_data, new_capacity)); + this->set(new_data, new_capacity); + // deallocate must not throw according to the standard, but even if it does, + // the buffer already uses the new storage and will deallocate it in + // destructor. + if (old_data != store_) alloc_.deallocate(old_data, old_capacity); +} + +using memory_buffer = basic_memory_buffer; + +template +struct is_contiguous> : std::true_type { +}; + +namespace detail { +FMT_API void print(std::FILE*, string_view); +} + +/** A formatting error such as invalid format string. */ +FMT_CLASS_API +class FMT_API format_error : public std::runtime_error { + public: + explicit format_error(const char* message) : std::runtime_error(message) {} + explicit format_error(const std::string& message) + : std::runtime_error(message) {} + format_error(const format_error&) = default; + format_error& operator=(const format_error&) = default; + format_error(format_error&&) = default; + format_error& operator=(format_error&&) = default; + ~format_error() noexcept override FMT_MSC_DEFAULT; +}; + +/** + \rst + Constructs a `~fmt::format_arg_store` object that contains references + to arguments and can be implicitly converted to `~fmt::format_args`. + If ``fmt`` is a compile-time string then `make_args_checked` checks + its validity at compile time. + \endrst + */ +template > +FMT_DEPRECATED FMT_INLINE auto make_args_checked( + const S& fmt, const remove_reference_t&... 
args) + -> format_arg_store, remove_reference_t...> { + static_assert( + detail::count<( + std::is_base_of>::value && + std::is_reference::value)...>() == 0, + "passing views as lvalues is disallowed"); + detail::check_format_string(fmt); + return {args...}; +} + +namespace detail_exported { +#if FMT_USE_NONTYPE_TEMPLATE_PARAMETERS +template struct fixed_string { + constexpr fixed_string(const Char (&str)[N]) { + detail::copy_str(static_cast(str), + str + N, data); + } + Char data[N] = {}; +}; +#endif + +// Converts a compile-time string to basic_string_view. +template +constexpr auto compile_string_to_view(const Char (&s)[N]) + -> basic_string_view { + // Remove trailing NUL character if needed. Won't be present if this is used + // with a raw character array (i.e. not defined as a string). + return {s, N - (std::char_traits::to_int_type(s[N - 1]) == 0 ? 1 : 0)}; +} +template +constexpr auto compile_string_to_view(detail::std_string_view s) + -> basic_string_view { + return {s.data(), s.size()}; +} +} // namespace detail_exported + +FMT_BEGIN_DETAIL_NAMESPACE + +template struct is_integral : std::is_integral {}; +template <> struct is_integral : std::true_type {}; +template <> struct is_integral : std::true_type {}; + +template +using is_signed = + std::integral_constant::is_signed || + std::is_same::value>; + +// Returns true if value is negative, false otherwise. +// Same as `value < 0` but doesn't produce warnings if T is an unsigned type. +template ::value)> +constexpr auto is_negative(T value) -> bool { + return value < 0; +} +template ::value)> +constexpr auto is_negative(T) -> bool { + return false; +} + +template constexpr auto is_supported_floating_point(T) -> bool { + return (std::is_same() && FMT_USE_FLOAT) || + (std::is_same() && FMT_USE_DOUBLE) || + (std::is_same() && FMT_USE_LONG_DOUBLE) || + is_float128(); +} + +// Smallest of uint32_t, uint64_t, uint128_t that is large enough to +// represent all values of an integral type T. +template +using uint32_or_64_or_128_t = + conditional_t() <= 32 && !FMT_REDUCE_INT_INSTANTIATIONS, + uint32_t, + conditional_t() <= 64, uint64_t, uint128_t>>; +template +using uint64_or_128_t = conditional_t() <= 64, uint64_t, uint128_t>; + +#define FMT_POWERS_OF_10(factor) \ + factor * 10, (factor)*100, (factor)*1000, (factor)*10000, (factor)*100000, \ + (factor)*1000000, (factor)*10000000, (factor)*100000000, \ + (factor)*1000000000 + +// Converts value in the range [0, 100) to a string. +constexpr const char* digits2(size_t value) { + // GCC generates slightly better code when value is pointer-size. + return &"0001020304050607080910111213141516171819" + "2021222324252627282930313233343536373839" + "4041424344454647484950515253545556575859" + "6061626364656667686970717273747576777879" + "8081828384858687888990919293949596979899"[value * 2]; +} + +// Sign is a template parameter to workaround a bug in gcc 4.8. +template constexpr Char sign(Sign s) { +#if !FMT_GCC_VERSION || FMT_GCC_VERSION >= 604 + static_assert(std::is_same::value, ""); +#endif + return static_cast("\0-+ "[s]); +} + +template FMT_CONSTEXPR auto count_digits_fallback(T n) -> int { + int count = 1; + for (;;) { + // Integer division is slow so do it for a group of four digits instead + // of for every digit. The idea comes from the talk by Alexandrescu + // "Three Optimization Tips for C++". See speed-test for a comparison. 
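+    // E.g. count_digits_fallback(1234567) divides by 10000 once (count
+    // becomes 5) and then returns 5 + 2 for the remaining 123.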
+ if (n < 10) return count; + if (n < 100) return count + 1; + if (n < 1000) return count + 2; + if (n < 10000) return count + 3; + n /= 10000u; + count += 4; + } +} +#if FMT_USE_INT128 +FMT_CONSTEXPR inline auto count_digits(uint128_opt n) -> int { + return count_digits_fallback(n); +} +#endif + +#ifdef FMT_BUILTIN_CLZLL +// It is a separate function rather than a part of count_digits to workaround +// the lack of static constexpr in constexpr functions. +inline auto do_count_digits(uint64_t n) -> int { + // This has comparable performance to the version by Kendall Willets + // (https://github.com/fmtlib/format-benchmark/blob/master/digits10) + // but uses smaller tables. + // Maps bsr(n) to ceil(log10(pow(2, bsr(n) + 1) - 1)). + static constexpr uint8_t bsr2log10[] = { + 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, + 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, + 10, 11, 11, 11, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 15, 15, + 15, 16, 16, 16, 16, 17, 17, 17, 18, 18, 18, 19, 19, 19, 19, 20}; + auto t = bsr2log10[FMT_BUILTIN_CLZLL(n | 1) ^ 63]; + static constexpr const uint64_t zero_or_powers_of_10[] = { + 0, 0, FMT_POWERS_OF_10(1U), FMT_POWERS_OF_10(1000000000ULL), + 10000000000000000000ULL}; + return t - (n < zero_or_powers_of_10[t]); +} +#endif + +// Returns the number of decimal digits in n. Leading zeros are not counted +// except for n == 0 in which case count_digits returns 1. +FMT_CONSTEXPR20 inline auto count_digits(uint64_t n) -> int { +#ifdef FMT_BUILTIN_CLZLL + if (!is_constant_evaluated()) { + return do_count_digits(n); + } +#endif + return count_digits_fallback(n); +} + +// Counts the number of digits in n. BITS = log2(radix). +template +FMT_CONSTEXPR auto count_digits(UInt n) -> int { +#ifdef FMT_BUILTIN_CLZ + if (!is_constant_evaluated() && num_bits() == 32) + return (FMT_BUILTIN_CLZ(static_cast(n) | 1) ^ 31) / BITS + 1; +#endif + // Lambda avoids unreachable code warnings from NVHPC. + return [](UInt m) { + int num_digits = 0; + do { + ++num_digits; + } while ((m >>= BITS) != 0); + return num_digits; + }(n); +} + +#ifdef FMT_BUILTIN_CLZ +// It is a separate function rather than a part of count_digits to workaround +// the lack of static constexpr in constexpr functions. +FMT_INLINE auto do_count_digits(uint32_t n) -> int { +// An optimization by Kendall Willets from https://bit.ly/3uOIQrB. +// This increments the upper 32 bits (log10(T) - 1) when >= T is added. +# define FMT_INC(T) (((sizeof(# T) - 1ull) << 32) - T) + static constexpr uint64_t table[] = { + FMT_INC(0), FMT_INC(0), FMT_INC(0), // 8 + FMT_INC(10), FMT_INC(10), FMT_INC(10), // 64 + FMT_INC(100), FMT_INC(100), FMT_INC(100), // 512 + FMT_INC(1000), FMT_INC(1000), FMT_INC(1000), // 4096 + FMT_INC(10000), FMT_INC(10000), FMT_INC(10000), // 32k + FMT_INC(100000), FMT_INC(100000), FMT_INC(100000), // 256k + FMT_INC(1000000), FMT_INC(1000000), FMT_INC(1000000), // 2048k + FMT_INC(10000000), FMT_INC(10000000), FMT_INC(10000000), // 16M + FMT_INC(100000000), FMT_INC(100000000), FMT_INC(100000000), // 128M + FMT_INC(1000000000), FMT_INC(1000000000), FMT_INC(1000000000), // 1024M + FMT_INC(1000000000), FMT_INC(1000000000) // 4B + }; + auto inc = table[FMT_BUILTIN_CLZ(n | 1) ^ 31]; + return static_cast((n + inc) >> 32); +} +#endif + +// Optional version of count_digits for better performance on 32-bit platforms. 
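+// At runtime this uses the CLZ-based table above when available; in constexpr
+// evaluation it falls back to count_digits_fallback. E.g. count_digits(100u)
+// returns 3.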
+FMT_CONSTEXPR20 inline auto count_digits(uint32_t n) -> int { +#ifdef FMT_BUILTIN_CLZ + if (!is_constant_evaluated()) { + return do_count_digits(n); + } +#endif + return count_digits_fallback(n); +} + +template constexpr auto digits10() noexcept -> int { + return std::numeric_limits::digits10; +} +template <> constexpr auto digits10() noexcept -> int { return 38; } +template <> constexpr auto digits10() noexcept -> int { return 38; } + +template struct thousands_sep_result { + std::string grouping; + Char thousands_sep; +}; + +template +FMT_API auto thousands_sep_impl(locale_ref loc) -> thousands_sep_result; +template +inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { + auto result = thousands_sep_impl(loc); + return {result.grouping, Char(result.thousands_sep)}; +} +template <> +inline auto thousands_sep(locale_ref loc) -> thousands_sep_result { + return thousands_sep_impl(loc); +} + +template +FMT_API auto decimal_point_impl(locale_ref loc) -> Char; +template inline auto decimal_point(locale_ref loc) -> Char { + return Char(decimal_point_impl(loc)); +} +template <> inline auto decimal_point(locale_ref loc) -> wchar_t { + return decimal_point_impl(loc); +} + +// Compares two characters for equality. +template auto equal2(const Char* lhs, const char* rhs) -> bool { + return lhs[0] == Char(rhs[0]) && lhs[1] == Char(rhs[1]); +} +inline auto equal2(const char* lhs, const char* rhs) -> bool { + return memcmp(lhs, rhs, 2) == 0; +} + +// Copies two characters from src to dst. +template +FMT_CONSTEXPR20 FMT_INLINE void copy2(Char* dst, const char* src) { + if (!is_constant_evaluated() && sizeof(Char) == sizeof(char)) { + memcpy(dst, src, 2); + return; + } + *dst++ = static_cast(*src++); + *dst = static_cast(*src); +} + +template struct format_decimal_result { + Iterator begin; + Iterator end; +}; + +// Formats a decimal unsigned integer value writing into out pointing to a +// buffer of specified size. The caller must ensure that the buffer is large +// enough. +template +FMT_CONSTEXPR20 auto format_decimal(Char* out, UInt value, int size) + -> format_decimal_result { + FMT_ASSERT(size >= count_digits(value), "invalid digit count"); + out += size; + Char* end = out; + while (value >= 100) { + // Integer division is slow so do it for a group of two digits instead + // of for every digit. The idea comes from the talk by Alexandrescu + // "Three Optimization Tips for C++". See speed-test for a comparison. + out -= 2; + copy2(out, digits2(static_cast(value % 100))); + value /= 100; + } + if (value < 10) { + *--out = static_cast('0' + value); + return {out, end}; + } + out -= 2; + copy2(out, digits2(static_cast(value))); + return {out, end}; +} + +template >::value)> +inline auto format_decimal(Iterator out, UInt value, int size) + -> format_decimal_result { + // Buffer is large enough to hold all digits (digits10 + 1). + Char buffer[digits10() + 1]; + auto end = format_decimal(buffer, value, size).end; + return {out, detail::copy_str_noinline(buffer, end, out)}; +} + +template +FMT_CONSTEXPR auto format_uint(Char* buffer, UInt value, int num_digits, + bool upper = false) -> Char* { + buffer += num_digits; + Char* end = buffer; + do { + const char* digits = upper ? "0123456789ABCDEF" : "0123456789abcdef"; + unsigned digit = static_cast(value & ((1 << BASE_BITS) - 1)); + *--buffer = static_cast(BASE_BITS < 4 ? 
static_cast<char>('0' + digit)
+                                                : digits[digit]);
+  } while ((value >>= BASE_BITS) != 0);
+  return end;
+}
+
+template <unsigned BASE_BITS, typename Char, typename It, typename UInt>
+inline auto format_uint(It out, UInt value, int num_digits, bool upper = false)
+    -> It {
+  if (auto ptr = to_pointer<Char>(out, to_unsigned(num_digits))) {
+    format_uint<BASE_BITS>(ptr, value, num_digits, upper);
+    return out;
+  }
+  // Buffer should be large enough to hold all digits (digits / BASE_BITS + 1).
+  char buffer[num_bits<UInt>() / BASE_BITS + 1];
+  format_uint<BASE_BITS>(buffer, value, num_digits, upper);
+  return detail::copy_str_noinline<Char>(buffer, buffer + num_digits, out);
+}
+
+// A converter from UTF-8 to UTF-16.
+class utf8_to_utf16 {
+ private:
+  basic_memory_buffer<wchar_t> buffer_;
+
+ public:
+  FMT_API explicit utf8_to_utf16(string_view s);
+  operator basic_string_view<wchar_t>() const { return {&buffer_[0], size()}; }
+  auto size() const -> size_t { return buffer_.size() - 1; }
+  auto c_str() const -> const wchar_t* { return &buffer_[0]; }
+  auto str() const -> std::wstring { return {&buffer_[0], size()}; }
+};
+
+namespace dragonbox {
+
+// Type-specific information that Dragonbox uses.
+template <typename T, typename Enable = void> struct float_info;
+
+template <> struct float_info<float> {
+  using carrier_uint = uint32_t;
+  static const int exponent_bits = 8;
+  static const int kappa = 1;
+  static const int big_divisor = 100;
+  static const int small_divisor = 10;
+  static const int min_k = -31;
+  static const int max_k = 46;
+  static const int divisibility_check_by_5_threshold = 39;
+  static const int case_fc_pm_half_lower_threshold = -1;
+  static const int shorter_interval_tie_lower_threshold = -35;
+  static const int shorter_interval_tie_upper_threshold = -35;
+};
+
+template <> struct float_info<double> {
+  using carrier_uint = uint64_t;
+  static const int exponent_bits = 11;
+  static const int kappa = 2;
+  static const int big_divisor = 1000;
+  static const int small_divisor = 100;
+  static const int min_k = -292;
+  static const int max_k = 326;
+  static const int divisibility_check_by_5_threshold = 86;
+  static const int case_fc_pm_half_lower_threshold = -2;
+  static const int shorter_interval_tie_lower_threshold = -77;
+  static const int shorter_interval_tie_upper_threshold = -77;
+};
+
+// An 80- or 128-bit floating point number.
+template <typename T>
+struct float_info<T, enable_if_t<std::numeric_limits<T>::digits == 64 ||
+                                 std::numeric_limits<T>::digits == 113 ||
+                                 is_float128<T>::value>> {
+  using carrier_uint = detail::uint128_t;
+  static const int exponent_bits = 15;
+};
+
+// A double-double floating point number.
+template <typename T>
+struct float_info<T, enable_if_t<std::numeric_limits<T>::digits >= 106 &&
+                                 !std::numeric_limits<T>::is_iec559>> {
+  using carrier_uint = detail::uint128_t;
+};
+
+template <typename T> struct decimal_fp {
+  using significand_type = typename float_info<T>::carrier_uint;
+  significand_type significand;
+  int exponent;
+};
+
+template <typename T> FMT_API auto to_decimal(T x) noexcept -> decimal_fp<T>;
+}  // namespace dragonbox
+
+// Returns true iff Float has the implicit bit which is not stored.
+template <typename Float> constexpr bool has_implicit_bit() {
+  // An 80-bit FP number has a 64-bit significand and no implicit bit.
+  return std::numeric_limits<Float>::digits != 64;
+}
+
+// Returns the number of significand bits stored in Float. The implicit bit is
+// not counted since it is not stored.
+template <typename Float> constexpr int num_significand_bits() {
+  // std::numeric_limits may not support __float128.
+  return is_float128<Float>() ? 112
+                              : (std::numeric_limits<Float>::digits -
+                                 (has_implicit_bit<Float>() ?
1 : 0)); +} + +template +constexpr auto exponent_mask() -> + typename dragonbox::float_info::carrier_uint { + using uint = typename dragonbox::float_info::carrier_uint; + return ((uint(1) << dragonbox::float_info::exponent_bits) - 1) + << num_significand_bits(); +} +template constexpr auto exponent_bias() -> int { + // std::numeric_limits may not support __float128. + return is_float128() ? 16383 + : std::numeric_limits::max_exponent - 1; +} + +// Writes the exponent exp in the form "[+-]d{2,3}" to buffer. +template +FMT_CONSTEXPR auto write_exponent(int exp, It it) -> It { + FMT_ASSERT(-10000 < exp && exp < 10000, "exponent out of range"); + if (exp < 0) { + *it++ = static_cast('-'); + exp = -exp; + } else { + *it++ = static_cast('+'); + } + if (exp >= 100) { + const char* top = digits2(to_unsigned(exp / 100)); + if (exp >= 1000) *it++ = static_cast(top[0]); + *it++ = static_cast(top[1]); + exp %= 100; + } + const char* d = digits2(to_unsigned(exp)); + *it++ = static_cast(d[0]); + *it++ = static_cast(d[1]); + return it; +} + +template +FMT_HEADER_ONLY_CONSTEXPR20 auto format_float(T value, int precision, + float_specs specs, + buffer& buf) -> int; + +#ifndef _MSC_VER +# define FMT_SNPRINTF snprintf +#else +FMT_API auto fmt_snprintf(char* buf, size_t size, const char* fmt, ...) -> int; +# define FMT_SNPRINTF fmt_snprintf +#endif // _MSC_VER + +// Formats a floating-point number with snprintf. +template +auto snprintf_float(T value, int precision, float_specs specs, + buffer& buf) -> int { + // Buffer capacity must be non-zero, otherwise MSVC's vsnprintf_s will fail. + FMT_ASSERT(buf.capacity() > buf.size(), "empty buffer"); + static_assert(!std::is_same::value, ""); + + // Subtract 1 to account for the difference in precision since we use %e for + // both general and exponent format. + if (specs.format == float_format::general || + specs.format == float_format::exp) { + precision = (precision >= 0 ? precision : 6) - 1; + } + + // Build the format string. + char format[7]; // The longest format is "%#.*Le". + char* format_ptr = format; + *format_ptr++ = '%'; + if (specs.showpoint && specs.format == float_format::hex) *format_ptr++ = '#'; + if (precision >= 0) { + *format_ptr++ = '.'; + *format_ptr++ = '*'; + } + if (std::is_same()) *format_ptr++ = 'L'; + *format_ptr++ = specs.format != float_format::hex + ? (specs.format == float_format::fixed ? 'f' : 'e') + : (specs.upper ? 'A' : 'a'); + *format_ptr = '\0'; + + // Format using snprintf. + auto offset = buf.size(); + for (;;) { + auto begin = buf.data() + offset; + auto capacity = buf.capacity() - offset; + abort_fuzzing_if(precision > 100000); + // Suppress the warning about a nonliteral format string. + // Cannot use auto because of a bug in MinGW (#1532). + int (*snprintf_ptr)(char*, size_t, const char*, ...) = FMT_SNPRINTF; + int result = precision >= 0 + ? snprintf_ptr(begin, capacity, format, precision, value) + : snprintf_ptr(begin, capacity, format, value); + if (result < 0) { + // The buffer will grow exponentially. + buf.try_reserve(buf.capacity() + 1); + continue; + } + auto size = to_unsigned(result); + // Size equal to capacity means that the last character was truncated. + if (size >= capacity) { + buf.try_reserve(size + offset + 1); // Add 1 for the terminating '\0'. + continue; + } + auto is_digit = [](char c) { return c >= '0' && c <= '9'; }; + if (specs.format == float_format::fixed) { + if (precision == 0) { + buf.try_resize(size); + return 0; + } + // Find and remove the decimal point. 
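+      // E.g. "1.234" becomes "1234" and the function returns -3, i.e. the
+      // stored digits are to be read as 1234 * pow(10, -3).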
+ auto end = begin + size, p = end; + do { + --p; + } while (is_digit(*p)); + int fraction_size = static_cast(end - p - 1); + std::memmove(p, p + 1, to_unsigned(fraction_size)); + buf.try_resize(size - 1); + return -fraction_size; + } + if (specs.format == float_format::hex) { + buf.try_resize(size + offset); + return 0; + } + // Find and parse the exponent. + auto end = begin + size, exp_pos = end; + do { + --exp_pos; + } while (*exp_pos != 'e'); + char sign = exp_pos[1]; + FMT_ASSERT(sign == '+' || sign == '-', ""); + int exp = 0; + auto p = exp_pos + 2; // Skip 'e' and sign. + do { + FMT_ASSERT(is_digit(*p), ""); + exp = exp * 10 + (*p++ - '0'); + } while (p != end); + if (sign == '-') exp = -exp; + int fraction_size = 0; + if (exp_pos != begin + 1) { + // Remove trailing zeros. + auto fraction_end = exp_pos - 1; + while (*fraction_end == '0') --fraction_end; + // Move the fractional part left to get rid of the decimal point. + fraction_size = static_cast(fraction_end - begin - 1); + std::memmove(begin + 1, begin + 2, to_unsigned(fraction_size)); + } + buf.try_resize(to_unsigned(fraction_size) + offset + 1); + return exp - fraction_size; + } +} + +template +using convert_float_result = + conditional_t::value || sizeof(T) == sizeof(double), + double, T>; + +template +constexpr auto convert_float(T value) -> convert_float_result { + return static_cast>(value); +} + +template +FMT_NOINLINE FMT_CONSTEXPR auto fill(OutputIt it, size_t n, + const fill_t& fill) -> OutputIt { + auto fill_size = fill.size(); + if (fill_size == 1) return detail::fill_n(it, n, fill[0]); + auto data = fill.data(); + for (size_t i = 0; i < n; ++i) + it = copy_str(data, data + fill_size, it); + return it; +} + +// Writes the output of f, padded according to format specifications in specs. +// size: output size in code units. +// width: output display width in (terminal) column positions. +template +FMT_CONSTEXPR auto write_padded(OutputIt out, + const basic_format_specs& specs, + size_t size, size_t width, F&& f) -> OutputIt { + static_assert(align == align::left || align == align::right, ""); + unsigned spec_width = to_unsigned(specs.width); + size_t padding = spec_width > width ? spec_width - width : 0; + // Shifts are encoded as string literals because static constexpr is not + // supported in constexpr functions. + auto* shifts = align == align::left ? 
"\x1f\x1f\x00\x01" : "\x00\x1f\x00\x01"; + size_t left_padding = padding >> shifts[specs.align]; + size_t right_padding = padding - left_padding; + auto it = reserve(out, size + padding * specs.fill.size()); + if (left_padding != 0) it = fill(it, left_padding, specs.fill); + it = f(it); + if (right_padding != 0) it = fill(it, right_padding, specs.fill); + return base_iterator(out, it); +} + +template +constexpr auto write_padded(OutputIt out, const basic_format_specs& specs, + size_t size, F&& f) -> OutputIt { + return write_padded(out, specs, size, size, f); +} + +template +FMT_CONSTEXPR auto write_bytes(OutputIt out, string_view bytes, + const basic_format_specs& specs) + -> OutputIt { + return write_padded( + out, specs, bytes.size(), [bytes](reserve_iterator it) { + const char* data = bytes.data(); + return copy_str(data, data + bytes.size(), it); + }); +} + +template +auto write_ptr(OutputIt out, UIntPtr value, + const basic_format_specs* specs) -> OutputIt { + int num_digits = count_digits<4>(value); + auto size = to_unsigned(num_digits) + size_t(2); + auto write = [=](reserve_iterator it) { + *it++ = static_cast('0'); + *it++ = static_cast('x'); + return format_uint<4, Char>(it, value, num_digits); + }; + return specs ? write_padded(out, *specs, size, write) + : base_iterator(out, write(reserve(out, size))); +} + +// Returns true iff the code point cp is printable. +FMT_API auto is_printable(uint32_t cp) -> bool; + +inline auto needs_escape(uint32_t cp) -> bool { + return cp < 0x20 || cp == 0x7f || cp == '"' || cp == '\\' || + !is_printable(cp); +} + +template struct find_escape_result { + const Char* begin; + const Char* end; + uint32_t cp; +}; + +template +using make_unsigned_char = + typename conditional_t::value, + std::make_unsigned, + type_identity>::type; + +template +auto find_escape(const Char* begin, const Char* end) + -> find_escape_result { + for (; begin != end; ++begin) { + uint32_t cp = static_cast>(*begin); + if (sizeof(Char) == 1 && cp >= 0x80) continue; + if (needs_escape(cp)) return {begin, begin + 1, cp}; + } + return {begin, nullptr, 0}; +} + +inline auto find_escape(const char* begin, const char* end) + -> find_escape_result { + if (!is_utf8()) return find_escape(begin, end); + auto result = find_escape_result{end, nullptr, 0}; + for_each_codepoint(string_view(begin, to_unsigned(end - begin)), + [&](uint32_t cp, string_view sv) { + if (needs_escape(cp)) { + result = {sv.begin(), sv.end(), cp}; + return false; + } + return true; + }); + return result; +} + +#define FMT_STRING_IMPL(s, base, explicit) \ + [] { \ + /* Use the hidden visibility as a workaround for a GCC bug (#1973). */ \ + /* Use a macro-like name to avoid shadowing warnings. */ \ + struct FMT_GCC_VISIBILITY_HIDDEN FMT_COMPILE_STRING : base { \ + using char_type = fmt::remove_cvref_t; \ + FMT_MAYBE_UNUSED FMT_CONSTEXPR explicit \ + operator fmt::basic_string_view() const { \ + return fmt::detail_exported::compile_string_to_view(s); \ + } \ + }; \ + return FMT_COMPILE_STRING(); \ + }() + +/** + \rst + Constructs a compile-time format string from a string literal *s*. + + **Example**:: + + // A compile-time error because 'd' is an invalid specifier for strings. 
+ std::string s = fmt::format(FMT_STRING("{:d}"), "foo"); + \endrst + */ +#define FMT_STRING(s) FMT_STRING_IMPL(s, fmt::compile_string, ) + +template +auto write_escaped_string(OutputIt out, basic_string_view str) + -> OutputIt { + return copy_str(str.data(), str.data() + str.size(), out); +} + +template +auto write_escaped_cp(OutputIt out, const find_escape_result& escape) + -> OutputIt { + auto c = static_cast(escape.cp); + switch (escape.cp) { + case '\n': + *out++ = '\\'; + c = 'n'; + break; + case '\r': + *out++ = '\\'; + c = 'r'; + break; + case '\t': + *out++ = '\\'; + c = 't'; + break; + case '"': + FMT_FALLTHROUGH; + case '\'': + FMT_FALLTHROUGH; + case '\\': + *out++ = '\\'; + break; + default: + if (is_utf8()) { + if (escape.cp < 0x100) { + return format_to(out, FMT_STRING("\\x{:02x}"), escape.cp); + } + if (escape.cp < 0x10000) { + return format_to(out, FMT_STRING("\\u{:04x}"), escape.cp); + } + if (escape.cp < 0x110000) { + return format_to(out, FMT_STRING("\\U{:08x}"), escape.cp); + } + } + for (char escape_char : basic_string_view( + escape.begin, to_unsigned(escape.end - escape.begin))) { + out = format_to(out, FMT_STRING("\\x{:02x}"), + static_cast>(escape_char)); + } + return out; + } + *out++ = c; + return out; +} + +template +auto write_escaped_string(OutputIt out, basic_string_view str) + -> OutputIt { + *out++ = '"'; + auto begin = str.begin(), end = str.end(); + do { + auto escape = find_escape(begin, end); + out = copy_str(begin, escape.begin, out); + begin = escape.end; + if (!begin) break; + out = write_escaped_cp(out, escape); + } while (begin != end); + *out++ = '"'; + return out; +} + +template +auto write_escaped_char(OutputIt out, Char v) -> OutputIt { + *out++ = v; + return out; +} + +template +auto write_escaped_char(OutputIt out, char v) -> OutputIt { + *out++ = '\''; + if ((needs_escape(static_cast(v)) && v != '"') || v == '\'') { + out = write_escaped_cp( + out, find_escape_result{&v, &v + 1, static_cast(v)}); + } else { + *out++ = v; + } + *out++ = '\''; + return out; +} + +template +FMT_CONSTEXPR auto write_char(OutputIt out, Char value, + const basic_format_specs& specs) + -> OutputIt { + bool is_debug = specs.type == presentation_type::debug; + return write_padded(out, specs, 1, [=](reserve_iterator it) { + if (is_debug) return write_escaped_char(it, value); + *it++ = value; + return it; + }); +} +template +FMT_CONSTEXPR auto write(OutputIt out, Char value, + const basic_format_specs& specs, + locale_ref loc = {}) -> OutputIt { + return check_char_specs(specs) + ? write_char(out, value, specs) + : write(out, static_cast(value), specs, loc); +} + +// Data for write_int that doesn't depend on output iterator type. It is used to +// avoid template code bloat. +template struct write_int_data { + size_t size; + size_t padding; + + FMT_CONSTEXPR write_int_data(int num_digits, unsigned prefix, + const basic_format_specs& specs) + : size((prefix >> 24) + to_unsigned(num_digits)), padding(0) { + if (specs.align == align::numeric) { + auto width = to_unsigned(specs.width); + if (width > size) { + padding = width - size; + size = width; + } + } else if (specs.precision > num_digits) { + size = (prefix >> 24) + to_unsigned(specs.precision); + padding = to_unsigned(specs.precision - num_digits); + } + } +}; + +// Writes an integer in the format +// +// where are written by write_digits(it). +// prefix contains chars in three lower bytes and the size in the fourth byte. 
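+// (Layout: <left-padding><prefix><numeric-padding><digits><right-padding>,
+// where <digits> are produced by the write_digits callback. E.g. a '-' sign
+// is encoded as 0x01000000 | '-'; prefix_append below packs "0x" on top for
+// the alternate hex form, bumping the size byte to 3.)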
+template <typename OutputIt, typename Char, typename W>
+FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, int num_digits,
+                                        unsigned prefix,
+                                        const basic_format_specs<Char>& specs,
+                                        W write_digits) -> OutputIt {
+  // Slightly faster check for specs.width == 0 && specs.precision == -1.
+  if ((specs.width | (specs.precision + 1)) == 0) {
+    auto it = reserve(out, to_unsigned(num_digits) + (prefix >> 24));
+    if (prefix != 0) {
+      for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8)
+        *it++ = static_cast<Char>(p & 0xff);
+    }
+    return base_iterator(out, write_digits(it));
+  }
+  auto data = write_int_data<Char>(num_digits, prefix, specs);
+  return write_padded<align::right>(
+      out, specs, data.size, [=](reserve_iterator<OutputIt> it) {
+        for (unsigned p = prefix & 0xffffff; p != 0; p >>= 8)
+          *it++ = static_cast<Char>(p & 0xff);
+        it = detail::fill_n(it, data.padding, static_cast<Char>('0'));
+        return write_digits(it);
+      });
+}
+
+template <typename Char> class digit_grouping {
+ private:
+  thousands_sep_result<Char> sep_;
+
+  struct next_state {
+    std::string::const_iterator group;
+    int pos;
+  };
+  next_state initial_state() const { return {sep_.grouping.begin(), 0}; }
+
+  // Returns the next digit group separator position.
+  int next(next_state& state) const {
+    if (!sep_.thousands_sep) return max_value<int>();
+    if (state.group == sep_.grouping.end())
+      return state.pos += sep_.grouping.back();
+    if (*state.group <= 0 || *state.group == max_value<char>())
+      return max_value<int>();
+    state.pos += *state.group++;
+    return state.pos;
+  }
+
+ public:
+  explicit digit_grouping(locale_ref loc, bool localized = true) {
+    if (localized)
+      sep_ = thousands_sep<Char>(loc);
+    else
+      sep_.thousands_sep = Char();
+  }
+  explicit digit_grouping(thousands_sep_result<Char> sep) : sep_(sep) {}
+
+  Char separator() const { return sep_.thousands_sep; }
+
+  int count_separators(int num_digits) const {
+    int count = 0;
+    auto state = initial_state();
+    while (num_digits > next(state)) ++count;
+    return count;
+  }
+
+  // Applies grouping to digits and writes the output to out.
+  template <typename Out, typename C>
+  Out apply(Out out, basic_string_view<C> digits) const {
+    auto num_digits = static_cast<int>(digits.size());
+    auto separators = basic_memory_buffer<int>();
+    separators.push_back(0);
+    auto state = initial_state();
+    while (int i = next(state)) {
+      if (i >= num_digits) break;
+      separators.push_back(i);
+    }
+    for (int i = 0, sep_index = static_cast<int>(separators.size() - 1);
+         i < num_digits; ++i) {
+      if (num_digits - i == separators[sep_index]) {
+        *out++ = separator();
+        --sep_index;
+      }
+      *out++ = static_cast<Char>(digits[to_unsigned(i)]);
+    }
+    return out;
+  }
+};
+
+template <typename OutputIt, typename UInt, typename Char>
+auto write_int_localized(OutputIt out, UInt value, unsigned prefix,
+                         const basic_format_specs<Char>& specs,
+                         const digit_grouping<Char>& grouping) -> OutputIt {
+  static_assert(std::is_same<uint64_or_128_t<UInt>, UInt>::value, "");
+  int num_digits = count_digits(value);
+  char digits[40];
+  format_decimal(digits, value, num_digits);
+  unsigned size = to_unsigned((prefix != 0 ? 1 : 0) + num_digits +
+                              grouping.count_separators(num_digits));
+  return write_padded<align::right>(
+      out, specs, size, size, [&](reserve_iterator<OutputIt> it) {
+        if (prefix != 0) *it++ = static_cast<Char>(prefix);
+        return grouping.apply(it, string_view(digits, to_unsigned(num_digits)));
+      });
+}
+
+template <typename OutputIt, typename UInt, typename Char>
+auto write_int_localized(OutputIt& out, UInt value, unsigned prefix,
+                         const basic_format_specs<Char>& specs, locale_ref loc)
+    -> bool {
+  auto grouping = digit_grouping<Char>(loc);
+  out = write_int_localized(out, value, prefix, specs, grouping);
+  return true;
+}
+
+FMT_CONSTEXPR inline void prefix_append(unsigned& prefix, unsigned value) {
+  prefix |= prefix != 0 ?
value << 8 : value; + prefix += (1u + (value > 0xff ? 1 : 0)) << 24; +} + +template struct write_int_arg { + UInt abs_value; + unsigned prefix; +}; + +template +FMT_CONSTEXPR auto make_write_int_arg(T value, sign_t sign) + -> write_int_arg> { + auto prefix = 0u; + auto abs_value = static_cast>(value); + if (is_negative(value)) { + prefix = 0x01000000 | '-'; + abs_value = 0 - abs_value; + } else { + constexpr const unsigned prefixes[4] = {0, 0, 0x1000000u | '+', + 0x1000000u | ' '}; + prefix = prefixes[sign]; + } + return {abs_value, prefix}; +} + +template +FMT_CONSTEXPR FMT_INLINE auto write_int(OutputIt out, write_int_arg arg, + const basic_format_specs& specs, + locale_ref loc) -> OutputIt { + static_assert(std::is_same>::value, ""); + auto abs_value = arg.abs_value; + auto prefix = arg.prefix; + switch (specs.type) { + case presentation_type::none: + case presentation_type::dec: { + if (specs.localized && + write_int_localized(out, static_cast>(abs_value), + prefix, specs, loc)) { + return out; + } + auto num_digits = count_digits(abs_value); + return write_int( + out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_decimal(it, abs_value, num_digits).end; + }); + } + case presentation_type::hex_lower: + case presentation_type::hex_upper: { + bool upper = specs.type == presentation_type::hex_upper; + if (specs.alt) + prefix_append(prefix, unsigned(upper ? 'X' : 'x') << 8 | '0'); + int num_digits = count_digits<4>(abs_value); + return write_int( + out, num_digits, prefix, specs, [=](reserve_iterator it) { + return format_uint<4, Char>(it, abs_value, num_digits, upper); + }); + } + case presentation_type::bin_lower: + case presentation_type::bin_upper: { + bool upper = specs.type == presentation_type::bin_upper; + if (specs.alt) + prefix_append(prefix, unsigned(upper ? 'B' : 'b') << 8 | '0'); + int num_digits = count_digits<1>(abs_value); + return write_int(out, num_digits, prefix, specs, + [=](reserve_iterator it) { + return format_uint<1, Char>(it, abs_value, num_digits); + }); + } + case presentation_type::oct: { + int num_digits = count_digits<3>(abs_value); + // Octal prefix '0' is counted as a digit, so only add it if precision + // is not greater than the number of digits. + if (specs.alt && specs.precision <= num_digits && abs_value != 0) + prefix_append(prefix, '0'); + return write_int(out, num_digits, prefix, specs, + [=](reserve_iterator it) { + return format_uint<3, Char>(it, abs_value, num_digits); + }); + } + case presentation_type::chr: + return write_char(out, static_cast(abs_value), specs); + default: + throw_format_error("invalid type specifier"); + } + return out; +} +template +FMT_CONSTEXPR FMT_NOINLINE auto write_int_noinline( + OutputIt out, write_int_arg arg, const basic_format_specs& specs, + locale_ref loc) -> OutputIt { + return write_int(out, arg, specs, loc); +} +template ::value && + !std::is_same::value && + std::is_same>::value)> +FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, + const basic_format_specs& specs, + locale_ref loc) -> OutputIt { + return write_int_noinline(out, make_write_int_arg(value, specs.sign), specs, + loc); +} +// An inlined version of write used in format string compilation. 
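+// (Keeping write_int_noinline out-of-line above trims code size at ordinary
+// call sites; compiled format strings take this fully inlined path instead,
+// which appears to be the intent of the FMT_NOINLINE/FMT_INLINE split.)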
+template ::value && + !std::is_same::value && + !std::is_same>::value)> +FMT_CONSTEXPR FMT_INLINE auto write(OutputIt out, T value, + const basic_format_specs& specs, + locale_ref loc) -> OutputIt { + return write_int(out, make_write_int_arg(value, specs.sign), specs, loc); +} + +// An output iterator that counts the number of objects written to it and +// discards them. +class counting_iterator { + private: + size_t count_; + + public: + using iterator_category = std::output_iterator_tag; + using difference_type = std::ptrdiff_t; + using pointer = void; + using reference = void; + FMT_UNCHECKED_ITERATOR(counting_iterator); + + struct value_type { + template void operator=(const T&) {} + }; + + counting_iterator() : count_(0) {} + + size_t count() const { return count_; } + + counting_iterator& operator++() { + ++count_; + return *this; + } + counting_iterator operator++(int) { + auto it = *this; + ++*this; + return it; + } + + friend counting_iterator operator+(counting_iterator it, difference_type n) { + it.count_ += static_cast(n); + return it; + } + + value_type operator*() const { return {}; } +}; + +template +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view s, + const basic_format_specs& specs) -> OutputIt { + auto data = s.data(); + auto size = s.size(); + if (specs.precision >= 0 && to_unsigned(specs.precision) < size) + size = code_point_index(s, to_unsigned(specs.precision)); + bool is_debug = specs.type == presentation_type::debug; + size_t width = 0; + if (specs.width != 0) { + if (is_debug) + width = write_escaped_string(counting_iterator{}, s).count(); + else + width = compute_width(basic_string_view(data, size)); + } + return write_padded(out, specs, size, width, + [=](reserve_iterator it) { + if (is_debug) return write_escaped_string(it, s); + return copy_str(data, data + size, it); + }); +} +template +FMT_CONSTEXPR auto write(OutputIt out, + basic_string_view> s, + const basic_format_specs& specs, locale_ref) + -> OutputIt { + check_string_type_spec(specs.type); + return write(out, s, specs); +} +template +FMT_CONSTEXPR auto write(OutputIt out, const Char* s, + const basic_format_specs& specs, locale_ref) + -> OutputIt { + return check_cstring_type_spec(specs.type) + ? write(out, basic_string_view(s), specs, {}) + : write_ptr(out, bit_cast(s), &specs); +} + +template +FMT_CONSTEXPR20 auto write_nonfinite(OutputIt out, bool isnan, + basic_format_specs specs, + const float_specs& fspecs) -> OutputIt { + auto str = + isnan ? (fspecs.upper ? "NAN" : "nan") : (fspecs.upper ? "INF" : "inf"); + constexpr size_t str_size = 3; + auto sign = fspecs.sign; + auto size = str_size + (sign ? 1 : 0); + // Replace '0'-padding with space for non-finite values. + const bool is_zero_fill = + specs.fill.size() == 1 && *specs.fill.data() == static_cast('0'); + if (is_zero_fill) specs.fill[0] = static_cast(' '); + return write_padded(out, specs, size, [=](reserve_iterator it) { + if (sign) *it++ = detail::sign(sign); + return copy_str(str, str + str_size, it); + }); +} + +// A decimal floating-point number significand * pow(10, exp). 
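+// E.g. significand = "1234" with exponent = -2 represents 12.34.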
+struct big_decimal_fp {
+  const char* significand;
+  int significand_size;
+  int exponent;
+};
+
+constexpr auto get_significand_size(const big_decimal_fp& fp) -> int {
+  return fp.significand_size;
+}
+template <typename T>
+inline auto get_significand_size(const dragonbox::decimal_fp<T>& fp) -> int {
+  return count_digits(fp.significand);
+}
+
+template <typename Char, typename OutputIt>
+constexpr auto write_significand(OutputIt out, const char* significand,
+                                 int significand_size) -> OutputIt {
+  return copy_str<Char>(significand, significand + significand_size, out);
+}
+template <typename Char, typename OutputIt, typename UInt>
+inline auto write_significand(OutputIt out, UInt significand,
+                              int significand_size) -> OutputIt {
+  return format_decimal<Char>(out, significand, significand_size).end;
+}
+template <typename Char, typename OutputIt, typename T, typename Grouping>
+FMT_CONSTEXPR20 auto write_significand(OutputIt out, T significand,
+                                       int significand_size, int exponent,
+                                       const Grouping& grouping) -> OutputIt {
+  if (!grouping.separator()) {
+    out = write_significand<Char>(out, significand, significand_size);
+    return detail::fill_n(out, exponent, static_cast<Char>('0'));
+  }
+  auto buffer = memory_buffer();
+  write_significand<char>(appender(buffer), significand, significand_size);
+  detail::fill_n(appender(buffer), exponent, '0');
+  return grouping.apply(out, string_view(buffer.data(), buffer.size()));
+}
+
+template <typename Char, typename UInt,
+          FMT_ENABLE_IF(std::is_integral<UInt>::value)>
+inline auto write_significand(Char* out, UInt significand, int significand_size,
+                              int integral_size, Char decimal_point) -> Char* {
+  if (!decimal_point)
+    return format_decimal(out, significand, significand_size).end;
+  out += significand_size + 1;
+  Char* end = out;
+  int floating_size = significand_size - integral_size;
+  for (int i = floating_size / 2; i > 0; --i) {
+    out -= 2;
+    copy2(out, digits2(static_cast<std::size_t>(significand % 100)));
+    significand /= 100;
+  }
+  if (floating_size % 2 != 0) {
+    *--out = static_cast<Char>('0' + significand % 10);
+    significand /= 10;
+  }
+  *--out = decimal_point;
+  format_decimal(out - integral_size, significand, integral_size);
+  return end;
+}
+
+template <typename OutputIt, typename UInt, typename Char,
+          FMT_ENABLE_IF(!std::is_pointer<remove_reference_t<OutputIt>>::value)>
+inline auto write_significand(OutputIt out, UInt significand,
+                              int significand_size, int integral_size,
+                              Char decimal_point) -> OutputIt {
+  // Buffer is large enough to hold digits (digits10 + 1) and a decimal point.
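+  // (For uint64_t that is 19 + 2 = 21 chars: up to 20 digits plus the point.)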
+ Char buffer[digits10() + 2]; + auto end = write_significand(buffer, significand, significand_size, + integral_size, decimal_point); + return detail::copy_str_noinline(buffer, end, out); +} + +template +FMT_CONSTEXPR auto write_significand(OutputIt out, const char* significand, + int significand_size, int integral_size, + Char decimal_point) -> OutputIt { + out = detail::copy_str_noinline(significand, + significand + integral_size, out); + if (!decimal_point) return out; + *out++ = decimal_point; + return detail::copy_str_noinline(significand + integral_size, + significand + significand_size, out); +} + +template +FMT_CONSTEXPR20 auto write_significand(OutputIt out, T significand, + int significand_size, int integral_size, + Char decimal_point, + const Grouping& grouping) -> OutputIt { + if (!grouping.separator()) { + return write_significand(out, significand, significand_size, integral_size, + decimal_point); + } + auto buffer = basic_memory_buffer(); + write_significand(buffer_appender(buffer), significand, + significand_size, integral_size, decimal_point); + grouping.apply( + out, basic_string_view(buffer.data(), to_unsigned(integral_size))); + return detail::copy_str_noinline(buffer.data() + integral_size, + buffer.end(), out); +} + +template > +FMT_CONSTEXPR20 auto do_write_float(OutputIt out, const DecimalFP& fp, + const basic_format_specs& specs, + float_specs fspecs, locale_ref loc) + -> OutputIt { + auto significand = fp.significand; + int significand_size = get_significand_size(fp); + constexpr Char zero = static_cast('0'); + auto sign = fspecs.sign; + size_t size = to_unsigned(significand_size) + (sign ? 1 : 0); + using iterator = reserve_iterator; + + Char decimal_point = + fspecs.locale ? detail::decimal_point(loc) : static_cast('.'); + + int output_exp = fp.exponent + significand_size - 1; + auto use_exp_format = [=]() { + if (fspecs.format == float_format::exp) return true; + if (fspecs.format != float_format::general) return false; + // Use the fixed notation if the exponent is in [exp_lower, exp_upper), + // e.g. 0.0001 instead of 1e-04. Otherwise use the exponent notation. + const int exp_lower = -4, exp_upper = 16; + return output_exp < exp_lower || + output_exp >= (fspecs.precision > 0 ? fspecs.precision : exp_upper); + }; + if (use_exp_format()) { + int num_zeros = 0; + if (fspecs.showpoint) { + num_zeros = fspecs.precision - significand_size; + if (num_zeros < 0) num_zeros = 0; + size += to_unsigned(num_zeros); + } else if (significand_size == 1) { + decimal_point = Char(); + } + auto abs_output_exp = output_exp >= 0 ? output_exp : -output_exp; + int exp_digits = 2; + if (abs_output_exp >= 100) exp_digits = abs_output_exp >= 1000 ? 4 : 3; + + size += to_unsigned((decimal_point ? 1 : 0) + 2 + exp_digits); + char exp_char = fspecs.upper ? 'E' : 'e'; + auto write = [=](iterator it) { + if (sign) *it++ = detail::sign(sign); + // Insert a decimal point after the first digit and add an exponent. + it = write_significand(it, significand, significand_size, 1, + decimal_point); + if (num_zeros > 0) it = detail::fill_n(it, num_zeros, zero); + *it++ = static_cast(exp_char); + return write_exponent(output_exp, it); + }; + return specs.width > 0 ? 
write_padded(out, specs, size, write) + : base_iterator(out, write(reserve(out, size))); + } + + int exp = fp.exponent + significand_size; + if (fp.exponent >= 0) { + // 1234e5 -> 123400000[.0+] + size += to_unsigned(fp.exponent); + int num_zeros = fspecs.precision - exp; + abort_fuzzing_if(num_zeros > 5000); + if (fspecs.showpoint) { + ++size; + if (num_zeros <= 0 && fspecs.format != float_format::fixed) num_zeros = 1; + if (num_zeros > 0) size += to_unsigned(num_zeros); + } + auto grouping = Grouping(loc, fspecs.locale); + size += to_unsigned(grouping.count_separators(exp)); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) *it++ = detail::sign(sign); + it = write_significand(it, significand, significand_size, + fp.exponent, grouping); + if (!fspecs.showpoint) return it; + *it++ = decimal_point; + return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; + }); + } else if (exp > 0) { + // 1234e-2 -> 12.34[0+] + int num_zeros = fspecs.showpoint ? fspecs.precision - significand_size : 0; + size += 1 + to_unsigned(num_zeros > 0 ? num_zeros : 0); + auto grouping = Grouping(loc, fspecs.locale); + size += to_unsigned(grouping.count_separators(significand_size)); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) *it++ = detail::sign(sign); + it = write_significand(it, significand, significand_size, exp, + decimal_point, grouping); + return num_zeros > 0 ? detail::fill_n(it, num_zeros, zero) : it; + }); + } + // 1234e-6 -> 0.001234 + int num_zeros = -exp; + if (significand_size == 0 && fspecs.precision >= 0 && + fspecs.precision < num_zeros) { + num_zeros = fspecs.precision; + } + bool pointy = num_zeros != 0 || significand_size != 0 || fspecs.showpoint; + size += 1 + (pointy ? 1 : 0) + to_unsigned(num_zeros); + return write_padded(out, specs, size, [&](iterator it) { + if (sign) *it++ = detail::sign(sign); + *it++ = zero; + if (!pointy) return it; + *it++ = decimal_point; + it = detail::fill_n(it, num_zeros, zero); + return write_significand(it, significand, significand_size); + }); +} + +template class fallback_digit_grouping { + public: + constexpr fallback_digit_grouping(locale_ref, bool) {} + + constexpr Char separator() const { return Char(); } + + constexpr int count_separators(int) const { return 0; } + + template + constexpr Out apply(Out out, basic_string_view) const { + return out; + } +}; + +template +FMT_CONSTEXPR20 auto write_float(OutputIt out, const DecimalFP& fp, + const basic_format_specs& specs, + float_specs fspecs, locale_ref loc) + -> OutputIt { + if (is_constant_evaluated()) { + return do_write_float>(out, fp, specs, fspecs, + loc); + } else { + return do_write_float(out, fp, specs, fspecs, loc); + } +} + +template constexpr bool isnan(T value) { + return !(value >= value); // std::isnan doesn't support __float128. +} + +template +struct has_isfinite : std::false_type {}; + +template +struct has_isfinite> + : std::true_type {}; + +template ::value&& + has_isfinite::value)> +FMT_CONSTEXPR20 bool isfinite(T value) { + if (is_constant_evaluated()) return !isnan(value - value); + return std::isfinite(value); +} +template ::value)> +constexpr bool isfinite(T value) { + return value - value == 0; // std::isfinite doesn't support __float128. 
+} + +template ::value)> +FMT_INLINE FMT_CONSTEXPR bool signbit(T value) { + if (is_constant_evaluated()) { +#ifdef __cpp_if_constexpr + if constexpr (std::numeric_limits::is_iec559) { + auto bits = detail::bit_cast(static_cast(value)); + return (bits >> (num_bits() - 1)) != 0; + } +#endif + } + return std::signbit(static_cast(value)); +} + +template ::value)> +FMT_CONSTEXPR20 auto write(OutputIt out, T value, + basic_format_specs specs, locale_ref loc = {}) + -> OutputIt { + if (const_check(!is_supported_floating_point(value))) return out; + float_specs fspecs = parse_float_type_spec(specs); + fspecs.sign = specs.sign; + if (detail::signbit(value)) { // value < 0 is false for NaN so use signbit. + fspecs.sign = sign::minus; + value = -value; + } else if (fspecs.sign == sign::minus) { + fspecs.sign = sign::none; + } + + if (!detail::isfinite(value)) + return write_nonfinite(out, detail::isnan(value), specs, fspecs); + + if (specs.align == align::numeric && fspecs.sign) { + auto it = reserve(out, 1); + *it++ = detail::sign(fspecs.sign); + out = base_iterator(out, it); + fspecs.sign = sign::none; + if (specs.width != 0) --specs.width; + } + + memory_buffer buffer; + if (fspecs.format == float_format::hex) { + if (fspecs.sign) buffer.push_back(detail::sign(fspecs.sign)); + snprintf_float(convert_float(value), specs.precision, fspecs, buffer); + return write_bytes(out, {buffer.data(), buffer.size()}, + specs); + } + int precision = specs.precision >= 0 || specs.type == presentation_type::none + ? specs.precision + : 6; + if (fspecs.format == float_format::exp) { + if (precision == max_value()) + throw_format_error("number is too big"); + else + ++precision; + } else if (fspecs.format != float_format::fixed && precision == 0) { + precision = 1; + } + if (const_check(std::is_same())) fspecs.binary32 = true; + int exp = format_float(convert_float(value), precision, fspecs, buffer); + fspecs.precision = precision; + auto fp = big_decimal_fp{buffer.data(), static_cast(buffer.size()), exp}; + return write_float(out, fp, specs, fspecs, loc); +} + +template ::value)> +FMT_CONSTEXPR20 auto write(OutputIt out, T value) -> OutputIt { + if (is_constant_evaluated()) + return write(out, value, basic_format_specs()); + if (const_check(!is_supported_floating_point(value))) return out; + + auto fspecs = float_specs(); + if (detail::signbit(value)) { + fspecs.sign = sign::minus; + value = -value; + } + + constexpr auto specs = basic_format_specs(); + using floaty = conditional_t::value, double, T>; + using uint = typename dragonbox::float_info::carrier_uint; + uint mask = exponent_mask(); + if ((bit_cast(value) & mask) == mask) + return write_nonfinite(out, std::isnan(value), specs, fspecs); + + auto dec = dragonbox::to_decimal(static_cast(value)); + return write_float(out, dec, specs, fspecs, {}); +} + +template ::value && + !is_fast_float::value)> +inline auto write(OutputIt out, T value) -> OutputIt { + return write(out, value, basic_format_specs()); +} + +template +auto write(OutputIt out, monostate, basic_format_specs = {}, + locale_ref = {}) -> OutputIt { + FMT_ASSERT(false, ""); + return out; +} + +template +FMT_CONSTEXPR auto write(OutputIt out, basic_string_view value) + -> OutputIt { + auto it = reserve(out, value.size()); + it = copy_str_noinline(value.begin(), value.end(), it); + return base_iterator(out, it); +} + +template ::value)> +constexpr auto write(OutputIt out, const T& value) -> OutputIt { + return write(out, to_string_view(value)); +} + +template ::value && + !std::is_same::value && + 
!std::is_same::value)> +FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { + auto abs_value = static_cast>(value); + bool negative = is_negative(value); + // Don't do -abs_value since it trips unsigned-integer-overflow sanitizer. + if (negative) abs_value = ~abs_value + 1; + int num_digits = count_digits(abs_value); + auto size = (negative ? 1 : 0) + static_cast(num_digits); + auto it = reserve(out, size); + if (auto ptr = to_pointer(it, size)) { + if (negative) *ptr++ = static_cast('-'); + format_decimal(ptr, abs_value, num_digits); + return out; + } + if (negative) *it++ = static_cast('-'); + it = format_decimal(it, abs_value, num_digits).end; + return base_iterator(out, it); +} + +// FMT_ENABLE_IF() condition separated to workaround an MSVC bug. +template < + typename Char, typename OutputIt, typename T, + bool check = + std::is_enum::value && !std::is_same::value && + mapped_type_constant>::value != + type::custom_type, + FMT_ENABLE_IF(check)> +FMT_CONSTEXPR auto write(OutputIt out, T value) -> OutputIt { + return write(out, static_cast>(value)); +} + +template ::value)> +FMT_CONSTEXPR auto write(OutputIt out, T value, + const basic_format_specs& specs = {}, + locale_ref = {}) -> OutputIt { + return specs.type != presentation_type::none && + specs.type != presentation_type::string + ? write(out, value ? 1 : 0, specs, {}) + : write_bytes(out, value ? "true" : "false", specs); +} + +template +FMT_CONSTEXPR auto write(OutputIt out, Char value) -> OutputIt { + auto it = reserve(out, 1); + *it++ = value; + return base_iterator(out, it); +} + +template +FMT_CONSTEXPR_CHAR_TRAITS auto write(OutputIt out, const Char* value) + -> OutputIt { + if (!value) { + throw_format_error("string pointer is null"); + } else { + out = write(out, basic_string_view(value)); + } + return out; +} + +template ::value)> +auto write(OutputIt out, const T* value, + const basic_format_specs& specs = {}, locale_ref = {}) + -> OutputIt { + check_pointer_type_spec(specs.type, error_handler()); + return write_ptr(out, bit_cast(value), &specs); +} + +// A write overload that handles implicit conversions. +template > +FMT_CONSTEXPR auto write(OutputIt out, const T& value) -> enable_if_t< + std::is_class::value && !is_string::value && + !is_floating_point::value && !std::is_same::value && + !std::is_same().map(value))>::value, + OutputIt> { + return write(out, arg_mapper().map(value)); +} + +template > +FMT_CONSTEXPR auto write(OutputIt out, const T& value) + -> enable_if_t::value == type::custom_type, + OutputIt> { + using formatter_type = + conditional_t::value, + typename Context::template formatter_type, + fallback_formatter>; + auto ctx = Context(out, {}, {}); + return formatter_type().format(value, ctx); +} + +// An argument visitor that formats the argument and writes it via the output +// iterator. It's a class and not a generic lambda for compatibility with C++11. 
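+// The handle overload below formats user-defined types through their
+// formatter specializations, re-entering with an empty parse context.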
+template struct default_arg_formatter { + using iterator = buffer_appender; + using context = buffer_context; + + iterator out; + basic_format_args args; + locale_ref loc; + + template auto operator()(T value) -> iterator { + return write(out, value); + } + auto operator()(typename basic_format_arg::handle h) -> iterator { + basic_format_parse_context parse_ctx({}); + context format_ctx(out, args, loc); + h.format(parse_ctx, format_ctx); + return format_ctx.out(); + } +}; + +template struct arg_formatter { + using iterator = buffer_appender; + using context = buffer_context; + + iterator out; + const basic_format_specs& specs; + locale_ref locale; + + template + FMT_CONSTEXPR FMT_INLINE auto operator()(T value) -> iterator { + return detail::write(out, value, specs, locale); + } + auto operator()(typename basic_format_arg::handle) -> iterator { + // User-defined types are handled separately because they require access + // to the parse context. + return out; + } +}; + +template struct custom_formatter { + basic_format_parse_context& parse_ctx; + buffer_context& ctx; + + void operator()( + typename basic_format_arg>::handle h) const { + h.format(parse_ctx, ctx); + } + template void operator()(T) const {} +}; + +template +using is_integer = + bool_constant::value && !std::is_same::value && + !std::is_same::value && + !std::is_same::value>; + +template class width_checker { + public: + explicit FMT_CONSTEXPR width_checker(ErrorHandler& eh) : handler_(eh) {} + + template ::value)> + FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { + if (is_negative(value)) handler_.on_error("negative width"); + return static_cast(value); + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T) -> unsigned long long { + handler_.on_error("width is not integer"); + return 0; + } + + private: + ErrorHandler& handler_; +}; + +template class precision_checker { + public: + explicit FMT_CONSTEXPR precision_checker(ErrorHandler& eh) : handler_(eh) {} + + template ::value)> + FMT_CONSTEXPR auto operator()(T value) -> unsigned long long { + if (is_negative(value)) handler_.on_error("negative precision"); + return static_cast(value); + } + + template ::value)> + FMT_CONSTEXPR auto operator()(T) -> unsigned long long { + handler_.on_error("precision is not integer"); + return 0; + } + + private: + ErrorHandler& handler_; +}; + +template