Merge branch 'master' into vk_vfetch

DrChat 2018-02-18 17:02:59 -06:00
commit 300656f788
37 changed files with 8433 additions and 2053 deletions

View File

@ -95,7 +95,18 @@ i.e. virtual 0xA0000000 == physical 0x00000000
Unfortunately, the 0xE0000000-0xFFFFFFFF range is unused in Xenia because
it maps to physical memory with a single page offset, which is impossible
to do under the Win32 API.
to do under the Win32 API. We can't fake this either, as this offset is
built into games - see the following sequence:
```
srwi r9, r10, 20 # r9 = r10 >> 20
clrlwi r10, r10, 3 # r10 = r10 & 0x1FFFFFFF (physical address)
addi r11, r9, 0x200
rlwinm r11, r11, 0,19,19 # r11 = r11 & 0x1000
add r11, r11, r10 # add 1 page to addresses > 0xE0000000
# r11 = address passed to GPU
```
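
For reference, a minimal C++ sketch of what this sequence computes on the host (the function name is illustrative, not part of Xenia):

```
#include <cstdint>

uint32_t GuestVirtualToGpuPhysical(uint32_t virtual_addr) {
  uint32_t physical = virtual_addr & 0x1FFFFFFF;  // clrlwi r10, r10, 3
  uint32_t page = ((virtual_addr >> 20) + 0x200)  // srwi r9 + addi r11
                  & 0x1000;                       // rlwinm r11, r11, 0,19,19
  return physical + page;  // one extra page iff virtual_addr >= 0xE0000000
}
```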
## Memory Management

View File

@ -26,13 +26,22 @@ void copy_128_aligned(void* dest, const void* src, size_t count) {
#if XE_ARCH_AMD64
void copy_and_swap_16_aligned(void* dest_ptr, const void* src_ptr,
size_t count) {
assert_zero(reinterpret_cast<uintptr_t>(src_ptr) & 0x1);
auto dest = reinterpret_cast<uint16_t*>(dest_ptr);
auto src = reinterpret_cast<const uint16_t*>(src_ptr);
size_t i;
for (i = 0; i + 8 <= count; i += 8) {
__m128i shufmask =
_mm_set_epi8(0x0E, 0x0F, 0x0C, 0x0D, 0x0A, 0x0B, 0x08, 0x09, 0x06, 0x07,
0x04, 0x05, 0x02, 0x03, 0x00, 0x01);
size_t i = 0;
size_t unaligned_words = (reinterpret_cast<uintptr_t>(src_ptr) & 0xF) / 2;
for (; unaligned_words > 0 && i < count; unaligned_words--, i++) {
// Copy up to 16 byte alignment.
dest[i] = byte_swap(src[i]);
}
for (; i + 8 <= count; i += 8) {
__m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
__m128i output =
_mm_or_si128(_mm_slli_epi16(input, 8), _mm_srli_epi16(input, 8));
__m128i output = _mm_shuffle_epi8(input, shufmask);
_mm_store_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
@ -44,11 +53,14 @@ void copy_and_swap_16_unaligned(void* dest_ptr, const void* src_ptr,
size_t count) {
auto dest = reinterpret_cast<uint16_t*>(dest_ptr);
auto src = reinterpret_cast<const uint16_t*>(src_ptr);
__m128i shufmask =
_mm_set_epi8(0x0E, 0x0F, 0x0C, 0x0D, 0x0A, 0x0B, 0x08, 0x09, 0x06, 0x07,
0x04, 0x05, 0x02, 0x03, 0x00, 0x01);
size_t i;
for (i = 0; i + 8 <= count; i += 8) {
__m128i input = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&src[i]));
__m128i output =
_mm_or_si128(_mm_slli_epi16(input, 8), _mm_srli_epi16(input, 8));
__m128i output = _mm_shuffle_epi8(input, shufmask);
_mm_storeu_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
@ -58,24 +70,22 @@ void copy_and_swap_16_unaligned(void* dest_ptr, const void* src_ptr,
void copy_and_swap_32_aligned(void* dest_ptr, const void* src_ptr,
size_t count) {
assert_zero(reinterpret_cast<uintptr_t>(src_ptr) & 0x3);
auto dest = reinterpret_cast<uint32_t*>(dest_ptr);
auto src = reinterpret_cast<const uint32_t*>(src_ptr);
__m128i byte2mask = _mm_set1_epi32(0x00FF0000);
__m128i byte3mask = _mm_set1_epi32(0x0000FF00);
size_t i;
for (i = 0; i + 4 <= count; i += 4) {
__m128i shufmask =
_mm_set_epi8(0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x04, 0x05,
0x06, 0x07, 0x00, 0x01, 0x02, 0x03);
size_t i = 0;
size_t unaligned_dwords = (reinterpret_cast<uintptr_t>(src_ptr) & 0xF) / 4;
for (; unaligned_dwords > 0 && i < count; unaligned_dwords--, i++) {
// Copy up to 16 byte alignment.
dest[i] = byte_swap(src[i]);
}
for (; i + 4 <= count; i += 4) {
__m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
// Do the four shifts.
__m128i byte1 = _mm_slli_epi32(input, 24);
__m128i byte2 = _mm_slli_epi32(input, 8);
__m128i byte3 = _mm_srli_epi32(input, 8);
__m128i byte4 = _mm_srli_epi32(input, 24);
// OR bytes together.
__m128i output = _mm_or_si128(byte1, byte4);
byte2 = _mm_and_si128(byte2, byte2mask);
output = _mm_or_si128(output, byte2);
byte3 = _mm_and_si128(byte3, byte3mask);
output = _mm_or_si128(output, byte3);
__m128i output = _mm_shuffle_epi8(input, shufmask);
_mm_store_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
@ -87,22 +97,14 @@ void copy_and_swap_32_unaligned(void* dest_ptr, const void* src_ptr,
size_t count) {
auto dest = reinterpret_cast<uint32_t*>(dest_ptr);
auto src = reinterpret_cast<const uint32_t*>(src_ptr);
__m128i byte2mask = _mm_set1_epi32(0x00FF0000);
__m128i byte3mask = _mm_set1_epi32(0x0000FF00);
__m128i shufmask =
_mm_set_epi8(0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x04, 0x05,
0x06, 0x07, 0x00, 0x01, 0x02, 0x03);
size_t i;
for (i = 0; i + 4 <= count; i += 4) {
__m128i input = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&src[i]));
// Do the four shifts.
__m128i byte1 = _mm_slli_epi32(input, 24);
__m128i byte2 = _mm_slli_epi32(input, 8);
__m128i byte3 = _mm_srli_epi32(input, 8);
__m128i byte4 = _mm_srli_epi32(input, 24);
// OR bytes together.
__m128i output = _mm_or_si128(byte1, byte4);
byte2 = _mm_and_si128(byte2, byte2mask);
output = _mm_or_si128(output, byte2);
byte3 = _mm_and_si128(byte3, byte3mask);
output = _mm_or_si128(output, byte3);
__m128i output = _mm_shuffle_epi8(input, shufmask);
_mm_storeu_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
@ -112,26 +114,22 @@ void copy_and_swap_32_unaligned(void* dest_ptr, const void* src_ptr,
void copy_and_swap_64_aligned(void* dest_ptr, const void* src_ptr,
size_t count) {
assert_zero(reinterpret_cast<uintptr_t>(src_ptr) & 0x7);
auto dest = reinterpret_cast<uint64_t*>(dest_ptr);
auto src = reinterpret_cast<const uint64_t*>(src_ptr);
__m128i byte2mask = _mm_set1_epi32(0x00FF0000);
__m128i byte3mask = _mm_set1_epi32(0x0000FF00);
size_t i;
for (i = 0; i + 2 <= count; i += 2) {
__m128i shufmask =
_mm_set_epi8(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
size_t i = 0;
size_t unaligned_qwords = (reinterpret_cast<uintptr_t>(src_ptr) & 0xF) / 8;
for (; unaligned_qwords > 0 && i < count; unaligned_qwords--, i++) {
// Copy up to 16 byte alignment.
dest[i] = byte_swap(src[i]);
}
for (; i + 2 <= count; i += 2) {
__m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(&src[i]));
// Do the four shifts.
__m128i byte1 = _mm_slli_epi32(input, 24);
__m128i byte2 = _mm_slli_epi32(input, 8);
__m128i byte3 = _mm_srli_epi32(input, 8);
__m128i byte4 = _mm_srli_epi32(input, 24);
// OR bytes together.
__m128i output = _mm_or_si128(byte1, byte4);
byte2 = _mm_and_si128(byte2, byte2mask);
output = _mm_or_si128(output, byte2);
byte3 = _mm_and_si128(byte3, byte3mask);
output = _mm_or_si128(output, byte3);
// Reorder the two words.
output = _mm_shuffle_epi32(output, _MM_SHUFFLE(2, 3, 0, 1));
__m128i output = _mm_shuffle_epi8(input, shufmask);
_mm_store_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
@ -143,24 +141,14 @@ void copy_and_swap_64_unaligned(void* dest_ptr, const void* src_ptr,
size_t count) {
auto dest = reinterpret_cast<uint64_t*>(dest_ptr);
auto src = reinterpret_cast<const uint64_t*>(src_ptr);
__m128i byte2mask = _mm_set1_epi32(0x00FF0000);
__m128i byte3mask = _mm_set1_epi32(0x0000FF00);
__m128i shufmask =
_mm_set_epi8(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, 0x00, 0x01,
0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
size_t i;
for (i = 0; i + 2 <= count; i += 2) {
__m128i input = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&src[i]));
// Do the four shifts.
__m128i byte1 = _mm_slli_epi32(input, 24);
__m128i byte2 = _mm_slli_epi32(input, 8);
__m128i byte3 = _mm_srli_epi32(input, 8);
__m128i byte4 = _mm_srli_epi32(input, 24);
// OR bytes together.
__m128i output = _mm_or_si128(byte1, byte4);
byte2 = _mm_and_si128(byte2, byte2mask);
output = _mm_or_si128(output, byte2);
byte3 = _mm_and_si128(byte3, byte3mask);
output = _mm_or_si128(output, byte3);
// Reorder the two words.
output = _mm_shuffle_epi32(output, _MM_SHUFFLE(2, 3, 0, 1));
__m128i output = _mm_shuffle_epi8(input, shufmask);
_mm_storeu_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
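
The hunks above replace the shift-and-OR/mask swaps with a single SSSE3 `_mm_shuffle_epi8`, and the aligned variants now peel scalar iterations until `src` reaches 16-byte alignment instead of asserting on it. A standalone sketch of the shuffle approach (assumes an SSSE3-capable host; not Xenia API):

```
#include <tmmintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  alignas(16) uint32_t src[4] = {0x11223344, 0xAABBCCDD, 0x00000001,
                                 0xDEADBEEF};
  alignas(16) uint32_t dst[4];
  // Per-32-bit-lane byte reversal; the same constant as in the diff.
  __m128i shufmask =
      _mm_set_epi8(0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x04, 0x05,
                   0x06, 0x07, 0x00, 0x01, 0x02, 0x03);
  __m128i input = _mm_load_si128(reinterpret_cast<const __m128i*>(src));
  _mm_store_si128(reinterpret_cast<__m128i*>(dst),
                  _mm_shuffle_epi8(input, shufmask));
  std::printf("%08X -> %08X\n", src[0], dst[0]);  // 11223344 -> 44332211
  return 0;
}
```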

View File

@ -12,6 +12,7 @@
#include <cstdint>
#include <string>
#include <type_traits>
#include <vector>
#include "xenia/base/assert.h"
@ -72,16 +73,25 @@ class RingBuffer {
}
template <typename T>
T Read(bool swap = false) {
static_assert(sizeof(T) <= 8, "Immediate read only supports basic types!");
T Read() {
static_assert(std::is_fundamental<T>::value,
"Immediate read only supports basic types!");
T imm;
size_t read = Read(reinterpret_cast<uint8_t*>(&imm), sizeof(T));
assert_true(read == sizeof(T));
if (swap) {
imm = xe::byte_swap(imm);
}
return imm;
}
template <typename T>
T ReadAndSwap() {
static_assert(std::is_fundamental<T>::value,
"Immediate read only supports basic types!");
T imm;
size_t read = Read(reinterpret_cast<uint8_t*>(&imm), sizeof(T));
assert_true(read == sizeof(T));
imm = xe::byte_swap(imm);
return imm;
}
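
Splitting `Read(bool swap)` into `Read()` and `ReadAndSwap()` moves the endianness decision to the call site, so each read compiles to a fixed path. A minimal standalone analogue of the pattern, assuming a raw byte cursor and GCC/Clang's `__builtin_bswap32` in place of `xe::byte_swap`:

```
#include <cstdint>
#include <cstring>

// Reads a big-endian u32 and advances the cursor; stands in for
// reader->ReadAndSwap<uint32_t>() as used by the command processor below.
uint32_t ReadAndSwap32(const uint8_t*& cursor) {
  uint32_t imm;
  std::memcpy(&imm, cursor, sizeof(imm));  // Read(&imm, sizeof(imm))
  cursor += sizeof(imm);
  return __builtin_bswap32(imm);           // xe::byte_swap(imm)
}
```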

View File

@ -6484,24 +6484,17 @@ struct CNTLZ_I8 : Sequence<CNTLZ_I8, I<OPCODE_CNTLZ, I8Op, I8Op>> {
e.lzcnt(i.dest.reg().cvt16(), i.dest.reg().cvt16());
e.sub(i.dest, 8);
} else {
Xbyak::Label jz, jend;
Xbyak::Label end;
e.inLocalLabel();
// BSR: searches $2 until MSB 1 found, stores idx (from bit 0) in $1
// if input is 0, results are undefined (and ZF is set)
e.bsr(i.dest, i.src1);
e.jz(jz); // Jump if zero
e.bsr(e.rax, i.src1); // ZF set if i.src1 is 0
e.mov(i.dest, 0x8);
e.jz(end);
// Invert the result (7 - i.dest)
e.xor_(i.dest, 0x7);
e.jmp(jend); // Jmp to end
e.xor_(e.rax, 0x7);
e.mov(i.dest, e.rax);
// src1 was zero, so write 8 to the dest reg
e.L(jz);
e.mov(i.dest, 8);
e.L(jend);
e.L(end);
e.outLocalLabel();
}
}
@ -6512,24 +6505,17 @@ struct CNTLZ_I16 : Sequence<CNTLZ_I16, I<OPCODE_CNTLZ, I8Op, I16Op>> {
// LZCNT: searches $2 until MSB 1 found, stores idx (from last bit) in $1
e.lzcnt(i.dest.reg().cvt32(), i.src1);
} else {
Xbyak::Label jz, jend;
Xbyak::Label end;
e.inLocalLabel();
// BSR: searches $2 until MSB 1 found, stores idx (from bit 0) in $1
// if input is 0, results are undefined (and ZF is set)
e.bsr(i.dest, i.src1);
e.jz(jz); // Jump if zero
e.bsr(e.rax, i.src1); // ZF set if i.src1 is 0
e.mov(i.dest, 0x10);
e.jz(end);
// Invert the result (15 - i.dest)
e.xor_(i.dest, 0xF);
e.jmp(jend); // Jmp to end
e.xor_(e.rax, 0x0F);
e.mov(i.dest, e.rax);
// src1 was zero, so write 16 to the dest reg
e.L(jz);
e.mov(i.dest, 16);
e.L(jend);
e.L(end);
e.outLocalLabel();
}
}
@ -6539,24 +6525,17 @@ struct CNTLZ_I32 : Sequence<CNTLZ_I32, I<OPCODE_CNTLZ, I8Op, I32Op>> {
if (e.IsFeatureEnabled(kX64EmitLZCNT)) {
e.lzcnt(i.dest.reg().cvt32(), i.src1);
} else {
Xbyak::Label jz, jend;
Xbyak::Label end;
e.inLocalLabel();
// BSR: searches $2 until MSB 1 found, stores idx (from bit 0) in $1
// if input is 0, results are undefined (and ZF is set)
e.bsr(i.dest, i.src1);
e.jz(jz); // Jump if zero
e.bsr(e.rax, i.src1); // ZF set if i.src1 is 0
e.mov(i.dest, 0x20);
e.jz(end);
// Invert the result (31 - i.dest)
e.xor_(i.dest, 0x1F);
e.jmp(jend); // Jmp to end
e.xor_(e.rax, 0x1F);
e.mov(i.dest, e.rax);
// src1 was zero, so write 32 to the dest reg
e.L(jz);
e.mov(i.dest, 32);
e.L(jend);
e.L(end);
e.outLocalLabel();
}
}
@ -6566,24 +6545,17 @@ struct CNTLZ_I64 : Sequence<CNTLZ_I64, I<OPCODE_CNTLZ, I8Op, I64Op>> {
if (e.IsFeatureEnabled(kX64EmitLZCNT)) {
e.lzcnt(i.dest.reg().cvt64(), i.src1);
} else {
Xbyak::Label jz, jend;
Xbyak::Label end;
e.inLocalLabel();
// BSR: searches $2 until MSB 1 found, stores idx (from bit 0) in $1
// if input is 0, results are undefined (and ZF is set)
e.bsr(i.dest, i.src1);
e.jz(jz); // Jump if zero
e.bsr(e.rax, i.src1); // ZF set if i.src1 is 0
e.mov(i.dest, 0x40);
e.jz(end);
// Invert the result (63 - i.dest)
e.xor_(i.dest, 0x3F);
e.jmp(jend); // Jmp to end
e.xor_(e.rax, 0x3F);
e.mov(i.dest, e.rax);
// src1 was zero, so write 64 to the dest reg
e.L(jz);
e.mov(i.dest, 64);
e.L(jend);
e.L(end);
e.outLocalLabel();
}
}
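
All four CNTLZ fallbacks now share the same shape: `bsr` into the `rax` scratch register, preload the destination with the operand width for the zero case, then `xor` to compute `width - 1 - index` (the `mov` between `bsr` and `jz` is safe because `mov` does not modify flags). The 32-bit case in plain C++:

```
#include <cstdint>

uint8_t CountLeadingZeros32(uint32_t x) {
  if (x == 0) return 32;                  // the preloaded-dest + jz path
  uint32_t idx = 0;
  for (uint32_t v = x; v >>= 1;) ++idx;   // index of the MSB, like bsr
  return static_cast<uint8_t>(idx ^ 31);  // idx ^ 0x1F == 31 - idx here
}
```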

View File

@ -240,12 +240,20 @@ bool MMIOHandler::IsRangeWatched(uint32_t physical_address, size_t length) {
for (auto it = access_watches_.begin(); it != access_watches_.end(); ++it) {
auto entry = *it;
if ((entry->address <= physical_address &&
entry->address + entry->length > physical_address) ||
(entry->address >= physical_address &&
entry->address < physical_address + length)) {
// This watch lies within the range.
entry->address + entry->length > physical_address + length)) {
// This range lies entirely within this watch.
return true;
}
// TODO(DrChat): Check if the range is partially covered, and subtract the
// covered portion if it is.
if ((entry->address <= physical_address &&
entry->address + entry->length > physical_address)) {
// The beginning of the range lies partially within this watch.
} else if ((entry->address < physical_address + length &&
entry->address + entry->length > physical_address + length)) {
// The ending of this range lies partially within this watch.
}
}
return false;
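
For reference, the checks written as plain interval predicates over half-open `[start, start + length)` ranges, mirroring the hunk's comparisons:

```
// watch = [ws, we), range = [rs, re)
bool WatchContainsRange(uint32_t ws, uint32_t we, uint32_t rs, uint32_t re) {
  return ws <= rs && we > re;  // full containment: IsRangeWatched == true
}
bool WatchCoversRangeStart(uint32_t ws, uint32_t we, uint32_t rs) {
  return ws <= rs && we > rs;  // partial: only the range's beginning covered
}
```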

View File

@ -77,7 +77,7 @@ class MMIOHandler {
// Fires and clears any access watches that overlap this range.
void InvalidateRange(uint32_t physical_address, size_t length);
// Returns true if /any/ part of this range is watched.
// Returns true if /all/ of this range is watched.
bool IsRangeWatched(uint32_t physical_address, size_t length);
protected:

View File

@ -960,7 +960,7 @@ int InstrEmit_rlwimix(PPCHIRBuilder& f, const InstrData& i) {
// RA <- r&m | (RA)&¬m
Value* v = f.LoadGPR(i.M.RT);
// (x||x)
v = f.Or(f.Shl(v, 32), f.And(v, f.LoadConstantUint64(0xFFFFFFFF)));
v = f.Or(f.Shl(v, 32), f.ZeroExtend(f.Truncate(v, INT32_TYPE), INT64_TYPE));
if (i.M.SH) {
v = f.RotateLeft(v, f.LoadConstantInt8(i.M.SH));
}
@ -1018,7 +1018,7 @@ int InstrEmit_rlwnmx(PPCHIRBuilder& f, const InstrData& i) {
f.And(f.Truncate(f.LoadGPR(i.M.SH), INT8_TYPE), f.LoadConstantInt8(0x1F));
Value* v = f.LoadGPR(i.M.RT);
// (x||x)
v = f.Or(f.Shl(v, 32), f.And(v, f.LoadConstantUint64(0xFFFFFFFF)));
v = f.Or(f.Shl(v, 32), f.ZeroExtend(f.Truncate(v, INT32_TYPE), INT64_TYPE));
v = f.RotateLeft(v, sh);
v = f.And(v, f.LoadConstantUint64(XEMASK(i.M.MB + 32, i.M.ME + 32)));
f.StoreGPR(i.M.RA, v);
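
Both hunks swap the `And` with 0xFFFFFFFF for a `Truncate`/`ZeroExtend` pair; either way the effect is the `(x||x)` doubling from the PowerPC spec, sketched below:

```
#include <cstdint>

// (x||x): duplicate the low 32 bits into the high half so a 64-bit rotate
// behaves like the PPC 32-bit rotate-left.
uint64_t DuplicateLow32(uint64_t v) {
  uint64_t lo = static_cast<uint32_t>(v);  // Truncate + ZeroExtend
  return (v << 32) | lo;                   // Shl(v, 32) | ...
}
```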

View File

@ -10,6 +10,7 @@
#include "xenia/gpu/command_processor.h"
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include "xenia/base/byte_stream.h"
@ -19,6 +20,7 @@
#include "xenia/base/ring_buffer.h"
#include "xenia/gpu/gpu_flags.h"
#include "xenia/gpu/graphics_system.h"
#include "xenia/gpu/registers.h"
#include "xenia/gpu/sampler_info.h"
#include "xenia/gpu/texture_info.h"
#include "xenia/gpu/xenos.h"
@ -166,6 +168,9 @@ void CommandProcessor::WorkerThreadMain() {
xe::store_and_swap<uint32_t>(
memory_->TranslatePhysical(read_ptr_writeback_ptr_), read_ptr_index_);
}
// FIXME: We're supposed to process the WAIT_UNTIL register at this point,
// but no games seem to actually use it.
}
ShutdownContext();
@ -438,7 +443,7 @@ void CommandProcessor::ExecutePacket(uint32_t ptr, uint32_t count) {
}
bool CommandProcessor::ExecutePacket(RingBuffer* reader) {
const uint32_t packet = reader->Read<uint32_t>(true);
const uint32_t packet = reader->ReadAndSwap<uint32_t>();
const uint32_t packet_type = packet >> 30;
if (packet == 0) {
trace_writer_.WritePacketStart(uint32_t(reader->read_ptr() - 4), 1);
@ -478,7 +483,7 @@ bool CommandProcessor::ExecutePacketType0(RingBuffer* reader, uint32_t packet) {
uint32_t base_index = (packet & 0x7FFF);
uint32_t write_one_reg = (packet >> 15) & 0x1;
for (uint32_t m = 0; m < count; m++) {
uint32_t reg_data = reader->Read<uint32_t>(true);
uint32_t reg_data = reader->ReadAndSwap<uint32_t>();
uint32_t target_index = write_one_reg ? base_index : base_index + m;
WriteRegister(target_index, reg_data);
}
@ -493,8 +498,8 @@ bool CommandProcessor::ExecutePacketType1(RingBuffer* reader, uint32_t packet) {
trace_writer_.WritePacketStart(uint32_t(reader->read_ptr() - 4), 3);
uint32_t reg_index_1 = packet & 0x7FF;
uint32_t reg_index_2 = (packet >> 11) & 0x7FF;
uint32_t reg_data_1 = reader->Read<uint32_t>(true);
uint32_t reg_data_2 = reader->Read<uint32_t>(true);
uint32_t reg_data_1 = reader->ReadAndSwap<uint32_t>();
uint32_t reg_data_2 = reader->ReadAndSwap<uint32_t>();
WriteRegister(reg_index_1, reg_data_1);
WriteRegister(reg_index_2, reg_data_2);
trace_writer_.WritePacketEnd();
@ -617,38 +622,38 @@ bool CommandProcessor::ExecutePacketType3(RingBuffer* reader, uint32_t packet) {
break;
case PM4_SET_BIN_MASK_LO: {
uint32_t value = reader->Read<uint32_t>(true);
uint32_t value = reader->ReadAndSwap<uint32_t>();
bin_mask_ = (bin_mask_ & 0xFFFFFFFF00000000ull) | value;
result = true;
} break;
case PM4_SET_BIN_MASK_HI: {
uint32_t value = reader->Read<uint32_t>(true);
uint32_t value = reader->ReadAndSwap<uint32_t>();
bin_mask_ =
(bin_mask_ & 0xFFFFFFFFull) | (static_cast<uint64_t>(value) << 32);
result = true;
} break;
case PM4_SET_BIN_SELECT_LO: {
uint32_t value = reader->Read<uint32_t>(true);
uint32_t value = reader->ReadAndSwap<uint32_t>();
bin_select_ = (bin_select_ & 0xFFFFFFFF00000000ull) | value;
result = true;
} break;
case PM4_SET_BIN_SELECT_HI: {
uint32_t value = reader->Read<uint32_t>(true);
uint32_t value = reader->ReadAndSwap<uint32_t>();
bin_select_ =
(bin_select_ & 0xFFFFFFFFull) | (static_cast<uint64_t>(value) << 32);
result = true;
} break;
case PM4_SET_BIN_MASK: {
assert_true(count == 2);
uint64_t val_hi = reader->Read<uint32_t>(true);
uint64_t val_lo = reader->Read<uint32_t>(true);
uint64_t val_hi = reader->ReadAndSwap<uint32_t>();
uint64_t val_lo = reader->ReadAndSwap<uint32_t>();
bin_mask_ = (val_hi << 32) | val_lo;
result = true;
} break;
case PM4_SET_BIN_SELECT: {
assert_true(count == 2);
uint64_t val_hi = reader->Read<uint32_t>(true);
uint64_t val_lo = reader->Read<uint32_t>(true);
uint64_t val_hi = reader->ReadAndSwap<uint32_t>();
uint64_t val_lo = reader->ReadAndSwap<uint32_t>();
bin_select_ = (val_hi << 32) | val_lo;
result = true;
} break;
@ -708,7 +713,7 @@ bool CommandProcessor::ExecutePacketType3_INTERRUPT(RingBuffer* reader,
SCOPE_profile_cpu_f("gpu");
// generate interrupt from the command stream
uint32_t cpu_mask = reader->Read<uint32_t>(true);
uint32_t cpu_mask = reader->ReadAndSwap<uint32_t>();
for (int n = 0; n < 6; n++) {
if (cpu_mask & (1 << n)) {
graphics_system_->DispatchInterruptCallback(1, n);
@ -730,13 +735,13 @@ bool CommandProcessor::ExecutePacketType3_XE_SWAP(RingBuffer* reader,
// VdSwap will post this to tell us we need to swap the screen/fire an
// interrupt.
// 63 words here, but only the first has any data.
uint32_t magic = reader->Read<uint32_t>(true);
uint32_t magic = reader->ReadAndSwap<uint32_t>();
assert_true(magic == 'SWAP');
// TODO(benvanik): only swap frontbuffer ptr.
uint32_t frontbuffer_ptr = reader->Read<uint32_t>(true);
uint32_t frontbuffer_width = reader->Read<uint32_t>(true);
uint32_t frontbuffer_height = reader->Read<uint32_t>(true);
uint32_t frontbuffer_ptr = reader->ReadAndSwap<uint32_t>();
uint32_t frontbuffer_width = reader->ReadAndSwap<uint32_t>();
uint32_t frontbuffer_height = reader->ReadAndSwap<uint32_t>();
reader->AdvanceRead((count - 4) * sizeof(uint32_t));
if (swap_mode_ == SwapMode::kNormal) {
@ -751,8 +756,8 @@ bool CommandProcessor::ExecutePacketType3_INDIRECT_BUFFER(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// indirect buffer dispatch
uint32_t list_ptr = CpuToGpu(reader->Read<uint32_t>(true));
uint32_t list_length = reader->Read<uint32_t>(true);
uint32_t list_ptr = CpuToGpu(reader->ReadAndSwap<uint32_t>());
uint32_t list_length = reader->ReadAndSwap<uint32_t>();
assert_zero(list_length & ~0xFFFFF);
list_length &= 0xFFFFF;
ExecuteIndirectBuffer(GpuToCpu(list_ptr), list_length);
@ -765,11 +770,11 @@ bool CommandProcessor::ExecutePacketType3_WAIT_REG_MEM(RingBuffer* reader,
SCOPE_profile_cpu_f("gpu");
// wait until a register or memory location is a specific value
uint32_t wait_info = reader->Read<uint32_t>(true);
uint32_t poll_reg_addr = reader->Read<uint32_t>(true);
uint32_t ref = reader->Read<uint32_t>(true);
uint32_t mask = reader->Read<uint32_t>(true);
uint32_t wait = reader->Read<uint32_t>(true);
uint32_t wait_info = reader->ReadAndSwap<uint32_t>();
uint32_t poll_reg_addr = reader->ReadAndSwap<uint32_t>();
uint32_t ref = reader->ReadAndSwap<uint32_t>();
uint32_t mask = reader->ReadAndSwap<uint32_t>();
uint32_t wait = reader->ReadAndSwap<uint32_t>();
bool matched = false;
do {
uint32_t value;
@ -846,9 +851,9 @@ bool CommandProcessor::ExecutePacketType3_REG_RMW(RingBuffer* reader,
uint32_t count) {
// register read/modify/write
// ? (used during shader upload and edram setup)
uint32_t rmw_info = reader->Read<uint32_t>(true);
uint32_t and_mask = reader->Read<uint32_t>(true);
uint32_t or_mask = reader->Read<uint32_t>(true);
uint32_t rmw_info = reader->ReadAndSwap<uint32_t>();
uint32_t and_mask = reader->ReadAndSwap<uint32_t>();
uint32_t or_mask = reader->ReadAndSwap<uint32_t>();
uint32_t value = register_file_->values[rmw_info & 0x1FFF].u32;
if ((rmw_info >> 31) & 0x1) {
// & reg
@ -874,8 +879,8 @@ bool CommandProcessor::ExecutePacketType3_REG_TO_MEM(RingBuffer* reader,
// Copy Register to Memory (?)
// Count is 2, assuming a Register Addr and a Memory Addr.
uint32_t reg_addr = reader->Read<uint32_t>(true);
uint32_t mem_addr = reader->Read<uint32_t>(true);
uint32_t reg_addr = reader->ReadAndSwap<uint32_t>();
uint32_t mem_addr = reader->ReadAndSwap<uint32_t>();
uint32_t reg_val;
@ -894,9 +899,9 @@ bool CommandProcessor::ExecutePacketType3_REG_TO_MEM(RingBuffer* reader,
bool CommandProcessor::ExecutePacketType3_MEM_WRITE(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
uint32_t write_addr = reader->Read<uint32_t>(true);
uint32_t write_addr = reader->ReadAndSwap<uint32_t>();
for (uint32_t i = 0; i < count - 1; i++) {
uint32_t write_data = reader->Read<uint32_t>(true);
uint32_t write_data = reader->ReadAndSwap<uint32_t>();
auto endianness = static_cast<Endian>(write_addr & 0x3);
auto addr = write_addr & ~0x3;
@ -913,12 +918,12 @@ bool CommandProcessor::ExecutePacketType3_COND_WRITE(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// conditional write to memory or register
uint32_t wait_info = reader->Read<uint32_t>(true);
uint32_t poll_reg_addr = reader->Read<uint32_t>(true);
uint32_t ref = reader->Read<uint32_t>(true);
uint32_t mask = reader->Read<uint32_t>(true);
uint32_t write_reg_addr = reader->Read<uint32_t>(true);
uint32_t write_data = reader->Read<uint32_t>(true);
uint32_t wait_info = reader->ReadAndSwap<uint32_t>();
uint32_t poll_reg_addr = reader->ReadAndSwap<uint32_t>();
uint32_t ref = reader->ReadAndSwap<uint32_t>();
uint32_t mask = reader->ReadAndSwap<uint32_t>();
uint32_t write_reg_addr = reader->ReadAndSwap<uint32_t>();
uint32_t write_data = reader->ReadAndSwap<uint32_t>();
uint32_t value;
if (wait_info & 0x10) {
// Memory.
@ -980,7 +985,7 @@ bool CommandProcessor::ExecutePacketType3_EVENT_WRITE(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// generate an event that creates a write to memory when completed
uint32_t initiator = reader->Read<uint32_t>(true);
uint32_t initiator = reader->ReadAndSwap<uint32_t>();
// Writeback initiator.
WriteRegister(XE_GPU_REG_VGT_EVENT_INITIATOR, initiator & 0x3F);
if (count == 1) {
@ -997,9 +1002,9 @@ bool CommandProcessor::ExecutePacketType3_EVENT_WRITE_SHD(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// generate a VS|PS_done event
uint32_t initiator = reader->Read<uint32_t>(true);
uint32_t address = reader->Read<uint32_t>(true);
uint32_t value = reader->Read<uint32_t>(true);
uint32_t initiator = reader->ReadAndSwap<uint32_t>();
uint32_t address = reader->ReadAndSwap<uint32_t>();
uint32_t value = reader->ReadAndSwap<uint32_t>();
// Writeback initiator.
WriteRegister(XE_GPU_REG_VGT_EVENT_INITIATOR, initiator & 0x3F);
uint32_t data_value;
@ -1022,13 +1027,17 @@ bool CommandProcessor::ExecutePacketType3_EVENT_WRITE_EXT(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// generate a screen extent event
uint32_t initiator = reader->Read<uint32_t>(true);
uint32_t address = reader->Read<uint32_t>(true);
uint32_t initiator = reader->ReadAndSwap<uint32_t>();
uint32_t address = reader->ReadAndSwap<uint32_t>();
// Writeback initiator.
WriteRegister(XE_GPU_REG_VGT_EVENT_INITIATOR, initiator & 0x3F);
auto endianness = static_cast<Endian>(address & 0x3);
address &= ~0x3;
// Let us hope we can fake this.
// This callback tells the driver the xy coordinates affected by a previous
// drawcall.
// https://www.google.com/patents/US20060055701
uint16_t extents[] = {
0 >> 3, // min x
2560 >> 3, // max x
@ -1048,7 +1057,7 @@ bool CommandProcessor::ExecutePacketType3_EVENT_WRITE_ZPD(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
assert_true(count == 1);
uint32_t initiator = reader->Read<uint32_t>(true);
uint32_t initiator = reader->ReadAndSwap<uint32_t>();
// Writeback initiator.
WriteRegister(XE_GPU_REG_VGT_EVENT_INITIATOR, initiator & 0x3F);
@ -1065,8 +1074,10 @@ bool CommandProcessor::ExecutePacketType3_DRAW_INDX(RingBuffer* reader,
// initiate fetch of index buffer and draw
// if dword0 != 0, this is a conditional draw based on viz query.
// This ID matches the one issued in PM4_VIZ_QUERY
uint32_t dword0 = reader->Read<uint32_t>(true); // viz query info
uint32_t dword1 = reader->Read<uint32_t>(true);
// ID = dword0 & 0x3F;
// use = dword0 & 0x40;
uint32_t dword0 = reader->ReadAndSwap<uint32_t>(); // viz query info
uint32_t dword1 = reader->ReadAndSwap<uint32_t>();
uint32_t index_count = dword1 >> 16;
auto prim_type = static_cast<PrimitiveType>(dword1 & 0x3F);
bool is_indexed = false;
@ -1076,8 +1087,8 @@ bool CommandProcessor::ExecutePacketType3_DRAW_INDX(RingBuffer* reader,
// DI_SRC_SEL_DMA
// Indexed draw.
is_indexed = true;
index_buffer_info.guest_base = reader->Read<uint32_t>(true);
uint32_t index_size = reader->Read<uint32_t>(true);
index_buffer_info.guest_base = reader->ReadAndSwap<uint32_t>();
uint32_t index_size = reader->ReadAndSwap<uint32_t>();
index_buffer_info.endianness = static_cast<Endian>(index_size >> 30);
index_size &= 0x00FFFFFF;
bool index_32bit = (dword1 >> 11) & 0x1;
@ -1113,7 +1124,7 @@ bool CommandProcessor::ExecutePacketType3_DRAW_INDX_2(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// draw using supplied indices in packet
uint32_t dword0 = reader->Read<uint32_t>(true);
uint32_t dword0 = reader->ReadAndSwap<uint32_t>();
uint32_t index_count = dword0 >> 16;
auto prim_type = static_cast<PrimitiveType>(dword0 & 0x3F);
uint32_t src_sel = (dword0 >> 6) & 0x3;
@ -1139,7 +1150,7 @@ bool CommandProcessor::ExecutePacketType3_SET_CONSTANT(RingBuffer* reader,
// load constant into chip and to memory
// PM4_REG(reg) ((0x4 << 16) | (GSL_HAL_SUBBLOCK_OFFSET(reg)))
// reg - 0x2000
uint32_t offset_type = reader->Read<uint32_t>(true);
uint32_t offset_type = reader->ReadAndSwap<uint32_t>();
uint32_t index = offset_type & 0x7FF;
uint32_t type = (offset_type >> 16) & 0xFF;
switch (type) {
@ -1164,7 +1175,7 @@ bool CommandProcessor::ExecutePacketType3_SET_CONSTANT(RingBuffer* reader,
return true;
}
for (uint32_t n = 0; n < count - 1; n++, index++) {
uint32_t data = reader->Read<uint32_t>(true);
uint32_t data = reader->ReadAndSwap<uint32_t>();
WriteRegister(index, data);
}
return true;
@ -1173,10 +1184,10 @@ bool CommandProcessor::ExecutePacketType3_SET_CONSTANT(RingBuffer* reader,
bool CommandProcessor::ExecutePacketType3_SET_CONSTANT2(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
uint32_t offset_type = reader->Read<uint32_t>(true);
uint32_t offset_type = reader->ReadAndSwap<uint32_t>();
uint32_t index = offset_type & 0xFFFF;
for (uint32_t n = 0; n < count - 1; n++, index++) {
uint32_t data = reader->Read<uint32_t>(true);
uint32_t data = reader->ReadAndSwap<uint32_t>();
WriteRegister(index, data);
}
return true;
@ -1186,11 +1197,11 @@ bool CommandProcessor::ExecutePacketType3_LOAD_ALU_CONSTANT(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// load constants from memory
uint32_t address = reader->Read<uint32_t>(true);
uint32_t address = reader->ReadAndSwap<uint32_t>();
address &= 0x3FFFFFFF;
uint32_t offset_type = reader->Read<uint32_t>(true);
uint32_t offset_type = reader->ReadAndSwap<uint32_t>();
uint32_t index = offset_type & 0x7FF;
uint32_t size_dwords = reader->Read<uint32_t>(true);
uint32_t size_dwords = reader->ReadAndSwap<uint32_t>();
size_dwords &= 0xFFF;
uint32_t type = (offset_type >> 16) & 0xFF;
switch (type) {
@ -1224,10 +1235,10 @@ bool CommandProcessor::ExecutePacketType3_LOAD_ALU_CONSTANT(RingBuffer* reader,
bool CommandProcessor::ExecutePacketType3_SET_SHADER_CONSTANTS(
RingBuffer* reader, uint32_t packet, uint32_t count) {
uint32_t offset_type = reader->Read<uint32_t>(true);
uint32_t offset_type = reader->ReadAndSwap<uint32_t>();
uint32_t index = offset_type & 0xFFFF;
for (uint32_t n = 0; n < count - 1; n++, index++) {
uint32_t data = reader->Read<uint32_t>(true);
uint32_t data = reader->ReadAndSwap<uint32_t>();
WriteRegister(index, data);
}
return true;
@ -1239,10 +1250,10 @@ bool CommandProcessor::ExecutePacketType3_IM_LOAD(RingBuffer* reader,
SCOPE_profile_cpu_f("gpu");
// load sequencer instruction memory (pointer-based)
uint32_t addr_type = reader->Read<uint32_t>(true);
uint32_t addr_type = reader->ReadAndSwap<uint32_t>();
auto shader_type = static_cast<ShaderType>(addr_type & 0x3);
uint32_t addr = addr_type & ~0x3;
uint32_t start_size = reader->Read<uint32_t>(true);
uint32_t start_size = reader->ReadAndSwap<uint32_t>();
uint32_t start = start_size >> 16;
uint32_t size_dwords = start_size & 0xFFFF; // dwords
assert_true(start == 0);
@ -1270,8 +1281,8 @@ bool CommandProcessor::ExecutePacketType3_IM_LOAD_IMMEDIATE(RingBuffer* reader,
SCOPE_profile_cpu_f("gpu");
// load sequencer instruction memory (code embedded in packet)
uint32_t dword0 = reader->Read<uint32_t>(true);
uint32_t dword1 = reader->Read<uint32_t>(true);
uint32_t dword0 = reader->ReadAndSwap<uint32_t>();
uint32_t dword1 = reader->ReadAndSwap<uint32_t>();
auto shader_type = static_cast<ShaderType>(dword0);
uint32_t start_size = dword1;
uint32_t start = start_size >> 16;
@ -1301,7 +1312,7 @@ bool CommandProcessor::ExecutePacketType3_INVALIDATE_STATE(RingBuffer* reader,
uint32_t packet,
uint32_t count) {
// selective invalidation of state pointers
/*uint32_t mask =*/reader->Read<uint32_t>(true);
/*uint32_t mask =*/reader->ReadAndSwap<uint32_t>();
// driver_->InvalidateState(mask);
return true;
}
@ -1313,12 +1324,19 @@ bool CommandProcessor::ExecutePacketType3_VIZ_QUERY(RingBuffer* reader,
// http://www.google.com/patents/US20050195186
assert_true(count == 1);
// Some sort of ID?
// This appears to reset a viz query context.
// This ID matches the ID in conditional draw commands.
// Patent says the driver sets the viz_query register with info about the
// context ID.
uint32_t dword0 = reader->Read<uint32_t>(true);
uint32_t dword0 = reader->ReadAndSwap<uint32_t>();
uint32_t id = dword0 & 0x3F;
uint32_t end = dword0 & 0x80;
if (!end) {
// begin a new viz query @ id
WriteRegister(XE_GPU_REG_VGT_EVENT_INITIATOR, VIZQUERY_START);
XELOGGPU("Begin viz query ID %.2X", id);
} else {
// end the viz query
WriteRegister(XE_GPU_REG_VGT_EVENT_INITIATOR, VIZQUERY_END);
XELOGGPU("End viz query ID %.2X", id);
}
return true;
}

View File

@ -179,13 +179,13 @@ uint32_t GraphicsSystem::ReadRegister(uint32_t addr) {
return 0x08100748;
case 0x0F01: // RB_BC_CONTROL
return 0x0000200E;
case 0x194C: // R500_D1MODE_V_COUNTER(?) / scanline(?)
case 0x194C: // R500_D1MODE_V_COUNTER
return 0x000002D0;
case 0x1951: // ? vblank pending?
return 1;
case 0x1951: // interrupt status
return 1; // vblank
case 0x1961: // AVIVO_D1MODE_VIEWPORT_SIZE
// Screen res - 1280x720
// [width(0x0FFF), height(0x0FFF)]
// maximum [width(0x0FFF), height(0x0FFF)]
return 0x050002D0;
default:
if (!register_file_.GetRegisterInfo(r)) {

View File

@ -47,6 +47,10 @@ XE_GPU_REGISTER(0x0D04, kDword, SQ_EO_RT)
XE_GPU_REGISTER(0x0C85, kDword, PA_CL_ENHANCE)
// Set with WAIT_UNTIL = WAIT_3D_IDLECLEAN
XE_GPU_REGISTER(0x0E00, kDword, UNKNOWN_0E00)
XE_GPU_REGISTER(0x0E40, kDword, UNKNOWN_0E40)
XE_GPU_REGISTER(0x0E42, kDword, UNKNOWN_0E42)
XE_GPU_REGISTER(0x0F01, kDword, RB_BC_CONTROL)

View File

@ -16,10 +16,80 @@
#include "xenia/gpu/gpu_flags.h"
#include "xenia/gpu/vulkan/vulkan_gpu_flags.h"
#include "third_party/vulkan/vk_mem_alloc.h"
namespace xe {
namespace gpu {
namespace vulkan {
#if XE_ARCH_AMD64
void copy_cmp_swap_16_unaligned(void* dest_ptr, const void* src_ptr,
uint16_t cmp_value, size_t count) {
auto dest = reinterpret_cast<uint16_t*>(dest_ptr);
auto src = reinterpret_cast<const uint16_t*>(src_ptr);
__m128i shufmask =
_mm_set_epi8(0x0E, 0x0F, 0x0C, 0x0D, 0x0A, 0x0B, 0x08, 0x09, 0x06, 0x07,
0x04, 0x05, 0x02, 0x03, 0x00, 0x01);
__m128i cmpval = _mm_set1_epi16(cmp_value);
size_t i;
for (i = 0; i + 8 <= count; i += 8) {
__m128i input = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&src[i]));
__m128i output = _mm_shuffle_epi8(input, shufmask);
__m128i mask = _mm_cmpeq_epi16(output, cmpval);
output = _mm_or_si128(output, mask);
_mm_storeu_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
uint16_t value = byte_swap(src[i]);
dest[i] = value == cmp_value ? 0xFFFF : value;
}
}
void copy_cmp_swap_32_unaligned(void* dest_ptr, const void* src_ptr,
uint32_t cmp_value, size_t count) {
auto dest = reinterpret_cast<uint32_t*>(dest_ptr);
auto src = reinterpret_cast<const uint32_t*>(src_ptr);
__m128i shufmask =
_mm_set_epi8(0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B, 0x04, 0x05,
0x06, 0x07, 0x00, 0x01, 0x02, 0x03);
__m128i cmpval = _mm_set1_epi32(cmp_value);
size_t i;
for (i = 0; i + 4 <= count; i += 4) {
__m128i input = _mm_loadu_si128(reinterpret_cast<const __m128i*>(&src[i]));
__m128i output = _mm_shuffle_epi8(input, shufmask);
__m128i mask = _mm_cmpeq_epi32(output, cmpval);
output = _mm_or_si128(output, mask);
_mm_storeu_si128(reinterpret_cast<__m128i*>(&dest[i]), output);
}
for (; i < count; ++i) { // handle residual elements
uint32_t value = byte_swap(src[i]);
dest[i] = value == cmp_value ? 0xFFFFFFFF : value;
}
}
#else
void copy_cmp_swap_16_unaligned(void* dest_ptr, const void* src_ptr,
uint16_t cmp_value, size_t count) {
auto dest = reinterpret_cast<uint16_t*>(dest_ptr);
auto src = reinterpret_cast<const uint16_t*>(src_ptr);
for (size_t i = 0; i < count; ++i) {
uint16_t value = byte_swap(src[i]);
dest[i] = value == cmp_value ? 0xFFFF : value;
}
}
void copy_cmp_swap_32_unaligned(void* dest_ptr, const void* src_ptr,
uint32_t cmp_value, size_t count) {
auto dest = reinterpret_cast<uint32_t*>(dest_ptr);
auto src = reinterpret_cast<const uint32_t*>(src_ptr);
for (size_t i = 0; i < count; ++i) {
uint32_t value = byte_swap(src[i]);
dest[i] = value == cmp_value ? 0xFFFFFFFF : value;
}
}
#endif
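
Hypothetical usage on a little-endian host: six big-endian 16-bit indices with a guest primitive-reset index of 0x1234 (values invented for illustration). Matching indices come out as 0xFFFF, the reset index Vulkan expects for 16-bit index buffers:

```
// 0x1234 big-endian in guest memory reads as the host value 0x3412.
uint16_t src[] = {0x3412, 0x0100, 0x0200, 0x3412, 0x0300, 0x0400};
uint16_t dst[6];
copy_cmp_swap_16_unaligned(dst, src, 0x1234, 6);
// dst == {0xFFFF, 0x0001, 0x0002, 0xFFFF, 0x0003, 0x0004}
```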
using xe::ui::vulkan::CheckResult;
constexpr VkDeviceSize kConstantRegisterUniformRange =
@ -32,7 +102,7 @@ BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
device_,
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
capacity);
capacity, 4096);
}
BufferCache::~BufferCache() { Shutdown(); }
@ -47,6 +117,15 @@ VkResult BufferCache::Initialize() {
return status;
}
// Create a memory allocator for buffer data.
VmaAllocatorCreateInfo alloc_info = {
0, *device_, *device_, 0, 0, nullptr, nullptr,
};
status = vmaCreateAllocator(&alloc_info, &mem_allocator_);
if (status != VK_SUCCESS) {
return status;
}
// Descriptor pool used for all of our cached descriptors.
// In the steady state we don't allocate anything, so these are all manually
// managed.
@ -148,28 +227,23 @@ VkResult BufferCache::Initialize() {
}
void BufferCache::Shutdown() {
if (mem_allocator_) {
vmaDestroyAllocator(mem_allocator_);
mem_allocator_ = nullptr;
}
if (transient_descriptor_set_) {
vkFreeDescriptorSets(*device_, descriptor_pool_, 1,
&transient_descriptor_set_);
transient_descriptor_set_ = nullptr;
}
if (descriptor_set_layout_) {
vkDestroyDescriptorSetLayout(*device_, descriptor_set_layout_, nullptr);
descriptor_set_layout_ = nullptr;
}
if (descriptor_pool_) {
vkDestroyDescriptorPool(*device_, descriptor_pool_, nullptr);
descriptor_pool_ = nullptr;
}
VK_SAFE_DESTROY(vkDestroyDescriptorSetLayout, *device_,
descriptor_set_layout_, nullptr);
VK_SAFE_DESTROY(vkDestroyDescriptorPool, *device_, descriptor_pool_, nullptr);
transient_buffer_->Shutdown();
if (gpu_memory_pool_) {
vkFreeMemory(*device_, gpu_memory_pool_, nullptr);
gpu_memory_pool_ = nullptr;
}
VK_SAFE_DESTROY(vkFreeMemory, *device_, gpu_memory_pool_, nullptr);
}
std::pair<VkDeviceSize, VkDeviceSize> BufferCache::UploadConstantRegisters(
@ -276,13 +350,8 @@ std::pair<VkDeviceSize, VkDeviceSize> BufferCache::UploadConstantRegisters(
std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadIndexBuffer(
VkCommandBuffer command_buffer, uint32_t source_addr,
uint32_t source_length, IndexFormat format, VkFence fence) {
auto offset = FindCachedTransientData(source_addr, source_length);
if (offset != VK_WHOLE_SIZE) {
return {transient_buffer_->gpu_buffer(), offset};
}
// Allocate space in the buffer for our data.
offset = AllocateTransientData(source_length, fence);
auto offset = AllocateTransientData(source_length, fence);
if (offset == VK_WHOLE_SIZE) {
// OOM.
return {nullptr, VK_WHOLE_SIZE};
@ -290,17 +359,36 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadIndexBuffer(
const void* source_ptr = memory_->TranslatePhysical(source_addr);
// Copy data into the buffer.
// TODO(benvanik): get min/max indices and pass back?
uint32_t prim_reset_index =
register_file_->values[XE_GPU_REG_VGT_MULTI_PRIM_IB_RESET_INDX].u32;
bool prim_reset_enabled =
!!(register_file_->values[XE_GPU_REG_PA_SU_SC_MODE_CNTL].u32 & (1 << 21));
// Copy data into the buffer. If primitive reset is enabled, translate any
// primitive reset indices to something Vulkan understands.
// TODO(benvanik): memcpy then use compute shaders to swap?
if (format == IndexFormat::kInt16) {
// Endian::k8in16, swap half-words.
xe::copy_and_swap_16_aligned(transient_buffer_->host_base() + offset,
source_ptr, source_length / 2);
} else if (format == IndexFormat::kInt32) {
// Endian::k8in32, swap words.
xe::copy_and_swap_32_aligned(transient_buffer_->host_base() + offset,
source_ptr, source_length / 4);
if (prim_reset_enabled) {
if (format == IndexFormat::kInt16) {
// Endian::k8in16, swap half-words.
copy_cmp_swap_16_unaligned(
transient_buffer_->host_base() + offset, source_ptr,
static_cast<uint16_t>(prim_reset_index), source_length / 2);
} else if (format == IndexFormat::kInt32) {
// Endian::k8in32, swap words.
copy_cmp_swap_32_unaligned(transient_buffer_->host_base() + offset,
source_ptr, prim_reset_index,
source_length / 4);
}
} else {
if (format == IndexFormat::kInt16) {
// Endian::k8in16, swap half-words.
xe::copy_and_swap_16_unaligned(transient_buffer_->host_base() + offset,
source_ptr, source_length / 2);
} else if (format == IndexFormat::kInt32) {
// Endian::k8in32, swap words.
xe::copy_and_swap_32_unaligned(transient_buffer_->host_base() + offset,
source_ptr, source_length / 4);
}
}
transient_buffer_->Flush(offset, source_length);
@ -321,7 +409,6 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadIndexBuffer(
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
CacheTransientData(source_addr, source_length, offset);
return {transient_buffer_->gpu_buffer(), offset};
}
@ -333,29 +420,41 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadVertexBuffer(
return {transient_buffer_->gpu_buffer(), offset};
}
// Slow path :)
// Expand the region up to the allocation boundary
auto physical_heap = memory_->GetPhysicalHeap();
uint32_t upload_base = source_addr;
uint32_t upload_size = source_length;
// Ping the memory subsystem for allocation size.
// TODO(DrChat): Artifacting occurring in GripShift with this enabled.
// physical_heap->QueryBaseAndSize(&upload_base, &upload_size);
assert(upload_base <= source_addr);
uint32_t source_offset = source_addr - upload_base;
// Allocate space in the buffer for our data.
offset = AllocateTransientData(source_length, fence);
offset = AllocateTransientData(upload_size, fence);
if (offset == VK_WHOLE_SIZE) {
// OOM.
return {nullptr, VK_WHOLE_SIZE};
}
const void* source_ptr = memory_->TranslatePhysical(source_addr);
const void* upload_ptr = memory_->TranslatePhysical(upload_base);
// Copy data into the buffer.
// TODO(benvanik): memcpy then use compute shaders to swap?
if (endian == Endian::k8in32) {
// Endian::k8in32, swap words.
xe::copy_and_swap_32_aligned(transient_buffer_->host_base() + offset,
source_ptr, source_length / 4);
xe::copy_and_swap_32_unaligned(transient_buffer_->host_base() + offset,
upload_ptr, source_length / 4);
} else if (endian == Endian::k16in32) {
xe::copy_and_swap_16_in_32_aligned(transient_buffer_->host_base() + offset,
source_ptr, source_length / 4);
xe::copy_and_swap_16_in_32_unaligned(
transient_buffer_->host_base() + offset, upload_ptr, source_length / 4);
} else {
assert_always();
}
transient_buffer_->Flush(offset, source_length);
transient_buffer_->Flush(offset, upload_size);
// Append a barrier to the command buffer.
VkBufferMemoryBarrier barrier = {
@ -367,14 +466,14 @@ std::pair<VkBuffer, VkDeviceSize> BufferCache::UploadVertexBuffer(
VK_QUEUE_FAMILY_IGNORED,
transient_buffer_->gpu_buffer(),
offset,
source_length,
upload_size,
};
vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, 0, 0, nullptr, 1,
&barrier, 0, nullptr);
CacheTransientData(source_addr, source_length, offset);
return {transient_buffer_->gpu_buffer(), offset};
CacheTransientData(upload_base, upload_size, offset);
return {transient_buffer_->gpu_buffer(), offset + source_offset};
}
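
Worked offsets for the slow path above, if the `QueryBaseAndSize` call were enabled (hypothetical numbers):

```
// source_addr = 0x00100040, source_length = 0x100
// upload_base = 0x00100000, upload_size  = 0x1000  (allocation boundary)
// source_offset = source_addr - upload_base = 0x40
// The whole 0x1000-byte allocation is uploaded at `offset`, cached under
// upload_base, and the caller receives offset + 0x40 for its slice.
```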
VkDeviceSize BufferCache::AllocateTransientData(VkDeviceSize length,
@ -407,10 +506,22 @@ VkDeviceSize BufferCache::TryAllocateTransientData(VkDeviceSize length,
VkDeviceSize BufferCache::FindCachedTransientData(uint32_t guest_address,
uint32_t guest_length) {
uint64_t key = uint64_t(guest_length) << 32 | uint64_t(guest_address);
auto it = transient_cache_.find(key);
if (it != transient_cache_.end()) {
return it->second;
if (transient_cache_.empty()) {
// Short-circuit exit.
return VK_WHOLE_SIZE;
}
// Find the first element > guest_address
auto it = transient_cache_.upper_bound(guest_address);
if (it != transient_cache_.begin()) {
// it = last element <= guest_address
--it;
if ((it->first + it->second.first) >= (guest_address + guest_length)) {
// This data is contained within some existing transient data.
auto source_offset = static_cast<VkDeviceSize>(guest_address - it->first);
return it->second.second + source_offset;
}
}
return VK_WHOLE_SIZE;
@ -419,8 +530,17 @@ VkDeviceSize BufferCache::FindCachedTransientData(uint32_t guest_address,
void BufferCache::CacheTransientData(uint32_t guest_address,
uint32_t guest_length,
VkDeviceSize offset) {
uint64_t key = uint64_t(guest_length) << 32 | uint64_t(guest_address);
transient_cache_[key] = offset;
transient_cache_[guest_address] = {guest_length, offset};
// Erase any entries contained within the new range.
auto it = transient_cache_.upper_bound(guest_address);
while (it != transient_cache_.end()) {
if ((guest_address + guest_length) >= (it->first + it->second.first)) {
it = transient_cache_.erase(it);
} else {
break;
}
}
}
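
The new cache layout keys entries by `guest_address` with `{guest_length, offset}` values, so a lookup is `upper_bound` followed by one step back. A self-contained sketch of that lookup (the aliases are stand-ins for the Vulkan definitions):

```
#include <cstdint>
#include <map>
#include <utility>

using VkDeviceSize = uint64_t;              // stand-in
constexpr VkDeviceSize kWholeSize = ~0ull;  // stand-in for VK_WHOLE_SIZE

VkDeviceSize Find(
    const std::map<uint32_t, std::pair<uint32_t, VkDeviceSize>>& cache,
    uint32_t addr, uint32_t len) {
  auto it = cache.upper_bound(addr);  // first entry starting above addr
  if (it == cache.begin()) return kWholeSize;
  --it;                               // last entry starting at or below addr
  if (it->first + it->second.first >= addr + len) {
    return it->second.second + (addr - it->first);  // offset within the hit
  }
  return kWholeSize;
}
```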
void BufferCache::Flush(VkCommandBuffer command_buffer) {

View File

@ -18,6 +18,8 @@
#include "xenia/ui/vulkan/vulkan.h"
#include "xenia/ui/vulkan/vulkan_device.h"
#include "third_party/vulkan/vk_mem_alloc.h"
#include <map>
namespace xe {
@ -95,6 +97,15 @@ class BufferCache {
void Scavenge();
private:
// This represents an uploaded vertex buffer.
struct VertexBuffer {
uint32_t guest_address;
uint32_t size;
VmaAllocation alloc;
VmaAllocationInfo alloc_info;
};
// Allocates a block of memory in the transient buffer.
// When memory is not available fences are checked and space is reclaimed.
// Returns VK_WHOLE_SIZE if requested amount of memory is not available.
@ -115,11 +126,12 @@ class BufferCache {
ui::vulkan::VulkanDevice* device_ = nullptr;
VkDeviceMemory gpu_memory_pool_ = nullptr;
VmaAllocator mem_allocator_ = nullptr;
// Staging ringbuffer we cycle through fast. Used for data we don't
// plan on keeping past the current frame.
std::unique_ptr<ui::vulkan::CircularBuffer> transient_buffer_ = nullptr;
std::map<uint64_t, VkDeviceSize> transient_cache_;
std::map<uint32_t, std::pair<uint32_t, VkDeviceSize>> transient_cache_;
VkDescriptorPool descriptor_pool_ = nullptr;
VkDescriptorSetLayout descriptor_set_layout_ = nullptr;

View File

@ -534,16 +534,19 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
if (scissor_state_dirty) {
int32_t ws_x = regs.pa_sc_window_scissor_tl & 0x7FFF;
int32_t ws_y = (regs.pa_sc_window_scissor_tl >> 16) & 0x7FFF;
uint32_t ws_w = (regs.pa_sc_window_scissor_br & 0x7FFF) - ws_x;
uint32_t ws_h = ((regs.pa_sc_window_scissor_br >> 16) & 0x7FFF) - ws_y;
int32_t ws_w = (regs.pa_sc_window_scissor_br & 0x7FFF) - ws_x;
int32_t ws_h = ((regs.pa_sc_window_scissor_br >> 16) & 0x7FFF) - ws_y;
ws_x += window_offset_x;
ws_y += window_offset_y;
int32_t adj_x = ws_x - std::max(ws_x, 0);
int32_t adj_y = ws_y - std::max(ws_y, 0);
VkRect2D scissor_rect;
scissor_rect.offset.x = ws_x;
scissor_rect.offset.y = ws_y;
scissor_rect.extent.width = ws_w;
scissor_rect.extent.height = ws_h;
scissor_rect.offset.x = ws_x - adj_x;
scissor_rect.offset.y = ws_y - adj_y;
scissor_rect.extent.width = std::max(ws_w + adj_x, 0);
scissor_rect.extent.height = std::max(ws_h + adj_y, 0);
vkCmdSetScissor(command_buffer, 0, 1, &scissor_rect);
}
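
A worked example of the clamp with `window_offset_x = -8`, `ws_x = 0`, `ws_w = 1280`:

```
// ws_x += window_offset_x           -> -8
// adj_x = ws_x - std::max(ws_x, 0)  -> -8 - 0 = -8
// offset.x = ws_x - adj_x           -> -8 - (-8) = 0
// extent.width = max(1280 + (-8), 0) = 1272
// The rect is shifted to x = 0 and shrunk by 8 pixels instead of handing
// Vulkan a negative scissor origin, which is invalid.
```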
@ -1209,16 +1212,12 @@ PipelineCache::UpdateStatus PipelineCache::UpdateInputAssemblyState(
// glProvokingVertex(GL_FIRST_VERTEX_CONVENTION);
// }
// Primitive restart index is handled in the buffer cache.
if (regs.pa_su_sc_mode_cntl & (1 << 21)) {
state_info.primitiveRestartEnable = VK_TRUE;
} else {
state_info.primitiveRestartEnable = VK_FALSE;
}
// TODO(benvanik): no way to specify in Vulkan?
assert_true(regs.multi_prim_ib_reset_index == 0xFFFF ||
regs.multi_prim_ib_reset_index == 0xFFFFFF ||
regs.multi_prim_ib_reset_index == 0xFFFFFFFF);
// glPrimitiveRestartIndex(regs.multi_prim_ib_reset_index);
return UpdateStatus::kMismatch;
}

View File

@ -924,6 +924,7 @@ bool TextureCache::ConvertTexture2D(uint8_t* dest,
}
copy_region->bufferRowLength = src.size_2d.input_width;
copy_region->bufferImageHeight = src.size_2d.input_height;
copy_region->imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
copy_region->imageExtent = {src.size_2d.logical_width,
src.size_2d.logical_height, 1};
return true;
@ -932,6 +933,7 @@ bool TextureCache::ConvertTexture2D(uint8_t* dest,
TextureSwap(src.endianness, dest, host_address, src.input_length);
copy_region->bufferRowLength = src.size_2d.input_width;
copy_region->bufferImageHeight = src.size_2d.input_height;
copy_region->imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
copy_region->imageExtent = {src.size_2d.logical_width,
src.size_2d.logical_height, 1};
return true;
@ -996,6 +998,7 @@ bool TextureCache::ConvertTexture2D(uint8_t* dest,
copy_region->bufferRowLength = src.size_2d.input_width;
copy_region->bufferImageHeight = src.size_2d.input_height;
copy_region->imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
copy_region->imageExtent = {src.size_2d.logical_width,
src.size_2d.logical_height, 1};
return true;
@ -1013,8 +1016,9 @@ bool TextureCache::ConvertTextureCube(uint8_t* dest,
TextureSwap(src.endianness, dest, host_address, src.input_length);
copy_region->bufferRowLength = src.size_cube.input_width;
copy_region->bufferImageHeight = src.size_cube.input_height;
copy_region->imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 6};
copy_region->imageExtent = {src.size_cube.logical_width,
src.size_cube.logical_height, 6};
src.size_cube.logical_height, 1};
return true;
} else {
// TODO(benvanik): optimize this inner loop (or work by tiles).
@ -1053,8 +1057,9 @@ bool TextureCache::ConvertTextureCube(uint8_t* dest,
copy_region->bufferRowLength = src.size_cube.input_width;
copy_region->bufferImageHeight = src.size_cube.input_height;
copy_region->imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 6};
copy_region->imageExtent = {src.size_cube.logical_width,
src.size_cube.logical_height, 6};
src.size_cube.logical_height, 1};
return true;
}
@ -1250,7 +1255,9 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = dest->image;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1,
copy_region.imageSubresource.baseArrayLayer,
copy_region.imageSubresource.layerCount};
if (dest->format == VK_FORMAT_D16_UNORM_S8_UINT ||
dest->format == VK_FORMAT_D24_UNORM_S8_UINT ||
dest->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
@ -1264,7 +1271,6 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
// Now move the converted texture into the destination.
copy_region.bufferOffset = alloc->offset;
copy_region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
copy_region.imageOffset = {0, 0, 0};
vkCmdCopyBufferToImage(command_buffer, staging_buffer_.gpu_buffer(),
dest->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,

View File

@ -866,14 +866,13 @@ bool VulkanCommandProcessor::PopulateVertexBuffers(
// TODO: Make the buffer cache ... actually cache buffers. We can have
// a list of buffers that were cached, and store those in chunks in a
// multiple of the host's page size.
// WRITE WATCHES: We need to invalidate vertex buffers if they're written
// to. Since most vertex buffers aren't aligned to a page boundary, this
// means a watch may cover more than one vertex buffer.
// We need to maintain a list of write watches, and what memory ranges
// they cover. If a vertex buffer lies within a write watch's range, assign
// it to the watch. If there's partial alignment where a buffer lies within
// one watch and outside of it, should we create a new watch or extend the
// existing watch?
// So, we need to track all vertex buffers in a sorted map, and track all
// write watches in a sorted map. When a vertex buffer is uploaded, track
// all untracked pages with 1-page write watches. In the callback,
// invalidate any overlapping vertex buffers.
//
// We would keep the old transient buffer as a staging buffer, and upload
// to a GPU-only buffer that tracks all cached vertex buffers.
auto buffer_ref = buffer_cache_->UploadVertexBuffer(
current_setup_buffer_, physical_address, source_length,
static_cast<Endian>(fetch->endian), current_batch_fence_);

View File

@ -268,12 +268,15 @@ X_RESULT XmpApp::DispatchMessageSync(uint32_t message, uint32_t buffer_ptr,
}
case 0x0007000B: {
assert_true(!buffer_length || buffer_length == 8);
uint32_t xmp_client = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t float_ptr = xe::load_and_swap<uint32_t>(
buffer + 4); // out ptr to 4b - floating point
assert_true(xmp_client == 0x00000002);
XELOGD("XMPGetVolume(%.8X)", float_ptr);
xe::store_and_swap<float>(memory_->TranslateVirtual(float_ptr), volume_);
struct {
xe::be<uint32_t> xmp_client;
xe::be<uint32_t> volume_ptr;
}* args = memory_->TranslateVirtual<decltype(args)>(buffer_ptr);
assert_true(args->xmp_client == 0x00000002);
XELOGD("XMPGetVolume(%.8X)", uint32_t(args->volume_ptr));
xe::store_and_swap<float>(memory_->TranslateVirtual(args->volume_ptr),
volume_);
return X_ERROR_SUCCESS;
}
case 0x0007000C: {
@ -349,14 +352,20 @@ X_RESULT XmpApp::DispatchMessageSync(uint32_t message, uint32_t buffer_ptr,
return XMPDeleteTitlePlaylist(playlist_handle);
}
case 0x0007001A: {
// XMPSetPlaybackController
assert_true(!buffer_length || buffer_length == 12);
uint32_t xmp_client = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t unk1 = xe::load_and_swap<uint32_t>(buffer + 4);
uint32_t enabled = xe::load_and_swap<uint32_t>(buffer + 8);
assert_true(xmp_client == 0x00000002);
assert_zero(unk1);
XELOGD("XMPSetEnabled(%.8X, %.8X)", unk1, enabled);
disabled_ = enabled;
struct {
xe::be<uint32_t> xmp_client;
xe::be<uint32_t> controller;
xe::be<uint32_t> locked;
}* args = memory_->TranslateVirtual<decltype(args)>(buffer_ptr);
assert_true(args->xmp_client == 0x00000002);
assert_true(args->controller == 0x00000000);
XELOGD("XMPSetPlaybackController(%.8X, %.8X)", uint32_t(args->controller),
uint32_t(args->locked));
disabled_ = args->locked;
if (disabled_) {
XMPStop(0);
}
@ -364,22 +373,29 @@ X_RESULT XmpApp::DispatchMessageSync(uint32_t message, uint32_t buffer_ptr,
return X_ERROR_SUCCESS;
}
case 0x0007001B: {
// XMPGetPlaybackController
assert_true(!buffer_length || buffer_length == 12);
uint32_t xmp_client = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t unk_ptr =
xe::load_and_swap<uint32_t>(buffer + 4); // out ptr to 4b - expect 0
uint32_t disabled_ptr = xe::load_and_swap<uint32_t>(
buffer + 8); // out ptr to 4b - expect 1 (to skip)
assert_true(xmp_client == 0x00000002);
XELOGD("XMPGetEnabled(%.8X, %.8X)", unk_ptr, disabled_ptr);
xe::store_and_swap<uint32_t>(memory_->TranslateVirtual(unk_ptr), 0);
xe::store_and_swap<uint32_t>(memory_->TranslateVirtual(disabled_ptr),
disabled_);
struct {
xe::be<uint32_t> xmp_client;
xe::be<uint32_t> controller_ptr;
xe::be<uint32_t> locked_ptr;
}* args = memory_->TranslateVirtual<decltype(args)>(buffer_ptr);
assert_true(args->xmp_client == 0x00000002);
XELOGD("XMPGetPlaybackController(%.8X, %.8X, %.8X)",
uint32_t(args->xmp_client), uint32_t(args->controller_ptr),
uint32_t(args->locked_ptr));
xe::store_and_swap<uint32_t>(
memory_->TranslateVirtual(args->controller_ptr), 0);
xe::store_and_swap<uint32_t>(memory_->TranslateVirtual(args->locked_ptr),
0);
// Atrain spawns a thread 82437FD0 to call this in a tight loop forever.
xe::threading::Sleep(std::chrono::milliseconds(10));
return X_ERROR_SUCCESS;
}
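
The rewritten handlers replace field-by-field `load_and_swap` calls with a packed struct of `xe::be<uint32_t>` fields overlaid on the guest argument buffer. A minimal stand-in for the idea (struct name hypothetical; GCC/Clang's `__builtin_bswap32` assumed as the swap):

```
#include <cstdint>

// Minimal stand-in for xe::be<uint32_t>: stores big-endian, swaps on read.
struct be_u32 {
  uint32_t raw;
  operator uint32_t() const { return __builtin_bswap32(raw); }
};

struct PlaybackControllerArgs {  // hypothetical name
  be_u32 xmp_client;
  be_u32 controller;
  be_u32 locked;
};
// auto* args = reinterpret_cast<PlaybackControllerArgs*>(
//     memory->TranslateVirtual(buffer_ptr));
```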
case 0x00070029: {
// XMPGetPlaybackBehavior
assert_true(!buffer_length || buffer_length == 16);
uint32_t xmp_client = xe::load_and_swap<uint32_t>(buffer + 0);
uint32_t playback_mode_ptr = xe::load_and_swap<uint32_t>(buffer + 4);

View File

@ -273,14 +273,11 @@ dword_result_t NtQueryVirtualMemory(
return X_STATUS_INVALID_PARAMETER;
}
memory_basic_information_ptr->base_address =
static_cast<uint32_t>(alloc_info.base_address);
memory_basic_information_ptr->allocation_base =
static_cast<uint32_t>(alloc_info.allocation_base);
memory_basic_information_ptr->base_address = alloc_info.base_address;
memory_basic_information_ptr->allocation_base = alloc_info.allocation_base;
memory_basic_information_ptr->allocation_protect =
ToXdkProtectFlags(alloc_info.allocation_protect);
memory_basic_information_ptr->region_size =
static_cast<uint32_t>(alloc_info.region_size);
memory_basic_information_ptr->region_size = alloc_info.region_size;
uint32_t x_state = 0;
if (alloc_info.state & kMemoryAllocationReserve) {
x_state |= X_MEM_RESERVE;
@ -290,7 +287,7 @@ dword_result_t NtQueryVirtualMemory(
}
memory_basic_information_ptr->state = x_state;
memory_basic_information_ptr->protect = ToXdkProtectFlags(alloc_info.protect);
memory_basic_information_ptr->type = alloc_info.type;
memory_basic_information_ptr->type = X_MEM_PRIVATE;
return X_STATUS_SUCCESS;
}

View File

@ -171,12 +171,14 @@ bool Memory::Initialize() {
heaps_.vE0000000.Initialize(virtual_membase_, 0xE0000000, 0x1FD00000, 4096,
&heaps_.physical);
// Protect the first 64kb of memory.
// Protect the first and last 64kb of memory.
heaps_.v00000000.AllocFixed(
0x00000000, 64 * 1024, 64 * 1024,
0x00000000, 0x10000, 0x10000,
kMemoryAllocationReserve | kMemoryAllocationCommit,
!FLAGS_protect_zero ? kMemoryProtectRead | kMemoryProtectWrite
: kMemoryProtectNoAccess);
heaps_.physical.AllocFixed(0x1FFF0000, 0x10000, 0x10000,
kMemoryAllocationReserve, kMemoryProtectNoAccess);
// GPU writeback.
// 0xC... is physical, 0x7F... is virtual. We may need to overlay these.
@ -337,6 +339,8 @@ BaseHeap* Memory::LookupHeapByType(bool physical, uint32_t page_size) {
}
}
VirtualHeap* Memory::GetPhysicalHeap() { return &heaps_.physical; }
void Memory::Zero(uint32_t address, uint32_t size) {
std::memset(TranslateVirtual(address), 0, size);
}
@ -1094,16 +1098,19 @@ bool BaseHeap::QueryRegionInfo(uint32_t base_address,
out_info->region_size = 0;
out_info->state = 0;
out_info->protect = 0;
out_info->type = 0;
if (start_page_entry.state) {
// Committed/reserved region.
out_info->allocation_base = start_page_entry.base_address * page_size_;
out_info->allocation_protect = start_page_entry.allocation_protect;
out_info->allocation_size = start_page_entry.region_page_count * page_size_;
out_info->state = start_page_entry.state;
out_info->protect = start_page_entry.current_protect;
out_info->type = 0x20000;
// Scan forward and report the size of the region matching the initial
// base address's attributes.
for (uint32_t page_number = start_page_number;
page_number < start_page_number + start_page_entry.region_page_count;
page_number <
start_page_entry.base_address + start_page_entry.region_page_count;
++page_number) {
auto page_entry = page_table_[page_number];
if (page_entry.base_address != start_page_entry.base_address ||
@@ -1142,6 +1149,20 @@ bool BaseHeap::QuerySize(uint32_t address, uint32_t* out_size) {
return true;
}
bool BaseHeap::QueryBaseAndSize(uint32_t* in_out_address, uint32_t* out_size) {
uint32_t page_number = (*in_out_address - heap_base_) / page_size_;
if (page_number >= page_table_.size()) {
XELOGE("BaseHeap::QueryBaseAndSize base page out of range");
*out_size = 0;
return false;
}
auto global_lock = global_critical_region_.Acquire();
auto page_entry = page_table_[page_number];
*in_out_address = (page_entry.base_address * page_size_);
*out_size = (page_entry.region_page_count * page_size_);
return true;
}
bool BaseHeap::QueryProtect(uint32_t address, uint32_t* out_protect) {
uint32_t page_number = (address - heap_base_) / page_size_;
if (page_number >= page_table_.size()) {


@@ -56,6 +56,8 @@ struct HeapAllocationInfo {
uint32_t allocation_base;
// The memory protection option when the region was initially allocated.
uint32_t allocation_protect;
// The size specified when the region was initially allocated, in bytes.
uint32_t allocation_size;
// The size of the region beginning at the base address in which all pages
// have identical attributes, in bytes.
uint32_t region_size;
@@ -63,8 +65,6 @@ struct HeapAllocationInfo {
uint32_t state;
// The access protection of the pages in the region.
uint32_t protect;
// The type of pages in the region (private).
uint32_t type;
};
// Describes a single page in the page table.
@@ -144,6 +144,9 @@ class BaseHeap {
// Queries the size of the region containing the given address.
bool QuerySize(uint32_t address, uint32_t* out_size);
// Queries the base and size of a region containing the given address.
bool QueryBaseAndSize(uint32_t* in_out_address, uint32_t* out_size);
// Queries the current protection mode of the region containing the given
// address.
bool QueryProtect(uint32_t address, uint32_t* out_protect);
@@ -332,6 +335,9 @@ class Memory {
// Gets the heap with the given properties.
BaseHeap* LookupHeapByType(bool physical, uint32_t page_size);
// Gets the physical base heap.
VirtualHeap* GetPhysicalHeap();
// Dumps a map of all allocated memory to the log.
void DumpMap();
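
A hedged usage sketch for the new QueryBaseAndSize: given any address inside a region, it rewrites the address to the region base and reports the region length. `memory->LookupHeap(address)` is assumed as the address-to-heap resolver, and the concrete address is hypothetical:

```
uint32_t address = 0x40001234;  // hypothetical address inside some region
uint32_t size = 0;
auto* heap = memory->LookupHeap(address);  // assumed resolver
if (heap && heap->QueryBaseAndSize(&address, &size)) {
  // `address` now holds the region base, `size` its length in bytes.
  memory->Zero(address, size);
}
```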


@@ -42,7 +42,7 @@ CircularBuffer::CircularBuffer(VulkanDevice* device, VkBufferUsageFlags usage,
VkMemoryRequirements reqs;
vkGetBufferMemoryRequirements(*device_, gpu_buffer_, &reqs);
alignment_ = reqs.alignment;
alignment_ = xe::round_up(alignment, reqs.alignment);
}
CircularBuffer::~CircularBuffer() { Shutdown(); }


@@ -102,6 +102,7 @@ bool VulkanDevice::Initialize(DeviceInfo device_info) {
ENABLE_AND_EXPECT(shaderCullDistance);
ENABLE_AND_EXPECT(shaderStorageImageExtendedFormats);
ENABLE_AND_EXPECT(shaderTessellationAndGeometryPointSize);
ENABLE_AND_EXPECT(samplerAnisotropy);
ENABLE_AND_EXPECT(geometryShader);
ENABLE_AND_EXPECT(depthClamp);
ENABLE_AND_EXPECT(multiViewport);


@@ -26,10 +26,14 @@ namespace ui {
namespace vulkan {
#define VK_SAFE_DESTROY(fn, dev, obj, alloc) \
  if (obj) {                                 \
    fn(dev, obj, alloc);                     \
    obj = nullptr;                           \
  }
  do {                                       \
    if (obj) {                               \
      fn(dev, obj, alloc);                   \
      obj = nullptr;                         \
    }                                        \
  } while (0)
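
The rewrite wraps the macro body in `do { ... } while (0)` so an invocation expands to exactly one statement. A minimal sketch of the hazard the old `if`-based form carried (`need_cleanup` and `keep_fence_alive` are hypothetical names):

```
if (need_cleanup)
  VK_SAFE_DESTROY(vkDestroyFence, device, fence, nullptr);
else
  keep_fence_alive();
// Old macro expansion:
//   if (need_cleanup) if (obj) { ... }; else keep_fence_alive();
// The `;` terminates the outer `if` and orphans the `else` (compile error);
// drop the `;` and the `else` silently binds to the inner `if (obj)`.
// The do { ... } while (0) form is a single statement, so the caller's
// if/else pairing behaves as written.
```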
class Fence {
public:

File diff suppressed because it is too large.


@@ -137,6 +137,10 @@ struct loader_layer_properties {
struct loader_name_value enable_env_var;
uint32_t num_component_layers;
char (*component_layer_names)[MAX_STRING_SIZE];
struct {
char enumerate_instance_extension_properties[MAX_STRING_SIZE];
char enumerate_instance_layer_properties[MAX_STRING_SIZE];
} pre_instance_functions;
};
struct loader_layer_list {
@@ -375,7 +379,9 @@ static inline void loader_init_dispatch(void *obj, const void *data) {
// Global variables used across files
extern struct loader_struct loader;
extern THREAD_LOCAL_DECL struct loader_instance *tls_instance;
#if defined(_WIN32) && !defined(LOADER_DYNAMIC_LIB)
extern LOADER_PLATFORM_THREAD_ONCE_DEFINITION(once_init);
#endif
extern loader_platform_thread_mutex loader_lock;
extern loader_platform_thread_mutex loader_json_lock;
@@ -437,6 +443,7 @@ void loader_scanned_icd_clear(const struct loader_instance *inst, struct loader_
VkResult loader_icd_scan(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list);
void loader_layer_scan(const struct loader_instance *inst, struct loader_layer_list *instance_layers);
void loader_implicit_layer_scan(const struct loader_instance *inst, struct loader_layer_list *instance_layers);
bool loader_is_implicit_layer_enabled(const struct loader_instance *inst, const struct loader_layer_properties *prop);
VkResult loader_get_icd_loader_instance_extensions(const struct loader_instance *inst, struct loader_icd_tramp_list *icd_tramp_list,
struct loader_extension_list *inst_exts);
struct loader_icd_term *loader_get_icd_and_device(const VkDevice device, struct loader_device **found_dev, uint32_t *icd_index);

View File

@@ -26,6 +26,9 @@ project("vulkan-loader")
defines({
"VK_USE_PLATFORM_WIN32_KHR",
})
links({
"Cfgmgr32"
})
filter("platforms:not Windows")
removefiles("dirent_on_windows.c")
filter("platforms:Linux")


@@ -46,13 +46,8 @@ LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkI
void *addr;
addr = globalGetProcAddr(pName);
if (instance == VK_NULL_HANDLE) {
// Get entrypoint addresses that are global (no dispatchable object)
if (instance == VK_NULL_HANDLE || addr != NULL) {
return addr;
} else {
// If a global entrypoint return NULL
if (addr) return NULL;
}
struct loader_instance *ptr_instance = loader_get_instance(instance);
@@ -99,123 +94,178 @@ LOADER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDev
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName,
uint32_t *pPropertyCount,
VkExtensionProperties *pProperties) {
struct loader_extension_list *global_ext_list = NULL;
struct loader_layer_list instance_layers;
struct loader_extension_list local_ext_list;
struct loader_icd_tramp_list icd_tramp_list;
uint32_t copy_size;
VkResult res = VK_SUCCESS;
tls_instance = NULL;
memset(&local_ext_list, 0, sizeof(local_ext_list));
memset(&instance_layers, 0, sizeof(instance_layers));
loader_platform_thread_once(&once_init, loader_initialize);
LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
// Get layer libraries if needed
if (pLayerName && strlen(pLayerName) != 0) {
if (vk_string_validate(MaxLoaderStringLength, pLayerName) != VK_STRING_ERROR_NONE) {
assert(VK_FALSE &&
"vkEnumerateInstanceExtensionProperties: "
"pLayerName is too long or is badly formed");
res = VK_ERROR_EXTENSION_NOT_PRESENT;
goto out;
// We know we need to call at least the terminator
VkResult res = VK_SUCCESS;
VkEnumerateInstanceExtensionPropertiesChain chain_tail = {
.header =
{
.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES,
.version = VK_CURRENT_CHAIN_VERSION,
.size = sizeof(chain_tail),
},
.pfnNextLayer = &terminator_EnumerateInstanceExtensionProperties,
.pNextLink = NULL,
};
VkEnumerateInstanceExtensionPropertiesChain *chain_head = &chain_tail;
// Get the implicit layers
struct loader_layer_list layers;
memset(&layers, 0, sizeof(layers));
loader_implicit_layer_scan(NULL, &layers);
// We'll need to save the dl handles so we can close them later
loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count);
if (libs == NULL) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
size_t lib_count = 0;
// Prepend layers onto the chain if they implement this entry point
for (uint32_t i = 0; i < layers.count; ++i) {
if (!loader_is_implicit_layer_enabled(NULL, layers.list + i) ||
layers.list[i].pre_instance_functions.enumerate_instance_extension_properties[0] == '\0') {
continue;
}
loader_layer_scan(NULL, &instance_layers);
for (uint32_t i = 0; i < instance_layers.count; i++) {
struct loader_layer_properties *props = &instance_layers.list[i];
if (strcmp(props->info.layerName, pLayerName) == 0) {
global_ext_list = &props->instance_extension_list;
break;
}
}
} else {
// Scan/discover all ICD libraries
memset(&icd_tramp_list, 0, sizeof(struct loader_icd_tramp_list));
res = loader_icd_scan(NULL, &icd_tramp_list);
if (VK_SUCCESS != res) {
goto out;
}
// Get extensions from all ICD's, merge so no duplicates
res = loader_get_icd_loader_instance_extensions(NULL, &icd_tramp_list, &local_ext_list);
if (VK_SUCCESS != res) {
goto out;
}
loader_scanned_icd_clear(NULL, &icd_tramp_list);
// Append implicit layers.
loader_implicit_layer_scan(NULL, &instance_layers);
for (uint32_t i = 0; i < instance_layers.count; i++) {
struct loader_extension_list *ext_list = &instance_layers.list[i].instance_extension_list;
loader_add_to_ext_list(NULL, &local_ext_list, ext_list->count, ext_list->list);
loader_platform_dl_handle layer_lib = loader_platform_open_library(layers.list[i].lib_name);
libs[lib_count++] = layer_lib;
void *pfn = loader_platform_get_proc_address(layer_lib,
layers.list[i].pre_instance_functions.enumerate_instance_extension_properties);
if (pfn == NULL) {
loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
"%s: Unable to resolve symbol \"%s\" in implicit layer library \"%s\"", __FUNCTION__,
layers.list[i].pre_instance_functions.enumerate_instance_extension_properties, layers.list[i].lib_name);
continue;
}
global_ext_list = &local_ext_list;
VkEnumerateInstanceExtensionPropertiesChain *chain_link = malloc(sizeof(VkEnumerateInstanceExtensionPropertiesChain));
if (chain_link == NULL) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
}
chain_link->header.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES;
chain_link->header.version = VK_CURRENT_CHAIN_VERSION;
chain_link->header.size = sizeof(*chain_link);
chain_link->pfnNextLayer = pfn;
chain_link->pNextLink = chain_head;
chain_head = chain_link;
}
if (global_ext_list == NULL) {
res = VK_ERROR_LAYER_NOT_PRESENT;
goto out;
// Call down the chain
if (res == VK_SUCCESS) {
res = chain_head->pfnNextLayer(chain_head->pNextLink, pLayerName, pPropertyCount, pProperties);
}
if (pProperties == NULL) {
*pPropertyCount = global_ext_list->count;
goto out;
// Free up the layers
loader_delete_layer_properties(NULL, &layers);
// Tear down the chain
while (chain_head != &chain_tail) {
VkEnumerateInstanceExtensionPropertiesChain *holder = chain_head;
chain_head = (VkEnumerateInstanceExtensionPropertiesChain *)chain_head->pNextLink;
free(holder);
}
copy_size = *pPropertyCount < global_ext_list->count ? *pPropertyCount : global_ext_list->count;
for (uint32_t i = 0; i < copy_size; i++) {
memcpy(&pProperties[i], &global_ext_list->list[i], sizeof(VkExtensionProperties));
// Close the dl handles
for (size_t i = 0; i < lib_count; ++i) {
loader_platform_close_library(libs[i]);
}
*pPropertyCount = copy_size;
free(libs);
if (copy_size < global_ext_list->count) {
res = VK_INCOMPLETE;
goto out;
}
out:
loader_destroy_generic_list(NULL, (struct loader_generic_list *)&local_ext_list);
loader_delete_layer_properties(NULL, &instance_layers);
return res;
}
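
The pattern above — build a tail link pointing at the terminator, prepend one link per implicit layer that exports the pre-instance function, then call the head — is a plain singly linked call chain. A stripped-down sketch of the mechanism, in C++ with types and names simplified from the `VkEnumerateInstance*Chain` structs (illustrative only):

```
struct Link {
  const Link* next;                             // plays pNextLink
  int (*call)(const Link* next_link, int arg);  // plays pfnNextLayer
};

// End of the chain: the loader's terminator does the real enumeration.
int terminator(const Link* next_link, int arg) {
  (void)next_link;
  return arg;
}

// A layer hook pre-processes, then forwards through the link it was handed.
int layer_hook(const Link* next_link, int arg) {
  return next_link->call(next_link->next, arg + 1);
}

int walk() {
  Link tail = {nullptr, terminator};
  Link head = {&tail, layer_hook};  // one implicit layer prepended
  return head.call(head.next, 0);   // layer_hook -> terminator
}
```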
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pPropertyCount,
VkLayerProperties *pProperties) {
VkResult result = VK_SUCCESS;
struct loader_layer_list instance_layer_list;
tls_instance = NULL;
LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
loader_platform_thread_once(&once_init, loader_initialize);
// We know we need to call at least the terminator
VkResult res = VK_SUCCESS;
VkEnumerateInstanceLayerPropertiesChain chain_tail = {
.header =
{
.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES,
.version = VK_CURRENT_CHAIN_VERSION,
.size = sizeof(chain_tail),
},
.pfnNextLayer = &terminator_EnumerateInstanceLayerProperties,
.pNextLink = NULL,
};
VkEnumerateInstanceLayerPropertiesChain *chain_head = &chain_tail;
uint32_t copy_size;
// Get the implicit layers
struct loader_layer_list layers;
memset(&layers, 0, sizeof(layers));
loader_implicit_layer_scan(NULL, &layers);
// Get layer libraries
memset(&instance_layer_list, 0, sizeof(instance_layer_list));
loader_layer_scan(NULL, &instance_layer_list);
// We'll need to save the dl handles so we can close them later
loader_platform_dl_handle *libs = malloc(sizeof(loader_platform_dl_handle) * layers.count);
if (libs == NULL) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
size_t lib_count = 0;
if (pProperties == NULL) {
*pPropertyCount = instance_layer_list.count;
goto out;
// Prepend layers onto the chain if they implement this entry point
for (uint32_t i = 0; i < layers.count; ++i) {
if (!loader_is_implicit_layer_enabled(NULL, layers.list + i) ||
layers.list[i].pre_instance_functions.enumerate_instance_layer_properties[0] == '\0') {
continue;
}
loader_platform_dl_handle layer_lib = loader_platform_open_library(layers.list[i].lib_name);
libs[lib_count++] = layer_lib;
void *pfn =
loader_platform_get_proc_address(layer_lib, layers.list[i].pre_instance_functions.enumerate_instance_layer_properties);
if (pfn == NULL) {
loader_log(NULL, VK_DEBUG_REPORT_WARNING_BIT_EXT, 0,
"%s: Unable to resolve symbol \"%s\" in implicit layer library \"%s\"", __FUNCTION__,
layers.list[i].pre_instance_functions.enumerate_instance_layer_properties, layers.list[i].lib_name);
continue;
}
VkEnumerateInstanceLayerPropertiesChain *chain_link = malloc(sizeof(VkEnumerateInstanceLayerPropertiesChain));
if (chain_link == NULL) {
res = VK_ERROR_OUT_OF_HOST_MEMORY;
break;
}
chain_link->header.type = VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES;
chain_link->header.version = VK_CURRENT_CHAIN_VERSION;
chain_link->header.size = sizeof(*chain_link);
chain_link->pfnNextLayer = pfn;
chain_link->pNextLink = chain_head;
chain_head = chain_link;
}
copy_size = (*pPropertyCount < instance_layer_list.count) ? *pPropertyCount : instance_layer_list.count;
for (uint32_t i = 0; i < copy_size; i++) {
memcpy(&pProperties[i], &instance_layer_list.list[i].info, sizeof(VkLayerProperties));
// Call down the chain
if (res == VK_SUCCESS) {
res = chain_head->pfnNextLayer(chain_head->pNextLink, pPropertyCount, pProperties);
}
*pPropertyCount = copy_size;
// Free up the layers
loader_delete_layer_properties(NULL, &layers);
if (copy_size < instance_layer_list.count) {
result = VK_INCOMPLETE;
goto out;
// Tear down the chain
while (chain_head != &chain_tail) {
VkEnumerateInstanceLayerPropertiesChain *holder = chain_head;
chain_head = (VkEnumerateInstanceLayerPropertiesChain *)chain_head->pNextLink;
free(holder);
}
out:
// Close the dl handles
for (size_t i = 0; i < lib_count; ++i) {
loader_platform_close_library(libs[i]);
}
free(libs);
loader_delete_layer_properties(NULL, &instance_layer_list);
return result;
return res;
}
LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCreateInfo *pCreateInfo,
@@ -225,7 +275,7 @@ LOADER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkCreateInstance(const VkInstanceCr
bool loaderLocked = false;
VkResult res = VK_ERROR_INITIALIZATION_FAILED;
loader_platform_thread_once(&once_init, loader_initialize);
LOADER_PLATFORM_THREAD_ONCE(&once_init, loader_initialize);
// Fail if the requested Vulkan apiVersion is > 1.0 since the loader only supports 1.0.
// Having pCreateInfo == NULL, pCreateInfo->pApplication == NULL, or


@@ -0,0 +1,873 @@
#
# Copyright (c) 2017 The Khronos Group Inc.
# Copyright (c) 2017 Valve Corporation
# Copyright (c) 2017 LunarG, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Lenny Komow <lenny@lunarg.com>
#
# This code is used to pass on device (including physical device) extensions through the call chain. It must do this without
# creating a stack frame, because the actual parameters of the call are not known. Since the first parameter is known to be a
# VkPhysicalDevice or a dispatchable object it can unwrap the object, possibly overwriting the wrapped physical device, and then
# jump to the next function in the call chain
.intel_syntax noprefix
.include "gen_defines.asm"
.ifdef X86_64
.macro PhysDevExtTramp num
.global vkPhysDevExtTramp\num
vkPhysDevExtTramp\num:
mov rax, [rdi]
mov rdi, [rdi + PHYS_DEV_OFFSET_PHYS_DEV_TRAMP]
jmp [rax + (PHYS_DEV_OFFSET_INST_DISPATCH + (PTR_SIZE * \num))]
.endm
.macro PhysDevExtTermin num
.global vkPhysDevExtTermin\num
vkPhysDevExtTermin\num:
mov rax, [rdi + ICD_TERM_OFFSET_PHYS_DEV_TERM] # Store the loader_icd_term* in rax
cmp qword ptr [rax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * \num))], 0 # Check if the next function in the chain is NULL
je terminError\num # Go to the error section if it is NULL
mov rdi, [rdi + PHYS_DEV_OFFSET_PHYS_DEV_TERM] # Load the unwrapped VkPhysicalDevice into the first arg
jmp [rax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * \num))] # Jump to the next function in the chain
terminError\num:
sub rsp, 56 # Create the stack frame
mov rdi, [rax + INSTANCE_OFFSET_ICD_TERM] # Load the loader_instance into rdi (first arg)
mov r8, [rdi + (HASH_OFFSET_INSTANCE + (HASH_SIZE * \num) + FUNC_NAME_OFFSET_HASH)] # Load the func name into r8 (fifth arg)
lea rcx, termin_error_string@GOTPCREL # Load the error string into rcx (fourth arg)
xor edx, edx # Set rdx to zero (third arg)
lea esi, [rdx + VK_DEBUG_REPORT_ERROR_BIT_EXT] # Write the error logging bit to rsi (second arg)
call loader_log # Log the error message before we crash
add rsp, 56 # Clean up the stack frame
mov rax, 0
jmp rax # Crash intentionally by jumping to address zero
.endm
.macro DevExtTramp num
.global vkdev_ext\num
vkdev_ext\num:
mov rax, [rdi] # Dereference the handle to get the dispatch table
jmp [rax + (EXT_OFFSET_DEVICE_DISPATCH + (PTR_SIZE * \num))] # Jump to the appropriate call chain
.endm
.else
.macro PhysDevExtTramp num
.global vkPhysDevExtTramp\num
vkPhysDevExtTramp\num:
mov eax, [esp + 4] # Load the wrapped VkPhysicalDevice into eax
mov ecx, [eax + PHYS_DEV_OFFSET_PHYS_DEV_TRAMP] # Load the unwrapped VkPhysicalDevice into ecx
mov [esp + 4], ecx # Overwrite the wrapped VkPhysicalDevice with the unwrapped one (on the stack)
mov eax, [eax] # Dereference the wrapped VkPhysicalDevice to get the dispatch table in eax
jmp [eax + (PHYS_DEV_OFFSET_INST_DISPATCH + (PTR_SIZE * \num))] # Jump to the next function in the chain, preserving the args on the stack
.endm
.macro PhysDevExtTermin num
.global vkPhysDevExtTermin\num
vkPhysDevExtTermin\num:
mov ecx, [esp + 4] # Move the wrapped VkPhysicalDevice into ecx
mov eax, [ecx + ICD_TERM_OFFSET_PHYS_DEV_TERM] # Store the loader_icd_term* in eax
cmp dword ptr [eax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * \num))], 0 # Check if the next function in the chain is NULL
je terminError\num # Go to the error section if it is NULL
mov ecx, [ecx + PHYS_DEV_OFFSET_PHYS_DEV_TERM] # Unwrap the VkPhysicalDevice in ecx
mov [esp + 4], ecx # Copy the unwrapped VkPhysicalDevice into the first arg
jmp [eax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * \num))] # Jump to the next function in the chain
terminError\num:
mov eax, [eax + INSTANCE_OFFSET_ICD_TERM] # Load the loader_instance into eax
push [eax + (HASH_OFFSET_INSTANCE + (HASH_SIZE * \num) + FUNC_NAME_OFFSET_HASH)] # Push the func name (fifth arg)
push offset termin_error_string@GOT # Push the error string (fourth arg)
push 0 # Push zero (third arg)
push VK_DEBUG_REPORT_ERROR_BIT_EXT # Push the error logging bit (second arg)
push eax # Push the loader_instance (first arg)
call loader_log # Log the error message before we crash
add esp, 20 # Clean up the args
mov eax, 0
jmp eax # Crash intentionally by jumping to address zero
.endm
.macro DevExtTramp num
.global vkdev_ext\num
vkdev_ext\num:
mov eax, [esp + 4] # Dereference the handle to get the dispatch table
jmp [eax + (EXT_OFFSET_DEVICE_DISPATCH + (PTR_SIZE * \num))] # Jump to the appropriate call chain
.endm
.endif
#if defined(__ELF__)
.section .note.GNU-stack,"",%progbits
#endif
.data
termin_error_string:
.string "Extension %s not supported for this physical device"
.text
PhysDevExtTramp 0
PhysDevExtTramp 1
PhysDevExtTramp 2
PhysDevExtTramp 3
PhysDevExtTramp 4
PhysDevExtTramp 5
PhysDevExtTramp 6
PhysDevExtTramp 7
PhysDevExtTramp 8
PhysDevExtTramp 9
PhysDevExtTramp 10
PhysDevExtTramp 11
PhysDevExtTramp 12
PhysDevExtTramp 13
PhysDevExtTramp 14
PhysDevExtTramp 15
PhysDevExtTramp 16
PhysDevExtTramp 17
PhysDevExtTramp 18
PhysDevExtTramp 19
PhysDevExtTramp 20
PhysDevExtTramp 21
PhysDevExtTramp 22
PhysDevExtTramp 23
PhysDevExtTramp 24
PhysDevExtTramp 25
PhysDevExtTramp 26
PhysDevExtTramp 27
PhysDevExtTramp 28
PhysDevExtTramp 29
PhysDevExtTramp 30
PhysDevExtTramp 31
PhysDevExtTramp 32
PhysDevExtTramp 33
PhysDevExtTramp 34
PhysDevExtTramp 35
PhysDevExtTramp 36
PhysDevExtTramp 37
PhysDevExtTramp 38
PhysDevExtTramp 39
PhysDevExtTramp 40
PhysDevExtTramp 41
PhysDevExtTramp 42
PhysDevExtTramp 43
PhysDevExtTramp 44
PhysDevExtTramp 45
PhysDevExtTramp 46
PhysDevExtTramp 47
PhysDevExtTramp 48
PhysDevExtTramp 49
PhysDevExtTramp 50
PhysDevExtTramp 51
PhysDevExtTramp 52
PhysDevExtTramp 53
PhysDevExtTramp 54
PhysDevExtTramp 55
PhysDevExtTramp 56
PhysDevExtTramp 57
PhysDevExtTramp 58
PhysDevExtTramp 59
PhysDevExtTramp 60
PhysDevExtTramp 61
PhysDevExtTramp 62
PhysDevExtTramp 63
PhysDevExtTramp 64
PhysDevExtTramp 65
PhysDevExtTramp 66
PhysDevExtTramp 67
PhysDevExtTramp 68
PhysDevExtTramp 69
PhysDevExtTramp 70
PhysDevExtTramp 71
PhysDevExtTramp 72
PhysDevExtTramp 73
PhysDevExtTramp 74
PhysDevExtTramp 75
PhysDevExtTramp 76
PhysDevExtTramp 77
PhysDevExtTramp 78
PhysDevExtTramp 79
PhysDevExtTramp 80
PhysDevExtTramp 81
PhysDevExtTramp 82
PhysDevExtTramp 83
PhysDevExtTramp 84
PhysDevExtTramp 85
PhysDevExtTramp 86
PhysDevExtTramp 87
PhysDevExtTramp 88
PhysDevExtTramp 89
PhysDevExtTramp 90
PhysDevExtTramp 91
PhysDevExtTramp 92
PhysDevExtTramp 93
PhysDevExtTramp 94
PhysDevExtTramp 95
PhysDevExtTramp 96
PhysDevExtTramp 97
PhysDevExtTramp 98
PhysDevExtTramp 99
PhysDevExtTramp 100
PhysDevExtTramp 101
PhysDevExtTramp 102
PhysDevExtTramp 103
PhysDevExtTramp 104
PhysDevExtTramp 105
PhysDevExtTramp 106
PhysDevExtTramp 107
PhysDevExtTramp 108
PhysDevExtTramp 109
PhysDevExtTramp 110
PhysDevExtTramp 111
PhysDevExtTramp 112
PhysDevExtTramp 113
PhysDevExtTramp 114
PhysDevExtTramp 115
PhysDevExtTramp 116
PhysDevExtTramp 117
PhysDevExtTramp 118
PhysDevExtTramp 119
PhysDevExtTramp 120
PhysDevExtTramp 121
PhysDevExtTramp 122
PhysDevExtTramp 123
PhysDevExtTramp 124
PhysDevExtTramp 125
PhysDevExtTramp 126
PhysDevExtTramp 127
PhysDevExtTramp 128
PhysDevExtTramp 129
PhysDevExtTramp 130
PhysDevExtTramp 131
PhysDevExtTramp 132
PhysDevExtTramp 133
PhysDevExtTramp 134
PhysDevExtTramp 135
PhysDevExtTramp 136
PhysDevExtTramp 137
PhysDevExtTramp 138
PhysDevExtTramp 139
PhysDevExtTramp 140
PhysDevExtTramp 141
PhysDevExtTramp 142
PhysDevExtTramp 143
PhysDevExtTramp 144
PhysDevExtTramp 145
PhysDevExtTramp 146
PhysDevExtTramp 147
PhysDevExtTramp 148
PhysDevExtTramp 149
PhysDevExtTramp 150
PhysDevExtTramp 151
PhysDevExtTramp 152
PhysDevExtTramp 153
PhysDevExtTramp 154
PhysDevExtTramp 155
PhysDevExtTramp 156
PhysDevExtTramp 157
PhysDevExtTramp 158
PhysDevExtTramp 159
PhysDevExtTramp 160
PhysDevExtTramp 161
PhysDevExtTramp 162
PhysDevExtTramp 163
PhysDevExtTramp 164
PhysDevExtTramp 165
PhysDevExtTramp 166
PhysDevExtTramp 167
PhysDevExtTramp 168
PhysDevExtTramp 169
PhysDevExtTramp 170
PhysDevExtTramp 171
PhysDevExtTramp 172
PhysDevExtTramp 173
PhysDevExtTramp 174
PhysDevExtTramp 175
PhysDevExtTramp 176
PhysDevExtTramp 177
PhysDevExtTramp 178
PhysDevExtTramp 179
PhysDevExtTramp 180
PhysDevExtTramp 181
PhysDevExtTramp 182
PhysDevExtTramp 183
PhysDevExtTramp 184
PhysDevExtTramp 185
PhysDevExtTramp 186
PhysDevExtTramp 187
PhysDevExtTramp 188
PhysDevExtTramp 189
PhysDevExtTramp 190
PhysDevExtTramp 191
PhysDevExtTramp 192
PhysDevExtTramp 193
PhysDevExtTramp 194
PhysDevExtTramp 195
PhysDevExtTramp 196
PhysDevExtTramp 197
PhysDevExtTramp 198
PhysDevExtTramp 199
PhysDevExtTramp 200
PhysDevExtTramp 201
PhysDevExtTramp 202
PhysDevExtTramp 203
PhysDevExtTramp 204
PhysDevExtTramp 205
PhysDevExtTramp 206
PhysDevExtTramp 207
PhysDevExtTramp 208
PhysDevExtTramp 209
PhysDevExtTramp 210
PhysDevExtTramp 211
PhysDevExtTramp 212
PhysDevExtTramp 213
PhysDevExtTramp 214
PhysDevExtTramp 215
PhysDevExtTramp 216
PhysDevExtTramp 217
PhysDevExtTramp 218
PhysDevExtTramp 219
PhysDevExtTramp 220
PhysDevExtTramp 221
PhysDevExtTramp 222
PhysDevExtTramp 223
PhysDevExtTramp 224
PhysDevExtTramp 225
PhysDevExtTramp 226
PhysDevExtTramp 227
PhysDevExtTramp 228
PhysDevExtTramp 229
PhysDevExtTramp 230
PhysDevExtTramp 231
PhysDevExtTramp 232
PhysDevExtTramp 233
PhysDevExtTramp 234
PhysDevExtTramp 235
PhysDevExtTramp 236
PhysDevExtTramp 237
PhysDevExtTramp 238
PhysDevExtTramp 239
PhysDevExtTramp 240
PhysDevExtTramp 241
PhysDevExtTramp 242
PhysDevExtTramp 243
PhysDevExtTramp 244
PhysDevExtTramp 245
PhysDevExtTramp 246
PhysDevExtTramp 247
PhysDevExtTramp 248
PhysDevExtTramp 249
PhysDevExtTermin 0
PhysDevExtTermin 1
PhysDevExtTermin 2
PhysDevExtTermin 3
PhysDevExtTermin 4
PhysDevExtTermin 5
PhysDevExtTermin 6
PhysDevExtTermin 7
PhysDevExtTermin 8
PhysDevExtTermin 9
PhysDevExtTermin 10
PhysDevExtTermin 11
PhysDevExtTermin 12
PhysDevExtTermin 13
PhysDevExtTermin 14
PhysDevExtTermin 15
PhysDevExtTermin 16
PhysDevExtTermin 17
PhysDevExtTermin 18
PhysDevExtTermin 19
PhysDevExtTermin 20
PhysDevExtTermin 21
PhysDevExtTermin 22
PhysDevExtTermin 23
PhysDevExtTermin 24
PhysDevExtTermin 25
PhysDevExtTermin 26
PhysDevExtTermin 27
PhysDevExtTermin 28
PhysDevExtTermin 29
PhysDevExtTermin 30
PhysDevExtTermin 31
PhysDevExtTermin 32
PhysDevExtTermin 33
PhysDevExtTermin 34
PhysDevExtTermin 35
PhysDevExtTermin 36
PhysDevExtTermin 37
PhysDevExtTermin 38
PhysDevExtTermin 39
PhysDevExtTermin 40
PhysDevExtTermin 41
PhysDevExtTermin 42
PhysDevExtTermin 43
PhysDevExtTermin 44
PhysDevExtTermin 45
PhysDevExtTermin 46
PhysDevExtTermin 47
PhysDevExtTermin 48
PhysDevExtTermin 49
PhysDevExtTermin 50
PhysDevExtTermin 51
PhysDevExtTermin 52
PhysDevExtTermin 53
PhysDevExtTermin 54
PhysDevExtTermin 55
PhysDevExtTermin 56
PhysDevExtTermin 57
PhysDevExtTermin 58
PhysDevExtTermin 59
PhysDevExtTermin 60
PhysDevExtTermin 61
PhysDevExtTermin 62
PhysDevExtTermin 63
PhysDevExtTermin 64
PhysDevExtTermin 65
PhysDevExtTermin 66
PhysDevExtTermin 67
PhysDevExtTermin 68
PhysDevExtTermin 69
PhysDevExtTermin 70
PhysDevExtTermin 71
PhysDevExtTermin 72
PhysDevExtTermin 73
PhysDevExtTermin 74
PhysDevExtTermin 75
PhysDevExtTermin 76
PhysDevExtTermin 77
PhysDevExtTermin 78
PhysDevExtTermin 79
PhysDevExtTermin 80
PhysDevExtTermin 81
PhysDevExtTermin 82
PhysDevExtTermin 83
PhysDevExtTermin 84
PhysDevExtTermin 85
PhysDevExtTermin 86
PhysDevExtTermin 87
PhysDevExtTermin 88
PhysDevExtTermin 89
PhysDevExtTermin 90
PhysDevExtTermin 91
PhysDevExtTermin 92
PhysDevExtTermin 93
PhysDevExtTermin 94
PhysDevExtTermin 95
PhysDevExtTermin 96
PhysDevExtTermin 97
PhysDevExtTermin 98
PhysDevExtTermin 99
PhysDevExtTermin 100
PhysDevExtTermin 101
PhysDevExtTermin 102
PhysDevExtTermin 103
PhysDevExtTermin 104
PhysDevExtTermin 105
PhysDevExtTermin 106
PhysDevExtTermin 107
PhysDevExtTermin 108
PhysDevExtTermin 109
PhysDevExtTermin 110
PhysDevExtTermin 111
PhysDevExtTermin 112
PhysDevExtTermin 113
PhysDevExtTermin 114
PhysDevExtTermin 115
PhysDevExtTermin 116
PhysDevExtTermin 117
PhysDevExtTermin 118
PhysDevExtTermin 119
PhysDevExtTermin 120
PhysDevExtTermin 121
PhysDevExtTermin 122
PhysDevExtTermin 123
PhysDevExtTermin 124
PhysDevExtTermin 125
PhysDevExtTermin 126
PhysDevExtTermin 127
PhysDevExtTermin 128
PhysDevExtTermin 129
PhysDevExtTermin 130
PhysDevExtTermin 131
PhysDevExtTermin 132
PhysDevExtTermin 133
PhysDevExtTermin 134
PhysDevExtTermin 135
PhysDevExtTermin 136
PhysDevExtTermin 137
PhysDevExtTermin 138
PhysDevExtTermin 139
PhysDevExtTermin 140
PhysDevExtTermin 141
PhysDevExtTermin 142
PhysDevExtTermin 143
PhysDevExtTermin 144
PhysDevExtTermin 145
PhysDevExtTermin 146
PhysDevExtTermin 147
PhysDevExtTermin 148
PhysDevExtTermin 149
PhysDevExtTermin 150
PhysDevExtTermin 151
PhysDevExtTermin 152
PhysDevExtTermin 153
PhysDevExtTermin 154
PhysDevExtTermin 155
PhysDevExtTermin 156
PhysDevExtTermin 157
PhysDevExtTermin 158
PhysDevExtTermin 159
PhysDevExtTermin 160
PhysDevExtTermin 161
PhysDevExtTermin 162
PhysDevExtTermin 163
PhysDevExtTermin 164
PhysDevExtTermin 165
PhysDevExtTermin 166
PhysDevExtTermin 167
PhysDevExtTermin 168
PhysDevExtTermin 169
PhysDevExtTermin 170
PhysDevExtTermin 171
PhysDevExtTermin 172
PhysDevExtTermin 173
PhysDevExtTermin 174
PhysDevExtTermin 175
PhysDevExtTermin 176
PhysDevExtTermin 177
PhysDevExtTermin 178
PhysDevExtTermin 179
PhysDevExtTermin 180
PhysDevExtTermin 181
PhysDevExtTermin 182
PhysDevExtTermin 183
PhysDevExtTermin 184
PhysDevExtTermin 185
PhysDevExtTermin 186
PhysDevExtTermin 187
PhysDevExtTermin 188
PhysDevExtTermin 189
PhysDevExtTermin 190
PhysDevExtTermin 191
PhysDevExtTermin 192
PhysDevExtTermin 193
PhysDevExtTermin 194
PhysDevExtTermin 195
PhysDevExtTermin 196
PhysDevExtTermin 197
PhysDevExtTermin 198
PhysDevExtTermin 199
PhysDevExtTermin 200
PhysDevExtTermin 201
PhysDevExtTermin 202
PhysDevExtTermin 203
PhysDevExtTermin 204
PhysDevExtTermin 205
PhysDevExtTermin 206
PhysDevExtTermin 207
PhysDevExtTermin 208
PhysDevExtTermin 209
PhysDevExtTermin 210
PhysDevExtTermin 211
PhysDevExtTermin 212
PhysDevExtTermin 213
PhysDevExtTermin 214
PhysDevExtTermin 215
PhysDevExtTermin 216
PhysDevExtTermin 217
PhysDevExtTermin 218
PhysDevExtTermin 219
PhysDevExtTermin 220
PhysDevExtTermin 221
PhysDevExtTermin 222
PhysDevExtTermin 223
PhysDevExtTermin 224
PhysDevExtTermin 225
PhysDevExtTermin 226
PhysDevExtTermin 227
PhysDevExtTermin 228
PhysDevExtTermin 229
PhysDevExtTermin 230
PhysDevExtTermin 231
PhysDevExtTermin 232
PhysDevExtTermin 233
PhysDevExtTermin 234
PhysDevExtTermin 235
PhysDevExtTermin 236
PhysDevExtTermin 237
PhysDevExtTermin 238
PhysDevExtTermin 239
PhysDevExtTermin 240
PhysDevExtTermin 241
PhysDevExtTermin 242
PhysDevExtTermin 243
PhysDevExtTermin 244
PhysDevExtTermin 245
PhysDevExtTermin 246
PhysDevExtTermin 247
PhysDevExtTermin 248
PhysDevExtTermin 249
DevExtTramp 0
DevExtTramp 1
DevExtTramp 2
DevExtTramp 3
DevExtTramp 4
DevExtTramp 5
DevExtTramp 6
DevExtTramp 7
DevExtTramp 8
DevExtTramp 9
DevExtTramp 10
DevExtTramp 11
DevExtTramp 12
DevExtTramp 13
DevExtTramp 14
DevExtTramp 15
DevExtTramp 16
DevExtTramp 17
DevExtTramp 18
DevExtTramp 19
DevExtTramp 20
DevExtTramp 21
DevExtTramp 22
DevExtTramp 23
DevExtTramp 24
DevExtTramp 25
DevExtTramp 26
DevExtTramp 27
DevExtTramp 28
DevExtTramp 29
DevExtTramp 30
DevExtTramp 31
DevExtTramp 32
DevExtTramp 33
DevExtTramp 34
DevExtTramp 35
DevExtTramp 36
DevExtTramp 37
DevExtTramp 38
DevExtTramp 39
DevExtTramp 40
DevExtTramp 41
DevExtTramp 42
DevExtTramp 43
DevExtTramp 44
DevExtTramp 45
DevExtTramp 46
DevExtTramp 47
DevExtTramp 48
DevExtTramp 49
DevExtTramp 50
DevExtTramp 51
DevExtTramp 52
DevExtTramp 53
DevExtTramp 54
DevExtTramp 55
DevExtTramp 56
DevExtTramp 57
DevExtTramp 58
DevExtTramp 59
DevExtTramp 60
DevExtTramp 61
DevExtTramp 62
DevExtTramp 63
DevExtTramp 64
DevExtTramp 65
DevExtTramp 66
DevExtTramp 67
DevExtTramp 68
DevExtTramp 69
DevExtTramp 70
DevExtTramp 71
DevExtTramp 72
DevExtTramp 73
DevExtTramp 74
DevExtTramp 75
DevExtTramp 76
DevExtTramp 77
DevExtTramp 78
DevExtTramp 79
DevExtTramp 80
DevExtTramp 81
DevExtTramp 82
DevExtTramp 83
DevExtTramp 84
DevExtTramp 85
DevExtTramp 86
DevExtTramp 87
DevExtTramp 88
DevExtTramp 89
DevExtTramp 90
DevExtTramp 91
DevExtTramp 92
DevExtTramp 93
DevExtTramp 94
DevExtTramp 95
DevExtTramp 96
DevExtTramp 97
DevExtTramp 98
DevExtTramp 99
DevExtTramp 100
DevExtTramp 101
DevExtTramp 102
DevExtTramp 103
DevExtTramp 104
DevExtTramp 105
DevExtTramp 106
DevExtTramp 107
DevExtTramp 108
DevExtTramp 109
DevExtTramp 110
DevExtTramp 111
DevExtTramp 112
DevExtTramp 113
DevExtTramp 114
DevExtTramp 115
DevExtTramp 116
DevExtTramp 117
DevExtTramp 118
DevExtTramp 119
DevExtTramp 120
DevExtTramp 121
DevExtTramp 122
DevExtTramp 123
DevExtTramp 124
DevExtTramp 125
DevExtTramp 126
DevExtTramp 127
DevExtTramp 128
DevExtTramp 129
DevExtTramp 130
DevExtTramp 131
DevExtTramp 132
DevExtTramp 133
DevExtTramp 134
DevExtTramp 135
DevExtTramp 136
DevExtTramp 137
DevExtTramp 138
DevExtTramp 139
DevExtTramp 140
DevExtTramp 141
DevExtTramp 142
DevExtTramp 143
DevExtTramp 144
DevExtTramp 145
DevExtTramp 146
DevExtTramp 147
DevExtTramp 148
DevExtTramp 149
DevExtTramp 150
DevExtTramp 151
DevExtTramp 152
DevExtTramp 153
DevExtTramp 154
DevExtTramp 155
DevExtTramp 156
DevExtTramp 157
DevExtTramp 158
DevExtTramp 159
DevExtTramp 160
DevExtTramp 161
DevExtTramp 162
DevExtTramp 163
DevExtTramp 164
DevExtTramp 165
DevExtTramp 166
DevExtTramp 167
DevExtTramp 168
DevExtTramp 169
DevExtTramp 170
DevExtTramp 171
DevExtTramp 172
DevExtTramp 173
DevExtTramp 174
DevExtTramp 175
DevExtTramp 176
DevExtTramp 177
DevExtTramp 178
DevExtTramp 179
DevExtTramp 180
DevExtTramp 181
DevExtTramp 182
DevExtTramp 183
DevExtTramp 184
DevExtTramp 185
DevExtTramp 186
DevExtTramp 187
DevExtTramp 188
DevExtTramp 189
DevExtTramp 190
DevExtTramp 191
DevExtTramp 192
DevExtTramp 193
DevExtTramp 194
DevExtTramp 195
DevExtTramp 196
DevExtTramp 197
DevExtTramp 198
DevExtTramp 199
DevExtTramp 200
DevExtTramp 201
DevExtTramp 202
DevExtTramp 203
DevExtTramp 204
DevExtTramp 205
DevExtTramp 206
DevExtTramp 207
DevExtTramp 208
DevExtTramp 209
DevExtTramp 210
DevExtTramp 211
DevExtTramp 212
DevExtTramp 213
DevExtTramp 214
DevExtTramp 215
DevExtTramp 216
DevExtTramp 217
DevExtTramp 218
DevExtTramp 219
DevExtTramp 220
DevExtTramp 221
DevExtTramp 222
DevExtTramp 223
DevExtTramp 224
DevExtTramp 225
DevExtTramp 226
DevExtTramp 227
DevExtTramp 228
DevExtTramp 229
DevExtTramp 230
DevExtTramp 231
DevExtTramp 232
DevExtTramp 233
DevExtTramp 234
DevExtTramp 235
DevExtTramp 236
DevExtTramp 237
DevExtTramp 238
DevExtTramp 239
DevExtTramp 240
DevExtTramp 241
DevExtTramp 242
DevExtTramp 243
DevExtTramp 244
DevExtTramp 245
DevExtTramp 246
DevExtTramp 247
DevExtTramp 248
DevExtTramp 249
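
Conceptually, each stub emitted above behaves like the following C++ (illustrative names only; the real versions must be assembly because they may not build a stack frame — every argument after the handle is unknown and has to pass through untouched):

```
struct WrappedPhysDev {
  void** inst_dispatch;  // first field: the instance dispatch table
  void* unwrapped;       // the ICD's real VkPhysicalDevice
};

using Entry = void (*)(void* handle /* , ...unknown args... */);

// vkPhysDevExtTramp<N>: swap in the unwrapped handle, jump to slot N.
void phys_dev_ext_tramp(WrappedPhysDev* wrapped, int n) {
  Entry next = reinterpret_cast<Entry>(wrapped->inst_dispatch[n]);
  next(wrapped->unwrapped);  // the asm version tail-jumps instead of calling
}

// vkdev_ext<N>: devices need no unwrapping, just dispatch through slot N.
void dev_ext_tramp(void* device, int n) {
  Entry next =
      reinterpret_cast<Entry>((*reinterpret_cast<void***>(device))[n]);
  next(device);
}
```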


@@ -0,0 +1,883 @@
;
; Copyright (c) 2017 The Khronos Group Inc.
; Copyright (c) 2017 Valve Corporation
; Copyright (c) 2017 LunarG, Inc.
;
; Licensed under the Apache License, Version 2.0 (the "License");
; you may not use this file except in compliance with the License.
; You may obtain a copy of the License at
;
; http://www.apache.org/licenses/LICENSE-2.0
;
; Unless required by applicable law or agreed to in writing, software
; distributed under the License is distributed on an "AS IS" BASIS,
; WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
; See the License for the specific language governing permissions and
; limitations under the License.
;
; Author: Lenny Komow <lenny@lunarg.com>
;
; This code is used to pass on device (including physical device) extensions through the call chain. It must do this without
; creating a stack frame, because the actual parameters of the call are not known. Since the first parameter is known to be a
; VkPhysicalDevice or a dispatchable object it can unwrap the object, possibly overwriting the wrapped physical device, and then
; jump to the next function in the call chain
; Codegen defines a number of values, chiefly offsets of members within structs and sizes of data types within gen_defines.asm.
; Struct member offsets are defined in the format "XX_OFFSET_YY" where XX indicates the member within the struct and YY indicates
; the struct type that it is a member of. Data type sizes are defined in the format "XX_SIZE" where XX indicates the data type.
INCLUDE gen_defines.asm
; 64-bit values and macro
IFDEF rax
PhysDevExtTramp macro num:req
public vkPhysDevExtTramp&num&
vkPhysDevExtTramp&num&:
mov rax, qword ptr [rcx] ; Dereference the wrapped VkPhysicalDevice to get the dispatch table in rax
mov rcx, qword ptr [rcx + PHYS_DEV_OFFSET_PHYS_DEV_TRAMP] ; Load the unwrapped VkPhysicalDevice into rcx
jmp qword ptr [rax + (PHYS_DEV_OFFSET_INST_DISPATCH + (PTR_SIZE * num))] ; Jump to the next function in the chain, preserving the args in other registers
endm
PhysDevExtTermin macro num
public vkPhysDevExtTermin&num&
vkPhysDevExtTermin&num&:
mov rax, qword ptr [rcx + ICD_TERM_OFFSET_PHYS_DEV_TERM] ; Store the loader_icd_term* in rax
cmp qword ptr [rax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * num))], 0 ; Check if the next function in the chain is NULL
je terminError&num& ; Go to the error section if it is NULL
mov rcx, qword ptr [rcx + PHYS_DEV_OFFSET_PHYS_DEV_TERM] ; Load the unwrapped VkPhysicalDevice into the first arg
jmp qword ptr [rax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * num))] ; Jump to the next function in the chain
terminError&num&:
sub rsp, 56 ; Create the stack frame
mov rcx, qword ptr [rax + INSTANCE_OFFSET_ICD_TERM] ; Load the loader_instance into rcx (first arg)
mov rax, qword ptr [rcx + (HASH_OFFSET_INSTANCE + (HASH_SIZE * num) + FUNC_NAME_OFFSET_HASH)] ; Load the func name into rax
lea r9, termin_error_string ; Load the error string into r9 (fourth arg)
xor r8d, r8d ; Set r8 to zero (third arg)
mov qword ptr [rsp + 32], rax ; Move the func name onto the stack (fifth arg)
lea edx, [r8 + VK_DEBUG_REPORT_ERROR_BIT_EXT] ; Write the error logging bit to rdx (second arg)
call loader_log ; Log the error message before we crash
add rsp, 56 ; Clean up the stack frame
mov rax, 0
jmp rax ; Crash intentionally by jumping to address zero
endm
DevExtTramp macro num
public vkdev_ext&num&
vkdev_ext&num&:
mov rax, qword ptr [rcx] ; Dereference the handle to get the dispatch table
jmp qword ptr [rax + (EXT_OFFSET_DEVICE_DISPATCH + (PTR_SIZE * num))] ; Jump to the appropriate call chain
endm
; 32-bit values and macro
ELSE
PhysDevExtTramp macro num
public _vkPhysDevExtTramp&num&@4
_vkPhysDevExtTramp&num&@4:
mov eax, dword ptr [esp + 4] ; Load the wrapped VkPhysicalDevice into eax
mov ecx, [eax + PHYS_DEV_OFFSET_PHYS_DEV_TRAMP] ; Load the unwrapped VkPhysicalDevice into ecx
mov [esp + 4], ecx ; Overwrite the wrapped VkPhysicalDevice with the unwrapped one (on the stack)
mov eax, [eax] ; Dereference the wrapped VkPhysicalDevice to get the dispatch table in eax
jmp dword ptr [eax + (PHYS_DEV_OFFSET_INST_DISPATCH + (PTR_SIZE * num))] ; Jump to the next function in the chain, preserving the args on the stack
endm
PhysDevExtTermin macro num
public _vkPhysDevExtTermin&num&@4
_vkPhysDevExtTermin&num&@4:
mov ecx, dword ptr [esp + 4] ; Move the wrapped VkPhysicalDevice into ecx
mov eax, dword ptr [ecx + ICD_TERM_OFFSET_PHYS_DEV_TERM] ; Store the loader_icd_term* in eax
cmp dword ptr [eax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * num))], 0 ; Check if the next function in the chain is NULL
je terminError&num& ; Go to the error section if it is NULL
mov ecx, dword ptr [ecx + PHYS_DEV_OFFSET_PHYS_DEV_TERM] ; Unwrap the VkPhysicalDevice in ecx
mov dword ptr [esp + 4], ecx ; Copy the unwrapped VkPhysicalDevice into the first arg
jmp dword ptr [eax + (DISPATCH_OFFSET_ICD_TERM + (PTR_SIZE * num))] ; Jump to the next function in the chain
terminError&num&:
mov eax, dword ptr [eax + INSTANCE_OFFSET_ICD_TERM] ; Load the loader_instance into eax
push dword ptr [eax + (HASH_OFFSET_INSTANCE + (HASH_SIZE * num) + FUNC_NAME_OFFSET_HASH)] ; Push the func name (fifth arg)
push offset termin_error_string ; Push the error string (fourth arg)
push 0 ; Push zero (third arg)
push VK_DEBUG_REPORT_ERROR_BIT_EXT ; Push the error logging bit (second arg)
push eax ; Push the loader_instance (first arg)
call _loader_log ; Log the error message before we crash
add esp, 20 ; Clean up the args
mov eax, 0
jmp eax ; Crash intentionally by jumping to address zero
endm
DevExtTramp macro num
public _vkdev_ext&num&@4
_vkdev_ext&num&@4:
mov eax, dword ptr [esp + 4] ; Dereference the handle to get the dispatch table
jmp dword ptr [eax + (EXT_OFFSET_DEVICE_DISPATCH + (PTR_SIZE * num))] ; Jump to the appropriate call chain
endm
; This is also needed for 32-bit only
.model flat
ENDIF
.const
termin_error_string db 'Extension %s not supported for this physical device', 0
.code
IFDEF rax
extrn loader_log:near
ELSE
extrn _loader_log:near
ENDIF
PhysDevExtTramp 0
PhysDevExtTramp 1
PhysDevExtTramp 2
PhysDevExtTramp 3
PhysDevExtTramp 4
PhysDevExtTramp 5
PhysDevExtTramp 6
PhysDevExtTramp 7
PhysDevExtTramp 8
PhysDevExtTramp 9
PhysDevExtTramp 10
PhysDevExtTramp 11
PhysDevExtTramp 12
PhysDevExtTramp 13
PhysDevExtTramp 14
PhysDevExtTramp 15
PhysDevExtTramp 16
PhysDevExtTramp 17
PhysDevExtTramp 18
PhysDevExtTramp 19
PhysDevExtTramp 20
PhysDevExtTramp 21
PhysDevExtTramp 22
PhysDevExtTramp 23
PhysDevExtTramp 24
PhysDevExtTramp 25
PhysDevExtTramp 26
PhysDevExtTramp 27
PhysDevExtTramp 28
PhysDevExtTramp 29
PhysDevExtTramp 30
PhysDevExtTramp 31
PhysDevExtTramp 32
PhysDevExtTramp 33
PhysDevExtTramp 34
PhysDevExtTramp 35
PhysDevExtTramp 36
PhysDevExtTramp 37
PhysDevExtTramp 38
PhysDevExtTramp 39
PhysDevExtTramp 40
PhysDevExtTramp 41
PhysDevExtTramp 42
PhysDevExtTramp 43
PhysDevExtTramp 44
PhysDevExtTramp 45
PhysDevExtTramp 46
PhysDevExtTramp 47
PhysDevExtTramp 48
PhysDevExtTramp 49
PhysDevExtTramp 50
PhysDevExtTramp 51
PhysDevExtTramp 52
PhysDevExtTramp 53
PhysDevExtTramp 54
PhysDevExtTramp 55
PhysDevExtTramp 56
PhysDevExtTramp 57
PhysDevExtTramp 58
PhysDevExtTramp 59
PhysDevExtTramp 60
PhysDevExtTramp 61
PhysDevExtTramp 62
PhysDevExtTramp 63
PhysDevExtTramp 64
PhysDevExtTramp 65
PhysDevExtTramp 66
PhysDevExtTramp 67
PhysDevExtTramp 68
PhysDevExtTramp 69
PhysDevExtTramp 70
PhysDevExtTramp 71
PhysDevExtTramp 72
PhysDevExtTramp 73
PhysDevExtTramp 74
PhysDevExtTramp 75
PhysDevExtTramp 76
PhysDevExtTramp 77
PhysDevExtTramp 78
PhysDevExtTramp 79
PhysDevExtTramp 80
PhysDevExtTramp 81
PhysDevExtTramp 82
PhysDevExtTramp 83
PhysDevExtTramp 84
PhysDevExtTramp 85
PhysDevExtTramp 86
PhysDevExtTramp 87
PhysDevExtTramp 88
PhysDevExtTramp 89
PhysDevExtTramp 90
PhysDevExtTramp 91
PhysDevExtTramp 92
PhysDevExtTramp 93
PhysDevExtTramp 94
PhysDevExtTramp 95
PhysDevExtTramp 96
PhysDevExtTramp 97
PhysDevExtTramp 98
PhysDevExtTramp 99
PhysDevExtTramp 100
PhysDevExtTramp 101
PhysDevExtTramp 102
PhysDevExtTramp 103
PhysDevExtTramp 104
PhysDevExtTramp 105
PhysDevExtTramp 106
PhysDevExtTramp 107
PhysDevExtTramp 108
PhysDevExtTramp 109
PhysDevExtTramp 110
PhysDevExtTramp 111
PhysDevExtTramp 112
PhysDevExtTramp 113
PhysDevExtTramp 114
PhysDevExtTramp 115
PhysDevExtTramp 116
PhysDevExtTramp 117
PhysDevExtTramp 118
PhysDevExtTramp 119
PhysDevExtTramp 120
PhysDevExtTramp 121
PhysDevExtTramp 122
PhysDevExtTramp 123
PhysDevExtTramp 124
PhysDevExtTramp 125
PhysDevExtTramp 126
PhysDevExtTramp 127
PhysDevExtTramp 128
PhysDevExtTramp 129
PhysDevExtTramp 130
PhysDevExtTramp 131
PhysDevExtTramp 132
PhysDevExtTramp 133
PhysDevExtTramp 134
PhysDevExtTramp 135
PhysDevExtTramp 136
PhysDevExtTramp 137
PhysDevExtTramp 138
PhysDevExtTramp 139
PhysDevExtTramp 140
PhysDevExtTramp 141
PhysDevExtTramp 142
PhysDevExtTramp 143
PhysDevExtTramp 144
PhysDevExtTramp 145
PhysDevExtTramp 146
PhysDevExtTramp 147
PhysDevExtTramp 148
PhysDevExtTramp 149
PhysDevExtTramp 150
PhysDevExtTramp 151
PhysDevExtTramp 152
PhysDevExtTramp 153
PhysDevExtTramp 154
PhysDevExtTramp 155
PhysDevExtTramp 156
PhysDevExtTramp 157
PhysDevExtTramp 158
PhysDevExtTramp 159
PhysDevExtTramp 160
PhysDevExtTramp 161
PhysDevExtTramp 162
PhysDevExtTramp 163
PhysDevExtTramp 164
PhysDevExtTramp 165
PhysDevExtTramp 166
PhysDevExtTramp 167
PhysDevExtTramp 168
PhysDevExtTramp 169
PhysDevExtTramp 170
PhysDevExtTramp 171
PhysDevExtTramp 172
PhysDevExtTramp 173
PhysDevExtTramp 174
PhysDevExtTramp 175
PhysDevExtTramp 176
PhysDevExtTramp 177
PhysDevExtTramp 178
PhysDevExtTramp 179
PhysDevExtTramp 180
PhysDevExtTramp 181
PhysDevExtTramp 182
PhysDevExtTramp 183
PhysDevExtTramp 184
PhysDevExtTramp 185
PhysDevExtTramp 186
PhysDevExtTramp 187
PhysDevExtTramp 188
PhysDevExtTramp 189
PhysDevExtTramp 190
PhysDevExtTramp 191
PhysDevExtTramp 192
PhysDevExtTramp 193
PhysDevExtTramp 194
PhysDevExtTramp 195
PhysDevExtTramp 196
PhysDevExtTramp 197
PhysDevExtTramp 198
PhysDevExtTramp 199
PhysDevExtTramp 200
PhysDevExtTramp 201
PhysDevExtTramp 202
PhysDevExtTramp 203
PhysDevExtTramp 204
PhysDevExtTramp 205
PhysDevExtTramp 206
PhysDevExtTramp 207
PhysDevExtTramp 208
PhysDevExtTramp 209
PhysDevExtTramp 210
PhysDevExtTramp 211
PhysDevExtTramp 212
PhysDevExtTramp 213
PhysDevExtTramp 214
PhysDevExtTramp 215
PhysDevExtTramp 216
PhysDevExtTramp 217
PhysDevExtTramp 218
PhysDevExtTramp 219
PhysDevExtTramp 220
PhysDevExtTramp 221
PhysDevExtTramp 222
PhysDevExtTramp 223
PhysDevExtTramp 224
PhysDevExtTramp 225
PhysDevExtTramp 226
PhysDevExtTramp 227
PhysDevExtTramp 228
PhysDevExtTramp 229
PhysDevExtTramp 230
PhysDevExtTramp 231
PhysDevExtTramp 232
PhysDevExtTramp 233
PhysDevExtTramp 234
PhysDevExtTramp 235
PhysDevExtTramp 236
PhysDevExtTramp 237
PhysDevExtTramp 238
PhysDevExtTramp 239
PhysDevExtTramp 240
PhysDevExtTramp 241
PhysDevExtTramp 242
PhysDevExtTramp 243
PhysDevExtTramp 244
PhysDevExtTramp 245
PhysDevExtTramp 246
PhysDevExtTramp 247
PhysDevExtTramp 248
PhysDevExtTramp 249
PhysDevExtTermin 0
PhysDevExtTermin 1
PhysDevExtTermin 2
PhysDevExtTermin 3
PhysDevExtTermin 4
PhysDevExtTermin 5
PhysDevExtTermin 6
PhysDevExtTermin 7
PhysDevExtTermin 8
PhysDevExtTermin 9
PhysDevExtTermin 10
PhysDevExtTermin 11
PhysDevExtTermin 12
PhysDevExtTermin 13
PhysDevExtTermin 14
PhysDevExtTermin 15
PhysDevExtTermin 16
PhysDevExtTermin 17
PhysDevExtTermin 18
PhysDevExtTermin 19
PhysDevExtTermin 20
PhysDevExtTermin 21
PhysDevExtTermin 22
PhysDevExtTermin 23
PhysDevExtTermin 24
PhysDevExtTermin 25
PhysDevExtTermin 26
PhysDevExtTermin 27
PhysDevExtTermin 28
PhysDevExtTermin 29
PhysDevExtTermin 30
PhysDevExtTermin 31
PhysDevExtTermin 32
PhysDevExtTermin 33
PhysDevExtTermin 34
PhysDevExtTermin 35
PhysDevExtTermin 36
PhysDevExtTermin 37
PhysDevExtTermin 38
PhysDevExtTermin 39
PhysDevExtTermin 40
PhysDevExtTermin 41
PhysDevExtTermin 42
PhysDevExtTermin 43
PhysDevExtTermin 44
PhysDevExtTermin 45
PhysDevExtTermin 46
PhysDevExtTermin 47
PhysDevExtTermin 48
PhysDevExtTermin 49
PhysDevExtTermin 50
PhysDevExtTermin 51
PhysDevExtTermin 52
PhysDevExtTermin 53
PhysDevExtTermin 54
PhysDevExtTermin 55
PhysDevExtTermin 56
PhysDevExtTermin 57
PhysDevExtTermin 58
PhysDevExtTermin 59
PhysDevExtTermin 60
PhysDevExtTermin 61
PhysDevExtTermin 62
PhysDevExtTermin 63
PhysDevExtTermin 64
PhysDevExtTermin 65
PhysDevExtTermin 66
PhysDevExtTermin 67
PhysDevExtTermin 68
PhysDevExtTermin 69
PhysDevExtTermin 70
PhysDevExtTermin 71
PhysDevExtTermin 72
PhysDevExtTermin 73
PhysDevExtTermin 74
PhysDevExtTermin 75
PhysDevExtTermin 76
PhysDevExtTermin 77
PhysDevExtTermin 78
PhysDevExtTermin 79
PhysDevExtTermin 80
PhysDevExtTermin 81
PhysDevExtTermin 82
PhysDevExtTermin 83
PhysDevExtTermin 84
PhysDevExtTermin 85
PhysDevExtTermin 86
PhysDevExtTermin 87
PhysDevExtTermin 88
PhysDevExtTermin 89
PhysDevExtTermin 90
PhysDevExtTermin 91
PhysDevExtTermin 92
PhysDevExtTermin 93
PhysDevExtTermin 94
PhysDevExtTermin 95
PhysDevExtTermin 96
PhysDevExtTermin 97
PhysDevExtTermin 98
PhysDevExtTermin 99
PhysDevExtTermin 100
PhysDevExtTermin 101
PhysDevExtTermin 102
PhysDevExtTermin 103
PhysDevExtTermin 104
PhysDevExtTermin 105
PhysDevExtTermin 106
PhysDevExtTermin 107
PhysDevExtTermin 108
PhysDevExtTermin 109
PhysDevExtTermin 110
PhysDevExtTermin 111
PhysDevExtTermin 112
PhysDevExtTermin 113
PhysDevExtTermin 114
PhysDevExtTermin 115
PhysDevExtTermin 116
PhysDevExtTermin 117
PhysDevExtTermin 118
PhysDevExtTermin 119
PhysDevExtTermin 120
PhysDevExtTermin 121
PhysDevExtTermin 122
PhysDevExtTermin 123
PhysDevExtTermin 124
PhysDevExtTermin 125
PhysDevExtTermin 126
PhysDevExtTermin 127
PhysDevExtTermin 128
PhysDevExtTermin 129
PhysDevExtTermin 130
PhysDevExtTermin 131
PhysDevExtTermin 132
PhysDevExtTermin 133
PhysDevExtTermin 134
PhysDevExtTermin 135
PhysDevExtTermin 136
PhysDevExtTermin 137
PhysDevExtTermin 138
PhysDevExtTermin 139
PhysDevExtTermin 140
PhysDevExtTermin 141
PhysDevExtTermin 142
PhysDevExtTermin 143
PhysDevExtTermin 144
PhysDevExtTermin 145
PhysDevExtTermin 146
PhysDevExtTermin 147
PhysDevExtTermin 148
PhysDevExtTermin 149
PhysDevExtTermin 150
PhysDevExtTermin 151
PhysDevExtTermin 152
PhysDevExtTermin 153
PhysDevExtTermin 154
PhysDevExtTermin 155
PhysDevExtTermin 156
PhysDevExtTermin 157
PhysDevExtTermin 158
PhysDevExtTermin 159
PhysDevExtTermin 160
PhysDevExtTermin 161
PhysDevExtTermin 162
PhysDevExtTermin 163
PhysDevExtTermin 164
PhysDevExtTermin 165
PhysDevExtTermin 166
PhysDevExtTermin 167
PhysDevExtTermin 168
PhysDevExtTermin 169
PhysDevExtTermin 170
PhysDevExtTermin 171
PhysDevExtTermin 172
PhysDevExtTermin 173
PhysDevExtTermin 174
PhysDevExtTermin 175
PhysDevExtTermin 176
PhysDevExtTermin 177
PhysDevExtTermin 178
PhysDevExtTermin 179
PhysDevExtTermin 180
PhysDevExtTermin 181
PhysDevExtTermin 182
PhysDevExtTermin 183
PhysDevExtTermin 184
PhysDevExtTermin 185
PhysDevExtTermin 186
PhysDevExtTermin 187
PhysDevExtTermin 188
PhysDevExtTermin 189
PhysDevExtTermin 190
PhysDevExtTermin 191
PhysDevExtTermin 192
PhysDevExtTermin 193
PhysDevExtTermin 194
PhysDevExtTermin 195
PhysDevExtTermin 196
PhysDevExtTermin 197
PhysDevExtTermin 198
PhysDevExtTermin 199
PhysDevExtTermin 200
PhysDevExtTermin 201
PhysDevExtTermin 202
PhysDevExtTermin 203
PhysDevExtTermin 204
PhysDevExtTermin 205
PhysDevExtTermin 206
PhysDevExtTermin 207
PhysDevExtTermin 208
PhysDevExtTermin 209
PhysDevExtTermin 210
PhysDevExtTermin 211
PhysDevExtTermin 212
PhysDevExtTermin 213
PhysDevExtTermin 214
PhysDevExtTermin 215
PhysDevExtTermin 216
PhysDevExtTermin 217
PhysDevExtTermin 218
PhysDevExtTermin 219
PhysDevExtTermin 220
PhysDevExtTermin 221
PhysDevExtTermin 222
PhysDevExtTermin 223
PhysDevExtTermin 224
PhysDevExtTermin 225
PhysDevExtTermin 226
PhysDevExtTermin 227
PhysDevExtTermin 228
PhysDevExtTermin 229
PhysDevExtTermin 230
PhysDevExtTermin 231
PhysDevExtTermin 232
PhysDevExtTermin 233
PhysDevExtTermin 234
PhysDevExtTermin 235
PhysDevExtTermin 236
PhysDevExtTermin 237
PhysDevExtTermin 238
PhysDevExtTermin 239
PhysDevExtTermin 240
PhysDevExtTermin 241
PhysDevExtTermin 242
PhysDevExtTermin 243
PhysDevExtTermin 244
PhysDevExtTermin 245
PhysDevExtTermin 246
PhysDevExtTermin 247
PhysDevExtTermin 248
PhysDevExtTermin 249
DevExtTramp 0
DevExtTramp 1
DevExtTramp 2
DevExtTramp 3
DevExtTramp 4
DevExtTramp 5
DevExtTramp 6
DevExtTramp 7
DevExtTramp 8
DevExtTramp 9
DevExtTramp 10
DevExtTramp 11
DevExtTramp 12
DevExtTramp 13
DevExtTramp 14
DevExtTramp 15
DevExtTramp 16
DevExtTramp 17
DevExtTramp 18
DevExtTramp 19
DevExtTramp 20
DevExtTramp 21
DevExtTramp 22
DevExtTramp 23
DevExtTramp 24
DevExtTramp 25
DevExtTramp 26
DevExtTramp 27
DevExtTramp 28
DevExtTramp 29
DevExtTramp 30
DevExtTramp 31
DevExtTramp 32
DevExtTramp 33
DevExtTramp 34
DevExtTramp 35
DevExtTramp 36
DevExtTramp 37
DevExtTramp 38
DevExtTramp 39
DevExtTramp 40
DevExtTramp 41
DevExtTramp 42
DevExtTramp 43
DevExtTramp 44
DevExtTramp 45
DevExtTramp 46
DevExtTramp 47
DevExtTramp 48
DevExtTramp 49
DevExtTramp 50
DevExtTramp 51
DevExtTramp 52
DevExtTramp 53
DevExtTramp 54
DevExtTramp 55
DevExtTramp 56
DevExtTramp 57
DevExtTramp 58
DevExtTramp 59
DevExtTramp 60
DevExtTramp 61
DevExtTramp 62
DevExtTramp 63
DevExtTramp 64
DevExtTramp 65
DevExtTramp 66
DevExtTramp 67
DevExtTramp 68
DevExtTramp 69
DevExtTramp 70
DevExtTramp 71
DevExtTramp 72
DevExtTramp 73
DevExtTramp 74
DevExtTramp 75
DevExtTramp 76
DevExtTramp 77
DevExtTramp 78
DevExtTramp 79
DevExtTramp 80
DevExtTramp 81
DevExtTramp 82
DevExtTramp 83
DevExtTramp 84
DevExtTramp 85
DevExtTramp 86
DevExtTramp 87
DevExtTramp 88
DevExtTramp 89
DevExtTramp 90
DevExtTramp 91
DevExtTramp 92
DevExtTramp 93
DevExtTramp 94
DevExtTramp 95
DevExtTramp 96
DevExtTramp 97
DevExtTramp 98
DevExtTramp 99
DevExtTramp 100
DevExtTramp 101
DevExtTramp 102
DevExtTramp 103
DevExtTramp 104
DevExtTramp 105
DevExtTramp 106
DevExtTramp 107
DevExtTramp 108
DevExtTramp 109
DevExtTramp 110
DevExtTramp 111
DevExtTramp 112
DevExtTramp 113
DevExtTramp 114
DevExtTramp 115
DevExtTramp 116
DevExtTramp 117
DevExtTramp 118
DevExtTramp 119
DevExtTramp 120
DevExtTramp 121
DevExtTramp 122
DevExtTramp 123
DevExtTramp 124
DevExtTramp 125
DevExtTramp 126
DevExtTramp 127
DevExtTramp 128
DevExtTramp 129
DevExtTramp 130
DevExtTramp 131
DevExtTramp 132
DevExtTramp 133
DevExtTramp 134
DevExtTramp 135
DevExtTramp 136
DevExtTramp 137
DevExtTramp 138
DevExtTramp 139
DevExtTramp 140
DevExtTramp 141
DevExtTramp 142
DevExtTramp 143
DevExtTramp 144
DevExtTramp 145
DevExtTramp 146
DevExtTramp 147
DevExtTramp 148
DevExtTramp 149
DevExtTramp 150
DevExtTramp 151
DevExtTramp 152
DevExtTramp 153
DevExtTramp 154
DevExtTramp 155
DevExtTramp 156
DevExtTramp 157
DevExtTramp 158
DevExtTramp 159
DevExtTramp 160
DevExtTramp 161
DevExtTramp 162
DevExtTramp 163
DevExtTramp 164
DevExtTramp 165
DevExtTramp 166
DevExtTramp 167
DevExtTramp 168
DevExtTramp 169
DevExtTramp 170
DevExtTramp 171
DevExtTramp 172
DevExtTramp 173
DevExtTramp 174
DevExtTramp 175
DevExtTramp 176
DevExtTramp 177
DevExtTramp 178
DevExtTramp 179
DevExtTramp 180
DevExtTramp 181
DevExtTramp 182
DevExtTramp 183
DevExtTramp 184
DevExtTramp 185
DevExtTramp 186
DevExtTramp 187
DevExtTramp 188
DevExtTramp 189
DevExtTramp 190
DevExtTramp 191
DevExtTramp 192
DevExtTramp 193
DevExtTramp 194
DevExtTramp 195
DevExtTramp 196
DevExtTramp 197
DevExtTramp 198
DevExtTramp 199
DevExtTramp 200
DevExtTramp 201
DevExtTramp 202
DevExtTramp 203
DevExtTramp 204
DevExtTramp 205
DevExtTramp 206
DevExtTramp 207
DevExtTramp 208
DevExtTramp 209
DevExtTramp 210
DevExtTramp 211
DevExtTramp 212
DevExtTramp 213
DevExtTramp 214
DevExtTramp 215
DevExtTramp 216
DevExtTramp 217
DevExtTramp 218
DevExtTramp 219
DevExtTramp 220
DevExtTramp 221
DevExtTramp 222
DevExtTramp 223
DevExtTramp 224
DevExtTramp 225
DevExtTramp 226
DevExtTramp 227
DevExtTramp 228
DevExtTramp 229
DevExtTramp 230
DevExtTramp 231
DevExtTramp 232
DevExtTramp 233
DevExtTramp 234
DevExtTramp 235
DevExtTramp 236
DevExtTramp 237
DevExtTramp 238
DevExtTramp 239
DevExtTramp 240
DevExtTramp 241
DevExtTramp 242
DevExtTramp 243
DevExtTramp 244
DevExtTramp 245
DevExtTramp 246
DevExtTramp 247
DevExtTramp 248
DevExtTramp 249
end


@@ -215,6 +215,9 @@ VKAPI_ATTR bool VKAPI_CALL loader_icd_init_entries(struct loader_icd_term *icd_t
LOOKUP_GIPA(CreateMacOSSurfaceMVK, false);
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
LOOKUP_GIPA(GetPhysicalDeviceMultisamplePropertiesEXT, false);
#undef LOOKUP_GIPA
return true;
@@ -421,6 +424,14 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo
table->GetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2KHR)gpa(dev, "vkGetBufferMemoryRequirements2KHR");
table->GetImageSparseMemoryRequirements2KHR = (PFN_vkGetImageSparseMemoryRequirements2KHR)gpa(dev, "vkGetImageSparseMemoryRequirements2KHR");
// ---- VK_KHR_sampler_ycbcr_conversion extension commands
table->CreateSamplerYcbcrConversionKHR = (PFN_vkCreateSamplerYcbcrConversionKHR)gpa(dev, "vkCreateSamplerYcbcrConversionKHR");
table->DestroySamplerYcbcrConversionKHR = (PFN_vkDestroySamplerYcbcrConversionKHR)gpa(dev, "vkDestroySamplerYcbcrConversionKHR");
// ---- VK_KHR_bind_memory2 extension commands
table->BindBufferMemory2KHR = (PFN_vkBindBufferMemory2KHR)gpa(dev, "vkBindBufferMemory2KHR");
table->BindImageMemory2KHR = (PFN_vkBindImageMemory2KHR)gpa(dev, "vkBindImageMemory2KHR");
// ---- VK_EXT_debug_marker extension commands
table->DebugMarkerSetObjectTagEXT = (PFN_vkDebugMarkerSetObjectTagEXT)gpa(dev, "vkDebugMarkerSetObjectTagEXT");
table->DebugMarkerSetObjectNameEXT = (PFN_vkDebugMarkerSetObjectNameEXT)gpa(dev, "vkDebugMarkerSetObjectNameEXT");
@@ -432,6 +443,9 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo
table->CmdDrawIndirectCountAMD = (PFN_vkCmdDrawIndirectCountAMD)gpa(dev, "vkCmdDrawIndirectCountAMD");
table->CmdDrawIndexedIndirectCountAMD = (PFN_vkCmdDrawIndexedIndirectCountAMD)gpa(dev, "vkCmdDrawIndexedIndirectCountAMD");
// ---- VK_AMD_shader_info extension commands
table->GetShaderInfoAMD = (PFN_vkGetShaderInfoAMD)gpa(dev, "vkGetShaderInfoAMD");
// ---- VK_NV_external_memory_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
table->GetMemoryWin32HandleNV = (PFN_vkGetMemoryWin32HandleNV)gpa(dev, "vkGetMemoryWin32HandleNV");
@@ -439,13 +453,11 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo
// ---- VK_KHX_device_group extension commands
table->GetDeviceGroupPeerMemoryFeaturesKHX = (PFN_vkGetDeviceGroupPeerMemoryFeaturesKHX)gpa(dev, "vkGetDeviceGroupPeerMemoryFeaturesKHX");
table->BindBufferMemory2KHX = (PFN_vkBindBufferMemory2KHX)gpa(dev, "vkBindBufferMemory2KHX");
table->BindImageMemory2KHX = (PFN_vkBindImageMemory2KHX)gpa(dev, "vkBindImageMemory2KHX");
table->CmdSetDeviceMaskKHX = (PFN_vkCmdSetDeviceMaskKHX)gpa(dev, "vkCmdSetDeviceMaskKHX");
table->CmdDispatchBaseKHX = (PFN_vkCmdDispatchBaseKHX)gpa(dev, "vkCmdDispatchBaseKHX");
table->GetDeviceGroupPresentCapabilitiesKHX = (PFN_vkGetDeviceGroupPresentCapabilitiesKHX)gpa(dev, "vkGetDeviceGroupPresentCapabilitiesKHX");
table->GetDeviceGroupSurfacePresentModesKHX = (PFN_vkGetDeviceGroupSurfacePresentModesKHX)gpa(dev, "vkGetDeviceGroupSurfacePresentModesKHX");
table->AcquireNextImage2KHX = (PFN_vkAcquireNextImage2KHX)gpa(dev, "vkAcquireNextImage2KHX");
table->CmdDispatchBaseKHX = (PFN_vkCmdDispatchBaseKHX)gpa(dev, "vkCmdDispatchBaseKHX");
// ---- VK_NVX_device_generated_commands extension commands
table->CmdProcessCommandsNVX = (PFN_vkCmdProcessCommandsNVX)gpa(dev, "vkCmdProcessCommandsNVX");
@@ -475,6 +487,18 @@ VKAPI_ATTR void VKAPI_CALL loader_init_device_extension_dispatch_table(struct lo
// ---- VK_EXT_hdr_metadata extension commands
table->SetHdrMetadataEXT = (PFN_vkSetHdrMetadataEXT)gpa(dev, "vkSetHdrMetadataEXT");
// ---- VK_EXT_sample_locations extension commands
table->CmdSetSampleLocationsEXT = (PFN_vkCmdSetSampleLocationsEXT)gpa(dev, "vkCmdSetSampleLocationsEXT");
// ---- VK_EXT_validation_cache extension commands
table->CreateValidationCacheEXT = (PFN_vkCreateValidationCacheEXT)gpa(dev, "vkCreateValidationCacheEXT");
table->DestroyValidationCacheEXT = (PFN_vkDestroyValidationCacheEXT)gpa(dev, "vkDestroyValidationCacheEXT");
table->MergeValidationCachesEXT = (PFN_vkMergeValidationCachesEXT)gpa(dev, "vkMergeValidationCachesEXT");
table->GetValidationCacheDataEXT = (PFN_vkGetValidationCacheDataEXT)gpa(dev, "vkGetValidationCacheDataEXT");
// ---- VK_EXT_external_memory_host extension commands
table->GetMemoryHostPointerPropertiesEXT = (PFN_vkGetMemoryHostPointerPropertiesEXT)gpa(dev, "vkGetMemoryHostPointerPropertiesEXT");
}
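
Every entry in this function follows the same shape: resolve the extension command once by name through `gpa` and cache the pointer in the dispatch table, so later calls skip the string lookup. A minimal sketch of that pattern, with `my_table`/`init_table` as illustrative names:

```
#include <vulkan/vulkan.h>

/* Hedged sketch of the gpa-caching pattern used above; the single-entry
 * table is illustrative, not the loader's real structure. */
struct my_table {
    PFN_vkSetHdrMetadataEXT SetHdrMetadataEXT;
};

static void init_table(struct my_table *table, VkDevice dev,
                       PFN_vkGetDeviceProcAddr gpa) {
    /* Resolve once at device creation; calls then go straight through
     * the cached pointer. */
    table->SetHdrMetadataEXT =
        (PFN_vkSetHdrMetadataEXT)gpa(dev, "vkSetHdrMetadataEXT");
}
```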
// Init Instance function pointer dispatch table with core commands
@@ -628,6 +652,9 @@ VKAPI_ATTR void VKAPI_CALL loader_init_instance_extension_dispatch_table(VkLayer
#ifdef VK_USE_PLATFORM_MACOS_MVK
table->CreateMacOSSurfaceMVK = (PFN_vkCreateMacOSSurfaceMVK)gpa(inst, "vkCreateMacOSSurfaceMVK");
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
table->GetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)gpa(inst, "vkGetPhysicalDeviceMultisamplePropertiesEXT");
}
// Device command lookup function
@@ -825,6 +852,14 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis
if (!strcmp(name, "GetBufferMemoryRequirements2KHR")) return (void *)table->GetBufferMemoryRequirements2KHR;
if (!strcmp(name, "GetImageSparseMemoryRequirements2KHR")) return (void *)table->GetImageSparseMemoryRequirements2KHR;
// ---- VK_KHR_sampler_ycbcr_conversion extension commands
if (!strcmp(name, "CreateSamplerYcbcrConversionKHR")) return (void *)table->CreateSamplerYcbcrConversionKHR;
if (!strcmp(name, "DestroySamplerYcbcrConversionKHR")) return (void *)table->DestroySamplerYcbcrConversionKHR;
// ---- VK_KHR_bind_memory2 extension commands
if (!strcmp(name, "BindBufferMemory2KHR")) return (void *)table->BindBufferMemory2KHR;
if (!strcmp(name, "BindImageMemory2KHR")) return (void *)table->BindImageMemory2KHR;
// ---- VK_EXT_debug_marker extension commands
if (!strcmp(name, "DebugMarkerSetObjectTagEXT")) return (void *)table->DebugMarkerSetObjectTagEXT;
if (!strcmp(name, "DebugMarkerSetObjectNameEXT")) return (void *)table->DebugMarkerSetObjectNameEXT;
@@ -836,6 +871,9 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis
if (!strcmp(name, "CmdDrawIndirectCountAMD")) return (void *)table->CmdDrawIndirectCountAMD;
if (!strcmp(name, "CmdDrawIndexedIndirectCountAMD")) return (void *)table->CmdDrawIndexedIndirectCountAMD;
// ---- VK_AMD_shader_info extension commands
if (!strcmp(name, "GetShaderInfoAMD")) return (void *)table->GetShaderInfoAMD;
// ---- VK_NV_external_memory_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
if (!strcmp(name, "GetMemoryWin32HandleNV")) return (void *)table->GetMemoryWin32HandleNV;
@@ -843,13 +881,11 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis
// ---- VK_KHX_device_group extension commands
if (!strcmp(name, "GetDeviceGroupPeerMemoryFeaturesKHX")) return (void *)table->GetDeviceGroupPeerMemoryFeaturesKHX;
if (!strcmp(name, "BindBufferMemory2KHX")) return (void *)table->BindBufferMemory2KHX;
if (!strcmp(name, "BindImageMemory2KHX")) return (void *)table->BindImageMemory2KHX;
if (!strcmp(name, "CmdSetDeviceMaskKHX")) return (void *)table->CmdSetDeviceMaskKHX;
if (!strcmp(name, "CmdDispatchBaseKHX")) return (void *)table->CmdDispatchBaseKHX;
if (!strcmp(name, "GetDeviceGroupPresentCapabilitiesKHX")) return (void *)table->GetDeviceGroupPresentCapabilitiesKHX;
if (!strcmp(name, "GetDeviceGroupSurfacePresentModesKHX")) return (void *)table->GetDeviceGroupSurfacePresentModesKHX;
if (!strcmp(name, "AcquireNextImage2KHX")) return (void *)table->AcquireNextImage2KHX;
if (!strcmp(name, "CmdDispatchBaseKHX")) return (void *)table->CmdDispatchBaseKHX;
// ---- VK_NVX_device_generated_commands extension commands
if (!strcmp(name, "CmdProcessCommandsNVX")) return (void *)table->CmdProcessCommandsNVX;
@@ -880,6 +916,18 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_device_dispatch_table(const VkLayerDis
// ---- VK_EXT_hdr_metadata extension commands
if (!strcmp(name, "SetHdrMetadataEXT")) return (void *)table->SetHdrMetadataEXT;
// ---- VK_EXT_sample_locations extension commands
if (!strcmp(name, "CmdSetSampleLocationsEXT")) return (void *)table->CmdSetSampleLocationsEXT;
// ---- VK_EXT_validation_cache extension commands
if (!strcmp(name, "CreateValidationCacheEXT")) return (void *)table->CreateValidationCacheEXT;
if (!strcmp(name, "DestroyValidationCacheEXT")) return (void *)table->DestroyValidationCacheEXT;
if (!strcmp(name, "MergeValidationCachesEXT")) return (void *)table->MergeValidationCachesEXT;
if (!strcmp(name, "GetValidationCacheDataEXT")) return (void *)table->GetValidationCacheDataEXT;
// ---- VK_EXT_external_memory_host extension commands
if (!strcmp(name, "GetMemoryHostPointerPropertiesEXT")) return (void *)table->GetMemoryHostPointerPropertiesEXT;
return NULL;
}
@@ -1037,6 +1085,9 @@ VKAPI_ATTR void* VKAPI_CALL loader_lookup_instance_dispatch_table(const VkLayerI
if (!strcmp(name, "CreateMacOSSurfaceMVK")) return (void *)table->CreateMacOSSurfaceMVK;
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
if (!strcmp(name, "GetPhysicalDeviceMultisamplePropertiesEXT")) return (void *)table->GetPhysicalDeviceMultisamplePropertiesEXT;
*found_name = false;
return NULL;
}
@@ -1268,11 +1319,50 @@ VKAPI_ATTR void VKAPI_CALL GetImageSparseMemoryRequirements2KHR(
}
// ---- VK_KHR_sampler_ycbcr_conversion extension trampoline/terminators
VKAPI_ATTR VkResult VKAPI_CALL CreateSamplerYcbcrConversionKHR(
VkDevice device,
const VkSamplerYcbcrConversionCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSamplerYcbcrConversionKHR* pYcbcrConversion) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->CreateSamplerYcbcrConversionKHR(device, pCreateInfo, pAllocator, pYcbcrConversion);
}
VKAPI_ATTR void VKAPI_CALL DestroySamplerYcbcrConversionKHR(
VkDevice device,
VkSamplerYcbcrConversionKHR ycbcrConversion,
const VkAllocationCallbacks* pAllocator) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
disp->DestroySamplerYcbcrConversionKHR(device, ycbcrConversion, pAllocator);
}
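
For reference, a hedged usage sketch of the command this trampoline forwards; it assumes a device created with VK_KHR_sampler_ycbcr_conversion enabled and NV12-style planar data, and calls the entry point directly for brevity (in practice it is resolved through vkGetDeviceProcAddr):

```
#include <vulkan/vulkan.h>

/* Hedged sketch: create a 709 narrow-range conversion for NV12 content. */
VkSamplerYcbcrConversionKHR make_conversion(VkDevice device) {
    VkSamplerYcbcrConversionCreateInfoKHR info = {0};
    info.sType = VK_STRUCTURE_TYPE_SAMPLER_YCBCR_CONVERSION_CREATE_INFO_KHR;
    info.format = VK_FORMAT_G8_B8R8_2PLANE_420_UNORM_KHR; /* NV12 */
    info.ycbcrModel = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR;
    info.ycbcrRange = VK_SAMPLER_YCBCR_RANGE_ITU_NARROW_KHR;
    info.xChromaOffset = VK_CHROMA_LOCATION_MIDPOINT_KHR;
    info.yChromaOffset = VK_CHROMA_LOCATION_MIDPOINT_KHR;
    info.chromaFilter = VK_FILTER_LINEAR;
    /* info.components left zeroed: identity swizzle. */

    VkSamplerYcbcrConversionKHR conversion = VK_NULL_HANDLE;
    vkCreateSamplerYcbcrConversionKHR(device, &info, NULL, &conversion);
    return conversion;
}
```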
// ---- VK_KHR_bind_memory2 extension trampoline/terminators
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHR(
VkDevice device,
uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHR* pBindInfos) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->BindBufferMemory2KHR(device, bindInfoCount, pBindInfos);
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHR(
VkDevice device,
uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHR* pBindInfos) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->BindImageMemory2KHR(device, bindInfoCount, pBindInfos);
}
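
The point of VK_KHR_bind_memory2 is batching: several bindings go through one call. A hedged sketch, assuming the images, memory, and offsets already satisfy the usual memory-requirements rules:

```
#include <vulkan/vulkan.h>

/* Hedged sketch: bind two images from one allocation in a single call. */
void bind_pair(VkDevice device, VkImage a, VkImage b,
               VkDeviceMemory mem, VkDeviceSize offset_b) {
    VkBindImageMemoryInfoKHR infos[2] = {{0}, {0}};
    infos[0].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR;
    infos[0].image = a;
    infos[0].memory = mem;
    infos[0].memoryOffset = 0;
    infos[1].sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR;
    infos[1].image = b;
    infos[1].memory = mem;
    infos[1].memoryOffset = offset_b;
    vkBindImageMemory2KHR(device, 2, infos); /* one call, two bindings */
}
```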
// ---- VK_EXT_debug_marker extension trampoline/terminators
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(
VkDevice device,
VkDebugMarkerObjectTagInfoEXT* pTagInfo) {
const VkDebugMarkerObjectTagInfoEXT* pTagInfo) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
VkDebugMarkerObjectTagInfoEXT local_tag_info;
memcpy(&local_tag_info, pTagInfo, sizeof(VkDebugMarkerObjectTagInfoEXT));
@@ -1286,7 +1376,7 @@ VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(
VKAPI_ATTR VkResult VKAPI_CALL terminator_DebugMarkerSetObjectTagEXT(
VkDevice device,
VkDebugMarkerObjectTagInfoEXT* pTagInfo) {
const VkDebugMarkerObjectTagInfoEXT* pTagInfo) {
uint32_t icd_index = 0;
struct loader_device *dev;
struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index);
@@ -1314,7 +1404,7 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_DebugMarkerSetObjectTagEXT(
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(
VkDevice device,
VkDebugMarkerObjectNameInfoEXT* pNameInfo) {
const VkDebugMarkerObjectNameInfoEXT* pNameInfo) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
VkDebugMarkerObjectNameInfoEXT local_name_info;
memcpy(&local_name_info, pNameInfo, sizeof(VkDebugMarkerObjectNameInfoEXT));
@@ -1328,7 +1418,7 @@ VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(
VKAPI_ATTR VkResult VKAPI_CALL terminator_DebugMarkerSetObjectNameEXT(
VkDevice device,
VkDebugMarkerObjectNameInfoEXT* pNameInfo) {
const VkDebugMarkerObjectNameInfoEXT* pNameInfo) {
uint32_t icd_index = 0;
struct loader_device *dev;
struct loader_icd_term *icd_term = loader_get_icd_and_device(device, &dev, &icd_index);
@@ -1356,7 +1446,7 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_DebugMarkerSetObjectNameEXT(
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerBeginEXT(
VkCommandBuffer commandBuffer,
VkDebugMarkerMarkerInfoEXT* pMarkerInfo) {
const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) {
const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer);
disp->CmdDebugMarkerBeginEXT(commandBuffer, pMarkerInfo);
}
@@ -1369,7 +1459,7 @@ VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerEndEXT(
VKAPI_ATTR void VKAPI_CALL CmdDebugMarkerInsertEXT(
VkCommandBuffer commandBuffer,
VkDebugMarkerMarkerInfoEXT* pMarkerInfo) {
const VkDebugMarkerMarkerInfoEXT* pMarkerInfo) {
const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer);
disp->CmdDebugMarkerInsertEXT(commandBuffer, pMarkerInfo);
}
@@ -1402,6 +1492,20 @@ VKAPI_ATTR void VKAPI_CALL CmdDrawIndexedIndirectCountAMD(
}
// ---- VK_AMD_shader_info extension trampoline/terminators
VKAPI_ATTR VkResult VKAPI_CALL GetShaderInfoAMD(
VkDevice device,
VkPipeline pipeline,
VkShaderStageFlagBits shaderStage,
VkShaderInfoTypeAMD infoType,
size_t* pInfoSize,
void* pInfo) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->GetShaderInfoAMD(device, pipeline, shaderStage, infoType, pInfoSize, pInfo);
}
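
vkGetShaderInfoAMD follows the standard two-call Vulkan query pattern: a first call with a NULL buffer reports the size, a second call fills it. A hedged sketch, assuming VK_AMD_shader_info is enabled on the device:

```
#include <stdio.h>
#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Hedged sketch: dump the fragment stage disassembly for a pipeline. */
void dump_disassembly(VkDevice device, VkPipeline pipeline) {
    size_t size = 0;
    /* First call: pInfo == NULL just reports the required size. */
    if (vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
                           VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size,
                           NULL) != VK_SUCCESS) {
        return;
    }
    char *text = (char *)malloc(size);
    if (text && vkGetShaderInfoAMD(device, pipeline, VK_SHADER_STAGE_FRAGMENT_BIT,
                                   VK_SHADER_INFO_TYPE_DISASSEMBLY_AMD, &size,
                                   text) == VK_SUCCESS) {
        fwrite(text, 1, size, stdout);
    }
    free(text);
}
```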
// ---- VK_NV_external_memory_win32 extension trampoline/terminators
#ifdef VK_USE_PLATFORM_WIN32_KHR
@@ -1428,22 +1532,6 @@ VKAPI_ATTR void VKAPI_CALL GetDeviceGroupPeerMemoryFeaturesKHX(
disp->GetDeviceGroupPeerMemoryFeaturesKHX(device, heapIndex, localDeviceIndex, remoteDeviceIndex, pPeerMemoryFeatures);
}
VKAPI_ATTR VkResult VKAPI_CALL BindBufferMemory2KHX(
VkDevice device,
uint32_t bindInfoCount,
const VkBindBufferMemoryInfoKHX* pBindInfos) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->BindBufferMemory2KHX(device, bindInfoCount, pBindInfos);
}
VKAPI_ATTR VkResult VKAPI_CALL BindImageMemory2KHX(
VkDevice device,
uint32_t bindInfoCount,
const VkBindImageMemoryInfoKHX* pBindInfos) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->BindImageMemory2KHX(device, bindInfoCount, pBindInfos);
}
VKAPI_ATTR void VKAPI_CALL CmdSetDeviceMaskKHX(
VkCommandBuffer commandBuffer,
uint32_t deviceMask) {
@@ -1451,6 +1539,18 @@ VKAPI_ATTR void VKAPI_CALL CmdSetDeviceMaskKHX(
disp->CmdSetDeviceMaskKHX(commandBuffer, deviceMask);
}
VKAPI_ATTR void VKAPI_CALL CmdDispatchBaseKHX(
VkCommandBuffer commandBuffer,
uint32_t baseGroupX,
uint32_t baseGroupY,
uint32_t baseGroupZ,
uint32_t groupCountX,
uint32_t groupCountY,
uint32_t groupCountZ) {
const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer);
disp->CmdDispatchBaseKHX(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
}
VKAPI_ATTR VkResult VKAPI_CALL GetDeviceGroupPresentCapabilitiesKHX(
VkDevice device,
VkDeviceGroupPresentCapabilitiesKHX* pDeviceGroupPresentCapabilities) {
@@ -1483,26 +1583,6 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_GetDeviceGroupSurfacePresentModesKHX(
return VK_SUCCESS;
}
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImage2KHX(
VkDevice device,
const VkAcquireNextImageInfoKHX* pAcquireInfo,
uint32_t* pImageIndex) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->AcquireNextImage2KHX(device, pAcquireInfo, pImageIndex);
}
VKAPI_ATTR void VKAPI_CALL CmdDispatchBaseKHX(
VkCommandBuffer commandBuffer,
uint32_t baseGroupX,
uint32_t baseGroupY,
uint32_t baseGroupZ,
uint32_t groupCountX,
uint32_t groupCountY,
uint32_t groupCountZ) {
const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer);
disp->CmdDispatchBaseKHX(commandBuffer, baseGroupX, baseGroupY, baseGroupZ, groupCountX, groupCountY, groupCountZ);
}
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDevicePresentRectanglesKHX(
VkPhysicalDevice physicalDevice,
VkSurfaceKHR surface,
@@ -1533,6 +1613,14 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_GetPhysicalDevicePresentRectanglesKHX(
return icd_term->dispatch.GetPhysicalDevicePresentRectanglesKHX(phys_dev_term->phys_dev, surface, pRectCount, pRects);
}
VKAPI_ATTR VkResult VKAPI_CALL AcquireNextImage2KHX(
VkDevice device,
const VkAcquireNextImageInfoKHX* pAcquireInfo,
uint32_t* pImageIndex) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->AcquireNextImage2KHX(device, pAcquireInfo, pImageIndex);
}
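
A hedged sketch of the info-struct acquire path this trampoline serves; VK_KHX_device_group was experimental and the struct matches what later shipped in the KHR form, so field names here assume the headers of this era:

```
#include <vulkan/vulkan.h>

/* Hedged sketch: acquire the next image with an explicit device mask. */
uint32_t acquire(VkDevice device, VkSwapchainKHR swapchain, VkSemaphore sem) {
    VkAcquireNextImageInfoKHX info = {0};
    info.sType = VK_STRUCTURE_TYPE_ACQUIRE_NEXT_IMAGE_INFO_KHX;
    info.swapchain = swapchain;
    info.timeout = UINT64_MAX;
    info.semaphore = sem;
    info.deviceMask = 0x1; /* single-GPU device group */

    uint32_t image_index = 0;
    vkAcquireNextImage2KHX(device, &info, &image_index);
    return image_index;
}
```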
// ---- VK_NN_vi_surface extension trampoline/terminators
@@ -1788,6 +1876,89 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateMacOSSurfaceMVK(
}
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension trampoline/terminators
VKAPI_ATTR void VKAPI_CALL CmdSetSampleLocationsEXT(
VkCommandBuffer commandBuffer,
const VkSampleLocationsInfoEXT* pSampleLocationsInfo) {
const VkLayerDispatchTable *disp = loader_get_dispatch(commandBuffer);
disp->CmdSetSampleLocationsEXT(commandBuffer, pSampleLocationsInfo);
}
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceMultisamplePropertiesEXT(
VkPhysicalDevice physicalDevice,
VkSampleCountFlagBits samples,
VkMultisamplePropertiesEXT* pMultisampleProperties) {
const VkLayerInstanceDispatchTable *disp;
VkPhysicalDevice unwrapped_phys_dev = loader_unwrap_physical_device(physicalDevice);
disp = loader_get_instance_layer_dispatch(physicalDevice);
disp->GetPhysicalDeviceMultisamplePropertiesEXT(unwrapped_phys_dev, samples, pMultisampleProperties);
}
VKAPI_ATTR void VKAPI_CALL terminator_GetPhysicalDeviceMultisamplePropertiesEXT(
VkPhysicalDevice physicalDevice,
VkSampleCountFlagBits samples,
VkMultisamplePropertiesEXT* pMultisampleProperties) {
struct loader_physical_device_term *phys_dev_term = (struct loader_physical_device_term *)physicalDevice;
struct loader_icd_term *icd_term = phys_dev_term->this_icd_term;
if (NULL == icd_term->dispatch.GetPhysicalDeviceMultisamplePropertiesEXT) {
loader_log(icd_term->this_instance, VK_DEBUG_REPORT_ERROR_BIT_EXT, 0,
"ICD associated with VkPhysicalDevice does not support GetPhysicalDeviceMultisamplePropertiesEXT");
return;  // Don't call through a NULL dispatch entry.
}
icd_term->dispatch.GetPhysicalDeviceMultisamplePropertiesEXT(phys_dev_term->phys_dev, samples, pMultisampleProperties);
}
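
A short hedged sketch of calling the new query from application code; `phys_dev` is assumed to come from vkEnumeratePhysicalDevices on an instance with VK_EXT_sample_locations available:

```
#include <vulkan/vulkan.h>

/* Hedged sketch: ask what sample-location grids 4x MSAA supports. */
void query_grid(VkPhysicalDevice phys_dev) {
    VkMultisamplePropertiesEXT props = {0};
    props.sType = VK_STRUCTURE_TYPE_MULTISAMPLE_PROPERTIES_EXT;
    vkGetPhysicalDeviceMultisamplePropertiesEXT(phys_dev, VK_SAMPLE_COUNT_4_BIT,
                                                &props);
    /* props.maxSampleLocationGridSize bounds the custom sample grid that
     * vkCmdSetSampleLocationsEXT may use. */
}
```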
// ---- VK_EXT_validation_cache extension trampoline/terminators
VKAPI_ATTR VkResult VKAPI_CALL CreateValidationCacheEXT(
VkDevice device,
const VkValidationCacheCreateInfoEXT* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkValidationCacheEXT* pValidationCache) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->CreateValidationCacheEXT(device, pCreateInfo, pAllocator, pValidationCache);
}
VKAPI_ATTR void VKAPI_CALL DestroyValidationCacheEXT(
VkDevice device,
VkValidationCacheEXT validationCache,
const VkAllocationCallbacks* pAllocator) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
disp->DestroyValidationCacheEXT(device, validationCache, pAllocator);
}
VKAPI_ATTR VkResult VKAPI_CALL MergeValidationCachesEXT(
VkDevice device,
VkValidationCacheEXT dstCache,
uint32_t srcCacheCount,
const VkValidationCacheEXT* pSrcCaches) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->MergeValidationCachesEXT(device, dstCache, srcCacheCount, pSrcCaches);
}
VKAPI_ATTR VkResult VKAPI_CALL GetValidationCacheDataEXT(
VkDevice device,
VkValidationCacheEXT validationCache,
size_t* pDataSize,
void* pData) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->GetValidationCacheDataEXT(device, validationCache, pDataSize, pData);
}
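
Validation caches are meant to be persisted between runs, much like pipeline caches. A hedged sketch of serializing one so a later run can seed it through VkValidationCacheCreateInfoEXT::pInitialData:

```
#include <stdio.h>
#include <stdlib.h>
#include <vulkan/vulkan.h>

/* Hedged sketch: size query first, data fetch second, then write to disk. */
void save_cache(VkDevice device, VkValidationCacheEXT cache, const char *path) {
    size_t size = 0;
    if (vkGetValidationCacheDataEXT(device, cache, &size, NULL) != VK_SUCCESS)
        return;
    void *data = malloc(size);
    if (data && vkGetValidationCacheDataEXT(device, cache, &size, data) == VK_SUCCESS) {
        FILE *f = fopen(path, "wb");
        if (f) {
            fwrite(data, 1, size, f);
            fclose(f);
        }
    }
    free(data);
}
```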
// ---- VK_EXT_external_memory_host extension trampoline/terminators
VKAPI_ATTR VkResult VKAPI_CALL GetMemoryHostPointerPropertiesEXT(
VkDevice device,
VkExternalMemoryHandleTypeFlagBitsKHR handleType,
const void* pHostPointer,
VkMemoryHostPointerPropertiesEXT* pMemoryHostPointerProperties) {
const VkLayerDispatchTable *disp = loader_get_dispatch(device);
return disp->GetMemoryHostPointerPropertiesEXT(device, handleType, pHostPointer, pMemoryHostPointerProperties);
}
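
VK_EXT_external_memory_host lets an application import an existing host allocation as device memory. A hedged sketch of the property query this trampoline forwards; the pointer is assumed to satisfy the implementation's minImportedHostPointerAlignment:

```
#include <vulkan/vulkan.h>

/* Hedged sketch: ask which memory types can import a host allocation. */
uint32_t host_pointer_memory_types(VkDevice device, void *host_ptr) {
    VkMemoryHostPointerPropertiesEXT props = {0};
    props.sType = VK_STRUCTURE_TYPE_MEMORY_HOST_POINTER_PROPERTIES_EXT;
    vkGetMemoryHostPointerPropertiesEXT(
        device, VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT,
        host_ptr, &props);
    return props.memoryTypeBits; /* candidate types for the import */
}
```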
// GPA helpers for extensions
bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *name, void **addr) {
*addr = NULL;
@@ -1997,6 +2168,26 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na
return true;
}
// ---- VK_KHR_sampler_ycbcr_conversion extension commands
if (!strcmp("vkCreateSamplerYcbcrConversionKHR", name)) {
*addr = (void *)CreateSamplerYcbcrConversionKHR;
return true;
}
if (!strcmp("vkDestroySamplerYcbcrConversionKHR", name)) {
*addr = (void *)DestroySamplerYcbcrConversionKHR;
return true;
}
// ---- VK_KHR_bind_memory2 extension commands
if (!strcmp("vkBindBufferMemory2KHR", name)) {
*addr = (void *)BindBufferMemory2KHR;
return true;
}
if (!strcmp("vkBindImageMemory2KHR", name)) {
*addr = (void *)BindImageMemory2KHR;
return true;
}
// ---- VK_EXT_debug_marker extension commands
if (!strcmp("vkDebugMarkerSetObjectTagEXT", name)) {
*addr = (void *)DebugMarkerSetObjectTagEXT;
@@ -2029,6 +2220,12 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na
return true;
}
// ---- VK_AMD_shader_info extension commands
if (!strcmp("vkGetShaderInfoAMD", name)) {
*addr = (void *)GetShaderInfoAMD;
return true;
}
// ---- VK_NV_external_memory_capabilities extension commands
if (!strcmp("vkGetPhysicalDeviceExternalImageFormatPropertiesNV", name)) {
*addr = (ptr_instance->enabled_known_extensions.nv_external_memory_capabilities == 1)
@@ -2050,18 +2247,14 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na
*addr = (void *)GetDeviceGroupPeerMemoryFeaturesKHX;
return true;
}
if (!strcmp("vkBindBufferMemory2KHX", name)) {
*addr = (void *)BindBufferMemory2KHX;
return true;
}
if (!strcmp("vkBindImageMemory2KHX", name)) {
*addr = (void *)BindImageMemory2KHX;
return true;
}
if (!strcmp("vkCmdSetDeviceMaskKHX", name)) {
*addr = (void *)CmdSetDeviceMaskKHX;
return true;
}
if (!strcmp("vkCmdDispatchBaseKHX", name)) {
*addr = (void *)CmdDispatchBaseKHX;
return true;
}
if (!strcmp("vkGetDeviceGroupPresentCapabilitiesKHX", name)) {
*addr = (void *)GetDeviceGroupPresentCapabilitiesKHX;
return true;
@@ -2070,18 +2263,14 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na
*addr = (void *)GetDeviceGroupSurfacePresentModesKHX;
return true;
}
if (!strcmp("vkAcquireNextImage2KHX", name)) {
*addr = (void *)AcquireNextImage2KHX;
return true;
}
if (!strcmp("vkCmdDispatchBaseKHX", name)) {
*addr = (void *)CmdDispatchBaseKHX;
return true;
}
if (!strcmp("vkGetPhysicalDevicePresentRectanglesKHX", name)) {
*addr = (void *)GetPhysicalDevicePresentRectanglesKHX;
return true;
}
if (!strcmp("vkAcquireNextImage2KHX", name)) {
*addr = (void *)AcquireNextImage2KHX;
return true;
}
// ---- VK_NN_vi_surface extension commands
#ifdef VK_USE_PLATFORM_VI_NN
@@ -2238,6 +2427,40 @@ bool extension_instance_gpa(struct loader_instance *ptr_instance, const char *na
return true;
}
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
if (!strcmp("vkCmdSetSampleLocationsEXT", name)) {
*addr = (void *)CmdSetSampleLocationsEXT;
return true;
}
if (!strcmp("vkGetPhysicalDeviceMultisamplePropertiesEXT", name)) {
*addr = (void *)GetPhysicalDeviceMultisamplePropertiesEXT;
return true;
}
// ---- VK_EXT_validation_cache extension commands
if (!strcmp("vkCreateValidationCacheEXT", name)) {
*addr = (void *)CreateValidationCacheEXT;
return true;
}
if (!strcmp("vkDestroyValidationCacheEXT", name)) {
*addr = (void *)DestroyValidationCacheEXT;
return true;
}
if (!strcmp("vkMergeValidationCachesEXT", name)) {
*addr = (void *)MergeValidationCachesEXT;
return true;
}
if (!strcmp("vkGetValidationCacheDataEXT", name)) {
*addr = (void *)GetValidationCacheDataEXT;
return true;
}
// ---- VK_EXT_external_memory_host extension commands
if (!strcmp("vkGetMemoryHostPointerPropertiesEXT", name)) {
*addr = (void *)GetMemoryHostPointerPropertiesEXT;
return true;
}
return false;
}
@@ -2482,6 +2705,9 @@ const VkLayerInstanceDispatchTable instance_disp = {
#ifdef VK_USE_PLATFORM_MACOS_MVK
.CreateMacOSSurfaceMVK = terminator_CreateMacOSSurfaceMVK,
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
.GetPhysicalDeviceMultisamplePropertiesEXT = terminator_GetPhysicalDeviceMultisamplePropertiesEXT,
};
// A null-terminated list of all of the instance extensions supported by the loader.

View File

@@ -128,6 +128,7 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_CreateDevice(
const VkAllocationCallbacks* pAllocator,
VkDevice* pDevice);
VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceExtensionProperties(
const VkEnumerateInstanceExtensionPropertiesChain* chain,
const char* pLayerName,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties);
@@ -137,6 +138,7 @@ VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceExtensionProperties(
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateInstanceLayerProperties(
const VkEnumerateInstanceLayerPropertiesChain* chain,
uint32_t* pPropertyCount,
VkLayerProperties* pProperties);
VKAPI_ATTR VkResult VKAPI_CALL terminator_EnumerateDeviceLayerProperties(
@@ -312,6 +314,9 @@ struct loader_icd_term_dispatch {
#ifdef VK_USE_PLATFORM_MACOS_MVK
PFN_vkCreateMacOSSurfaceMVK CreateMacOSSurfaceMVK;
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT GetPhysicalDeviceMultisamplePropertiesEXT;
};
union loader_instance_extension_enables {

View File

@@ -1,8 +1,8 @@
/*
*
* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
* Copyright (c) 2015-2018 The Khronos Group Inc.
* Copyright (c) 2015-2018 Valve Corporation
* Copyright (c) 2015-2018 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,6 +18,7 @@
*
* Author: Ian Elliot <ian@lunarg.com>
* Author: Jon Ashburn <jon@lunarg.com>
* Author: Lenny Komow <lenny@lunarg.com>
*
*/
#pragma once
@@ -70,6 +71,7 @@
#define LAYERS_SOURCE_PATH NULL
#endif
#define LAYERS_PATH_ENV "VK_LAYER_PATH"
#define ENABLED_LAYERS_ENV "VK_INSTANCE_LAYERS"
#define RELATIVE_VK_DRIVERS_INFO VULKAN_DIR VULKAN_ICDCONF_DIR
#define RELATIVE_VK_ELAYERS_INFO VULKAN_DIR VULKAN_ELAYERCONF_DIR
@@ -116,13 +118,11 @@ static inline const char *loader_platform_get_proc_address_error(const char *nam
// Threads:
typedef pthread_t loader_platform_thread;
#define THREAD_LOCAL_DECL __thread
#define LOADER_PLATFORM_THREAD_ONCE_DECLARATION(var) pthread_once_t var = PTHREAD_ONCE_INIT;
#define LOADER_PLATFORM_THREAD_ONCE_DEFINITION(var) pthread_once_t var;
static inline void loader_platform_thread_once(pthread_once_t *ctl, void (*func)(void)) {
assert(func != NULL);
assert(ctl != NULL);
pthread_once(ctl, func);
}
// The once init functionality is not used on Linux
#define LOADER_PLATFORM_THREAD_ONCE_DECLARATION(var)
#define LOADER_PLATFORM_THREAD_ONCE_DEFINITION(var)
#define LOADER_PLATFORM_THREAD_ONCE(ctl, func)
// Thread IDs:
typedef pthread_t loader_platform_thread_id;
@@ -182,11 +182,38 @@ static inline void loader_platform_thread_cond_broadcast(loader_platform_thread_
#define LAYERS_SOURCE_PATH NULL
#endif
#define LAYERS_PATH_ENV "VK_LAYER_PATH"
#define ENABLED_LAYERS_ENV "VK_INSTANCE_LAYERS"
#define RELATIVE_VK_DRIVERS_INFO ""
#define RELATIVE_VK_ELAYERS_INFO ""
#define RELATIVE_VK_ILAYERS_INFO ""
#define PRINTF_SIZE_T_SPECIFIER "%Iu"
#if defined(_WIN32)
// Get the key for the plug 'n play driver registry
// The string returned by this function should NOT be freed
static inline const char *LoaderPnpDriverRegistry() {
BOOL is_wow;
IsWow64Process(GetCurrentProcess(), &is_wow);
return is_wow ? (API_NAME "DriverNameWow") : (API_NAME "DriverName");
}
// Get the key for the plug 'n play explicit layer registry
// The string returned by this function should NOT be freed
static inline const char *LoaderPnpELayerRegistry() {
BOOL is_wow;
IsWow64Process(GetCurrentProcess(), &is_wow);
return is_wow ? (API_NAME "ExplicitLayersWow") : (API_NAME "ExplicitLayers");
}
// Get the key for the plug 'n play implicit layer registry
// The string returned by this function should NOT be freed
static inline const char *LoaderPnpILayerRegistry() {
BOOL is_wow;
IsWow64Process(GetCurrentProcess(), &is_wow);
return is_wow ? (API_NAME "ImplicitLayersWow") : (API_NAME "ImplicitLayers");
}
#endif
// File IO
static bool loader_platform_file_exists(const char *path) {
if ((_access(path, 0)) == -1)
@@ -254,7 +281,7 @@ static loader_platform_dl_handle loader_platform_open_library(const char *lib_pa
}
static char *loader_platform_open_library_error(const char *libPath) {
static char errorMsg[164];
(void)snprintf(errorMsg, 163, "Failed to open dynamic library \"%s\" with error %d", libPath, GetLastError());
(void)snprintf(errorMsg, 163, "Failed to open dynamic library \"%s\" with error %lu", libPath, GetLastError());
return errorMsg;
}
static void loader_platform_close_library(loader_platform_dl_handle library) { FreeLibrary(library); }
@@ -272,19 +299,29 @@ static char *loader_platform_get_proc_address_error(const char *name) {
// Threads:
typedef HANDLE loader_platform_thread;
#define THREAD_LOCAL_DECL __declspec(thread)
// The once init functionality is not used when building a DLL on Windows. This is because there is no way to clean up the
// resources allocated by anything run through once init. This isn't a problem for static libraries, but it is for dynamic
// ones. When building a DLL, we use DllMain() instead so that those resources can be cleaned up properly.
#if defined(LOADER_DYNAMIC_LIB)
#define LOADER_PLATFORM_THREAD_ONCE_DECLARATION(var)
#define LOADER_PLATFORM_THREAD_ONCE_DEFINITION(var)
#define LOADER_PLATFORM_THREAD_ONCE(ctl, func)
#else
#define LOADER_PLATFORM_THREAD_ONCE_DECLARATION(var) INIT_ONCE var = INIT_ONCE_STATIC_INIT;
#define LOADER_PLATFORM_THREAD_ONCE_DEFINITION(var) INIT_ONCE var;
#define LOADER_PLATFORM_THREAD_ONCE(ctl, func) loader_platform_thread_once_fn(ctl, func)
static BOOL CALLBACK InitFuncWrapper(PINIT_ONCE InitOnce, PVOID Parameter, PVOID *Context) {
void (*func)(void) = (void (*)(void))Parameter;
func();
return TRUE;
}
static void loader_platform_thread_once(void *ctl, void (*func)(void)) {
static void loader_platform_thread_once_fn(void *ctl, void (*func)(void)) {
assert(func != NULL);
assert(ctl != NULL);
InitOnceExecuteOnce((PINIT_ONCE)ctl, InitFuncWrapper, func, NULL);
}
#endif
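
A hedged sketch of the DllMain route the comment above refers to; loader_initialize()/loader_release() are illustrative names standing in for whatever setup and teardown the DLL actually performs, not the loader's real symbols:

```
#include <windows.h>

void loader_initialize(void); /* hypothetical one-time setup */
void loader_release(void);    /* hypothetical cleanup */

BOOL WINAPI DllMain(HINSTANCE hinst, DWORD reason, LPVOID reserved) {
    switch (reason) {
        case DLL_PROCESS_ATTACH:
            loader_initialize(); /* replaces once init for DLL builds */
            break;
        case DLL_PROCESS_DETACH:
            if (reserved == NULL) { /* NULL means FreeLibrary, not exit */
                loader_release(); /* tear down what once init could not */
            }
            break;
    }
    return TRUE;
}
```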
// Thread IDs:
typedef DWORD loader_platform_thread_id;

View File

@@ -47,7 +47,7 @@
typedef VkResult (VKAPI_PTR *PFN_vkNegotiateLoaderICDInterfaceVersion)(uint32_t *pVersion);
// This is defined in vk_layer.h which will be found by the loader, but if an ICD is building against this
// flie directly, it won't be found.
// file directly, it won't be found.
#ifndef PFN_GetPhysicalDeviceProcAddr
typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
#endif

View File

@@ -48,6 +48,8 @@
#define CURRENT_LOADER_LAYER_INTERFACE_VERSION 2
#define MIN_SUPPORTED_LOADER_LAYER_INTERFACE_VERSION 1
#define VK_CURRENT_CHAIN_VERSION 1
// Version negotiation values
typedef enum VkNegotiateLayerStructType {
LAYER_NEGOTIATE_UNINTIALIZED = 0,
@@ -138,6 +140,43 @@ extern "C" {
VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct);
typedef enum VkChainType {
VK_CHAIN_TYPE_UNKNOWN = 0,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_EXTENSION_PROPERTIES = 1,
VK_CHAIN_TYPE_ENUMERATE_INSTANCE_LAYER_PROPERTIES = 2,
} VkChainType;
typedef struct VkChainHeader {
VkChainType type;
uint32_t version;
uint32_t size;
} VkChainHeader;
typedef struct VkEnumerateInstanceExtensionPropertiesChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceExtensionPropertiesChain *, const char *, uint32_t *,
VkExtensionProperties *);
const struct VkEnumerateInstanceExtensionPropertiesChain *pNextLink;
#if defined(__cplusplus)
inline VkResult CallDown(const char *pLayerName, uint32_t *pPropertyCount, VkExtensionProperties *pProperties) const {
return pfnNextLayer(pNextLink, pLayerName, pPropertyCount, pProperties);
}
#endif
} VkEnumerateInstanceExtensionPropertiesChain;
typedef struct VkEnumerateInstanceLayerPropertiesChain {
VkChainHeader header;
VkResult(VKAPI_PTR *pfnNextLayer)(const struct VkEnumerateInstanceLayerPropertiesChain *, uint32_t *, VkLayerProperties *);
const struct VkEnumerateInstanceLayerPropertiesChain *pNextLink;
#if defined(__cplusplus)
inline VkResult CallDown(uint32_t *pPropertyCount, VkLayerProperties *pProperties) const {
return pfnNextLayer(pNextLink, pPropertyCount, pProperties);
}
#endif
} VkEnumerateInstanceLayerPropertiesChain;
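
These chain structures let layers intercept the pre-instance functions: each layer receives its link and uses pfnNextLayer/pNextLink (or the C++ CallDown helper) to reach the next element. A minimal hedged sketch of a layer hook, with `MyLayer_...` as an illustrative export name and the include path assumed:

```
#include <vulkan/vulkan.h>
#include "vk_layer.h" /* assumed include path for the chain definitions */

/* Call down the chain first, then post-process the results. */
VKAPI_ATTR VkResult VKAPI_CALL MyLayer_EnumerateInstanceExtensionProperties(
    const VkEnumerateInstanceExtensionPropertiesChain *pChain,
    const char *pLayerName, uint32_t *pPropertyCount,
    VkExtensionProperties *pProperties) {
    VkResult result = pChain->pfnNextLayer(pChain->pNextLink, pLayerName,
                                           pPropertyCount, pProperties);
    /* A layer could inspect or filter pProperties here before returning. */
    return result;
}
```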
#ifdef __cplusplus
}
#endif

View File

@@ -177,6 +177,9 @@ typedef struct VkLayerInstanceDispatchTable_ {
#ifdef VK_USE_PLATFORM_MACOS_MVK
PFN_vkCreateMacOSSurfaceMVK CreateMacOSSurfaceMVK;
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_sample_locations extension commands
PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT GetPhysicalDeviceMultisamplePropertiesEXT;
} VkLayerInstanceDispatchTable;
// Device function pointer dispatch table
@@ -371,6 +374,14 @@ typedef struct VkLayerDispatchTable_ {
PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2KHR;
PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2KHR;
// ---- VK_KHR_sampler_ycbcr_conversion extension commands
PFN_vkCreateSamplerYcbcrConversionKHR CreateSamplerYcbcrConversionKHR;
PFN_vkDestroySamplerYcbcrConversionKHR DestroySamplerYcbcrConversionKHR;
// ---- VK_KHR_bind_memory2 extension commands
PFN_vkBindBufferMemory2KHR BindBufferMemory2KHR;
PFN_vkBindImageMemory2KHR BindImageMemory2KHR;
// ---- VK_EXT_debug_marker extension commands
PFN_vkDebugMarkerSetObjectTagEXT DebugMarkerSetObjectTagEXT;
PFN_vkDebugMarkerSetObjectNameEXT DebugMarkerSetObjectNameEXT;
@@ -382,6 +393,9 @@ typedef struct VkLayerDispatchTable_ {
PFN_vkCmdDrawIndirectCountAMD CmdDrawIndirectCountAMD;
PFN_vkCmdDrawIndexedIndirectCountAMD CmdDrawIndexedIndirectCountAMD;
// ---- VK_AMD_shader_info extension commands
PFN_vkGetShaderInfoAMD GetShaderInfoAMD;
// ---- VK_NV_external_memory_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetMemoryWin32HandleNV GetMemoryWin32HandleNV;
@@ -389,13 +403,11 @@ typedef struct VkLayerDispatchTable_ {
// ---- VK_KHX_device_group extension commands
PFN_vkGetDeviceGroupPeerMemoryFeaturesKHX GetDeviceGroupPeerMemoryFeaturesKHX;
PFN_vkBindBufferMemory2KHX BindBufferMemory2KHX;
PFN_vkBindImageMemory2KHX BindImageMemory2KHX;
PFN_vkCmdSetDeviceMaskKHX CmdSetDeviceMaskKHX;
PFN_vkCmdDispatchBaseKHX CmdDispatchBaseKHX;
PFN_vkGetDeviceGroupPresentCapabilitiesKHX GetDeviceGroupPresentCapabilitiesKHX;
PFN_vkGetDeviceGroupSurfacePresentModesKHX GetDeviceGroupSurfacePresentModesKHX;
PFN_vkAcquireNextImage2KHX AcquireNextImage2KHX;
PFN_vkCmdDispatchBaseKHX CmdDispatchBaseKHX;
// ---- VK_NVX_device_generated_commands extension commands
PFN_vkCmdProcessCommandsNVX CmdProcessCommandsNVX;
@@ -425,6 +437,18 @@ typedef struct VkLayerDispatchTable_ {
// ---- VK_EXT_hdr_metadata extension commands
PFN_vkSetHdrMetadataEXT SetHdrMetadataEXT;
// ---- VK_EXT_sample_locations extension commands
PFN_vkCmdSetSampleLocationsEXT CmdSetSampleLocationsEXT;
// ---- VK_EXT_validation_cache extension commands
PFN_vkCreateValidationCacheEXT CreateValidationCacheEXT;
PFN_vkDestroyValidationCacheEXT DestroyValidationCacheEXT;
PFN_vkMergeValidationCachesEXT MergeValidationCachesEXT;
PFN_vkGetValidationCacheDataEXT GetValidationCacheDataEXT;
// ---- VK_EXT_external_memory_host extension commands
PFN_vkGetMemoryHostPointerPropertiesEXT GetMemoryHostPointerPropertiesEXT;
} VkLayerDispatchTable;

View File

@@ -43,4 +43,27 @@
#endif // _WIN32
#endif // VK_SDK_PLATFORM_H
// Check for noexcept support using clang, with fallback to Windows or GCC version numbers
#ifndef NOEXCEPT
#if defined(__clang__)
#if __has_feature(cxx_noexcept)
#define HAS_NOEXCEPT
#endif
#else
#if defined(__GXX_EXPERIMENTAL_CXX0X__) && __GNUC__ * 10 + __GNUC_MINOR__ >= 46
#define HAS_NOEXCEPT
#else
#if defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023026 && defined(_HAS_EXCEPTIONS) && _HAS_EXCEPTIONS
#define HAS_NOEXCEPT
#endif
#endif
#endif
#ifdef HAS_NOEXCEPT
#define NOEXCEPT noexcept
#else
#define NOEXCEPT
#endif
#endif
#endif // VK_SDK_PLATFORM_H

File diff suppressed because it is too large

File diff suppressed because it is too large