Basic texture uploads/address lookups/etc
Freeing of descriptor sets when the GPU is finished with them.
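The descriptor-set freeing works by remembering, for every descriptor set handed out during a frame, the fence guarding the command buffer that set was recorded into; a per-frame scavenge pass then returns to the pool only those sets whose fence has signaled, i.e. sets the GPU is provably done reading. The diff below implements this with in_flight_sets_ and TextureCache::Scavenge(). A minimal standalone sketch of that pattern follows; the InFlightDescriptorSets wrapper and its use of std::shared_ptr<VkFence> are illustrative assumptions rather than code from this commit, and it assumes the pool was created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT so vkFreeDescriptorSets is allowed.

// Sketch only: fence-gated recycling of descriptor sets.
#include <memory>
#include <utility>
#include <vector>

#include <vulkan/vulkan.h>

class InFlightDescriptorSets {
 public:
  InFlightDescriptorSets(VkDevice device, VkDescriptorPool pool)
      : device_(device), pool_(pool) {}

  // Record a set that was bound for a submission guarded by `fence`.
  void Track(VkDescriptorSet set, std::shared_ptr<VkFence> fence) {
    in_flight_.push_back({set, std::move(fence)});
  }

  // Call once per frame: free every set whose fence has already signaled.
  void Scavenge() {
    for (auto it = in_flight_.begin(); it != in_flight_.end();) {
      if (vkGetFenceStatus(device_, *it->second) == VK_SUCCESS) {
        // GPU is finished with this set; safe to return it to the pool.
        vkFreeDescriptorSets(device_, pool_, 1, &it->first);
        it = in_flight_.erase(it);
        continue;
      }
      ++it;
    }
  }

 private:
  VkDevice device_;
  VkDescriptorPool pool_;
  std::vector<std::pair<VkDescriptorSet, std::shared_ptr<VkFence>>> in_flight_;
};

Deferring the free until the fence signals avoids recycling a set the GPU may still be reading; the commit applies the same fence-gated recycling to its staging ring buffer.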
commit b2457d7e72
parent 0e41774e36
@@ -26,19 +26,26 @@ using xe::ui::vulkan::CheckResult;
 
 constexpr uint32_t kMaxTextureSamplers = 32;
 
-TextureCache::TextureCache(RegisterFile* register_file,
+struct TextureConfig {
+  TextureFormat guest_format;
+  VkFormat host_format;
+};
+
+TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
                            TraceWriter* trace_writer,
                            ui::vulkan::VulkanDevice* device)
-    : register_file_(register_file),
+    : memory_(memory),
+      register_file_(register_file),
       trace_writer_(trace_writer),
-      device_(device) {
+      device_(device),
+      staging_buffer_(device) {
   // Descriptor pool used for all of our cached descriptors.
   VkDescriptorPoolCreateInfo descriptor_pool_info;
   descriptor_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
   descriptor_pool_info.pNext = nullptr;
   descriptor_pool_info.flags =
       VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
-  descriptor_pool_info.maxSets = 256;
+  descriptor_pool_info.maxSets = 4096;
   VkDescriptorPoolSize pool_sizes[2];
   pool_sizes[0].type = VK_DESCRIPTOR_TYPE_SAMPLER;
   pool_sizes[0].descriptorCount = 32;
@@ -81,50 +88,21 @@ TextureCache::TextureCache(RegisterFile* register_file,
                                     nullptr, &texture_descriptor_set_layout_);
   CheckResult(err, "vkCreateDescriptorSetLayout");
 
-  // Allocate memory for a staging buffer.
-  VkBufferCreateInfo staging_buffer_info;
-  staging_buffer_info.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-  staging_buffer_info.pNext = nullptr;
-  staging_buffer_info.flags = 0;
-  staging_buffer_info.size = 2048 * 2048 * 4;  // 16MB buffer
-  staging_buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-  staging_buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
-  staging_buffer_info.queueFamilyIndexCount = 0;
-  staging_buffer_info.pQueueFamilyIndices = nullptr;
-  err =
-      vkCreateBuffer(*device_, &staging_buffer_info, nullptr, &staging_buffer_);
-  CheckResult(err, "vkCreateBuffer");
-  if (err != VK_SUCCESS) {
-    // This isn't good.
+  int width = 4096;
+  int height = 4096;
+  if (!staging_buffer_.Initialize(width * height * 4,
+                                  VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) {
     assert_always();
-    return;
   }
 
-  VkMemoryRequirements staging_buffer_reqs;
-  vkGetBufferMemoryRequirements(*device_, staging_buffer_,
-                                &staging_buffer_reqs);
-  staging_buffer_mem_ = device_->AllocateMemory(staging_buffer_reqs);
-  assert_not_null(staging_buffer_mem_);
-
-  err = vkBindBufferMemory(*device_, staging_buffer_, staging_buffer_mem_, 0);
-  CheckResult(err, "vkBindBufferMemory");
-
   // Upload a grid into the staging buffer.
-  uint32_t* gpu_data = nullptr;
-  err = vkMapMemory(*device_, staging_buffer_mem_, 0, staging_buffer_info.size,
-                    0, reinterpret_cast<void**>(&gpu_data));
-  CheckResult(err, "vkMapMemory");
-
-  int width = 2048;
-  int height = 2048;
+  auto gpu_data = reinterpret_cast<uint32_t*>(staging_buffer_.host_base());
   for (int y = 0; y < height; ++y) {
     for (int x = 0; x < width; ++x) {
       gpu_data[y * width + x] =
          ((y % 32 < 16) ^ (x % 32 >= 16)) ? 0xFF0000FF : 0xFFFFFFFF;
     }
   }
-
-  vkUnmapMemory(*device_, staging_buffer_mem_);
 }
 
 TextureCache::~TextureCache() {
@@ -223,6 +201,10 @@ TextureCache::Texture* TextureCache::AllocateTexture(
     auto texture_view = std::make_unique<TextureView>();
     texture_view->texture = texture;
     texture_view->view = view;
+    texture_view->swiz_x = 0;
+    texture_view->swiz_y = 1;
+    texture_view->swiz_z = 2;
+    texture_view->swiz_w = 3;
     texture->views.push_back(std::move(texture_view));
   }
 
@@ -245,28 +227,16 @@ TextureCache::Texture* TextureCache::DemandResolveTexture(
       return texture;
     }
 
-  // Check resolve textures.
-  for (auto it = resolve_textures_.begin(); it != resolve_textures_.end();
-       ++it) {
-    texture = (*it).get();
-    if (texture_info.guest_address == texture->texture_info.guest_address &&
-        texture_info.size_2d.logical_width ==
-            texture->texture_info.size_2d.logical_width &&
-        texture_info.size_2d.logical_height ==
-            texture->texture_info.size_2d.logical_height) {
-      // Exact match.
-      return texture;
-    }
-  }
-
   // No texture at this location. Make a new one.
   texture = AllocateTexture(texture_info);
+  texture->is_full_texture = false;
   resolve_textures_.push_back(std::unique_ptr<Texture>(texture));
   return texture;
 }
 
-TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
-                                            VkCommandBuffer command_buffer) {
+TextureCache::Texture* TextureCache::Demand(
+    const TextureInfo& texture_info, VkCommandBuffer command_buffer,
+    std::shared_ptr<ui::vulkan::Fence> completion_fence) {
   // Run a tight loop to scan for an exact match existing texture.
   auto texture_hash = texture_info.hash();
   for (auto it = textures_.find(texture_hash); it != textures_.end(); ++it) {
@@ -285,9 +255,13 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
         texture_info.size_2d.logical_height ==
             texture->texture_info.size_2d.logical_height) {
       // Exact match.
-      // TODO: Lazy match
+      // TODO: Lazy match (at an offset)
+      // Upgrade this texture to a full texture.
+      texture->is_full_texture = true;
       texture->texture_info = texture_info;
       textures_[texture_hash] = std::move(*it);
+      it = resolve_textures_.erase(it);
+      return textures_[texture_hash].get();
     }
   }
 
@@ -305,7 +279,21 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
     return nullptr;
   }
 
-  if (!UploadTexture2D(command_buffer, texture, texture_info)) {
+  bool uploaded = false;
+  switch (texture_info.dimension) {
+    case Dimension::k2D: {
+      uploaded = UploadTexture2D(command_buffer, completion_fence, texture,
+                                 texture_info);
+    } break;
+    default:
+      assert_unhandled_case(texture_info.dimension);
+      break;
+  }
+
+  // Okay. Now that the texture is uploaded from system memory, put a writewatch
+  // on it to tell us if it's been modified from the guest.
+
+  if (!uploaded) {
     // TODO: Destroy the texture.
     assert_always();
     return nullptr;
@@ -314,6 +302,7 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
   // Though we didn't find an exact match, that doesn't mean we're out of the
   // woods yet. This texture could either be a portion of another texture or
   // vice versa. Copy any overlapping textures into this texture.
+  // TODO: Byte count -> pixel count (on x and y axes)
   for (auto it = textures_.begin(); it != textures_.end(); ++it) {
   }
 
@@ -322,6 +311,67 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
   return texture;
 }
 
+TextureCache::TextureView* TextureCache::DemandView(Texture* texture,
+                                                    uint16_t swizzle) {
+  for (auto it = texture->views.begin(); it != texture->views.end(); ++it) {
+    if ((*it)->swizzle == swizzle) {
+      return (*it).get();
+    }
+  }
+
+  VkImageViewCreateInfo view_info;
+  view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+  view_info.pNext = nullptr;
+  view_info.flags = 0;
+  view_info.image = texture->image;
+  view_info.format = texture->format;
+
+  switch (texture->texture_info.dimension) {
+    case Dimension::k1D:
+      view_info.viewType = VK_IMAGE_VIEW_TYPE_1D;
+      break;
+    case Dimension::k2D:
+      view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+      break;
+    case Dimension::k3D:
+      view_info.viewType = VK_IMAGE_VIEW_TYPE_3D;
+      break;
+    case Dimension::kCube:
+      view_info.viewType = VK_IMAGE_VIEW_TYPE_CUBE;
+      break;
+    default:
+      assert_always();
+  }
+
+  VkComponentSwizzle swiz_component_map[] = {
+      VK_COMPONENT_SWIZZLE_R,        VK_COMPONENT_SWIZZLE_G,
+      VK_COMPONENT_SWIZZLE_B,        VK_COMPONENT_SWIZZLE_A,
+      VK_COMPONENT_SWIZZLE_ONE,      VK_COMPONENT_SWIZZLE_ZERO,
+      VK_COMPONENT_SWIZZLE_IDENTITY,
+  };
+
+  view_info.components = {
+      swiz_component_map[(swizzle >> 0) & 0x7],
+      swiz_component_map[(swizzle >> 3) & 0x7],
+      swiz_component_map[(swizzle >> 6) & 0x7],
+      swiz_component_map[(swizzle >> 9) & 0x7],
+  };
+  view_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
+  VkImageView view;
+  auto status = vkCreateImageView(*device_, &view_info, nullptr, &view);
+  CheckResult(status, "vkCreateImageView");
+  if (status == VK_SUCCESS) {
+    auto texture_view = new TextureView();
+    texture_view->texture = texture;
+    texture_view->view = view;
+    texture_view->swizzle = swizzle;
+    texture->views.push_back(std::unique_ptr<TextureView>(texture_view));
+    return texture_view;
+  }
+
+  return nullptr;
+}
+
 TextureCache::Sampler* TextureCache::Demand(const SamplerInfo& sampler_info) {
   auto sampler_hash = sampler_info.hash();
   for (auto it = samplers_.find(sampler_hash); it != samplers_.end(); ++it) {
@@ -339,12 +389,28 @@ TextureCache::Sampler* TextureCache::Demand(const SamplerInfo& sampler_info) {
   sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
   sampler_create_info.pNext = nullptr;
   sampler_create_info.flags = 0;
-  sampler_create_info.magFilter = VK_FILTER_NEAREST;
   sampler_create_info.minFilter = VK_FILTER_NEAREST;
+  sampler_create_info.magFilter = VK_FILTER_NEAREST;
   sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
-  sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
-  sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
-  sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+
+  // FIXME: Both halfway / mirror clamp to border aren't mapped properly.
+  VkSamplerAddressMode address_mode_map[] = {
+      /* kRepeat               */ VK_SAMPLER_ADDRESS_MODE_REPEAT,
+      /* kMirroredRepeat       */ VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT,
+      /* kClampToEdge          */ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+      /* kMirrorClampToEdge    */ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
+      /* kClampToHalfway       */ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE,
+      /* kMirrorClampToHalfway */ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
+      /* kClampToBorder        */ VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,
+      /* kMirrorClampToBorder  */ VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE,
+  };
+  sampler_create_info.addressModeU =
+      address_mode_map[static_cast<int>(sampler_info.clamp_u)];
+  sampler_create_info.addressModeV =
+      address_mode_map[static_cast<int>(sampler_info.clamp_v)];
+  sampler_create_info.addressModeW =
+      address_mode_map[static_cast<int>(sampler_info.clamp_w)];
+
   sampler_create_info.mipLodBias = 0.0f;
   sampler_create_info.anisotropyEnable = VK_FALSE;
   sampler_create_info.maxAnisotropy = 1.0f;
@@ -375,6 +441,22 @@ TextureCache::Texture* TextureCache::LookupAddress(
     TextureFormat format, uint32_t* offset_x, uint32_t* offset_y) {
   for (auto it = textures_.begin(); it != textures_.end(); ++it) {
     const auto& texture_info = it->second->texture_info;
+    if (guest_address >= texture_info.guest_address &&
+        guest_address <
+            texture_info.guest_address + texture_info.input_length &&
+        offset_x && offset_y) {
+      auto offset_bytes = guest_address - texture_info.guest_address;
+
+      if (texture_info.dimension == Dimension::k2D) {
+        *offset_y = offset_bytes / texture_info.size_2d.input_pitch;
+        if (offset_bytes % texture_info.size_2d.input_pitch != 0) {
+          // TODO: offset_x
+        }
+      }
+
+      return it->second.get();
+    }
+
     if (texture_info.guest_address == guest_address &&
         texture_info.dimension == Dimension::k2D &&
         texture_info.size_2d.input_width == width &&
@@ -383,20 +465,86 @@ TextureCache::Texture* TextureCache::LookupAddress(
     }
   }
 
-  // TODO: Try to match at an offset.
+  // Check resolve textures
+  for (auto it = resolve_textures_.begin(); it != resolve_textures_.end();
+       ++it) {
+    const auto& texture_info = (*it)->texture_info;
+    if (guest_address >= texture_info.guest_address &&
+        guest_address <
+            texture_info.guest_address + texture_info.input_length &&
+        offset_x && offset_y) {
+      auto offset_bytes = guest_address - texture_info.guest_address;
+
+      if (texture_info.dimension == Dimension::k2D) {
+        *offset_y = offset_bytes / texture_info.size_2d.input_pitch;
+        if (offset_bytes % texture_info.size_2d.input_pitch != 0) {
+          // TODO: offset_x
+        }
+      }
+
+      return (*it).get();
+    }
+
+    if (texture_info.guest_address == guest_address &&
+        texture_info.dimension == Dimension::k2D &&
+        texture_info.size_2d.input_width == width &&
+        texture_info.size_2d.input_height == height) {
+      return (*it).get();
+    }
+  }
+
   return nullptr;
 }
 
-bool TextureCache::UploadTexture2D(VkCommandBuffer command_buffer,
-                                   Texture* dest, TextureInfo src) {
-  // TODO: We need to allocate memory to use as a staging buffer. We can then
-  // raw copy the texture from system memory into the staging buffer and use a
-  // shader to convert the texture into a format consumable by the host GPU.
+void TextureSwap(Endian endianness, void* dest, const void* src,
+                 size_t length) {
+  switch (endianness) {
+    case Endian::k8in16:
+      xe::copy_and_swap_16_aligned(dest, src, length / 2);
+      break;
+    case Endian::k8in32:
+      xe::copy_and_swap_32_aligned(dest, src, length / 4);
+      break;
+    case Endian::k16in32:  // Swap high and low 16 bits within a 32 bit word
+      xe::copy_and_swap_16_in_32_aligned(dest, src, length);
+      break;
+    default:
+    case Endian::kUnspecified:
+      std::memcpy(dest, src, length);
+      break;
+  }
+}
+
+bool TextureCache::UploadTexture2D(
+    VkCommandBuffer command_buffer,
+    std::shared_ptr<ui::vulkan::Fence> completion_fence, Texture* dest,
+    TextureInfo src) {
+  SCOPE_profile_cpu_f("gpu");
+  assert_true(src.dimension == Dimension::k2D);
 
-  // Need to have unique memory for every upload for at least one frame. If we
-  // run out of memory, we need to flush all queued upload commands to the GPU.
+  if (!staging_buffer_.CanAcquire(src.input_length)) {
+    // Need to have unique memory for every upload for at least one frame. If we
+    // run out of memory, we need to flush all queued upload commands to the
+    // GPU.
+    // TODO: Actually flush commands.
+    assert_always();
+  }
 
-  // TODO: Upload memory here.
+  // Grab some temporary memory for staging.
+  auto alloc = staging_buffer_.Acquire(src.input_length, completion_fence);
+  assert_not_null(alloc);
+
+  // TODO: Support these cases.
+  // assert_false(src.is_tiled);
+  // assert_false(src.is_compressed());
+
+  // Upload texture into GPU memory.
+  // TODO: If the GPU supports it, we can submit a compute batch to convert the
+  // texture and copy it to its destination. Otherwise, fallback to conversion
+  // on the CPU.
+  auto guest_ptr = memory_->TranslatePhysical(src.guest_address);
+  TextureSwap(src.endianness, alloc->host_ptr, guest_ptr, src.input_length);
+  staging_buffer_.Flush(alloc);
 
   // Insert a memory barrier into the command buffer to ensure the upload has
   // finished before we copy it into the destination texture.
@@ -407,9 +555,9 @@ bool TextureCache::UploadTexture2D(VkCommandBuffer command_buffer,
       VK_ACCESS_TRANSFER_READ_BIT,
       VK_QUEUE_FAMILY_IGNORED,
       VK_QUEUE_FAMILY_IGNORED,
-      staging_buffer_,
-      0,
-      2048 * 2048 * 4,
+      staging_buffer_.gpu_buffer(),
+      alloc->offset,
+      alloc->aligned_length,
   };
   vkCmdPipelineBarrier(command_buffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 1,
@@ -432,18 +580,24 @@ bool TextureCache::UploadTexture2D(VkCommandBuffer command_buffer,
                        VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, 0, 0, nullptr, 0,
                        nullptr, 1, &barrier);
 
+  assert_true(src.size_2d.input_width >=
+              dest->texture_info.size_2d.output_width);
+  assert_true(src.size_2d.input_height >=
+              dest->texture_info.size_2d.output_height);
+
   // For now, just transfer the grid we uploaded earlier into the texture.
   VkBufferImageCopy copy_region;
-  copy_region.bufferOffset = 0;
-  copy_region.bufferRowLength = 2048;
-  copy_region.bufferImageHeight = 2048;
+  copy_region.bufferOffset = alloc->offset;
+  copy_region.bufferRowLength = src.size_2d.input_width;
+  copy_region.bufferImageHeight = src.size_2d.input_height;
   copy_region.imageSubresource = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1};
   copy_region.imageOffset = {0, 0, 0};
-  copy_region.imageExtent = {dest->texture_info.width + 1,
-                             dest->texture_info.height + 1,
+  copy_region.imageExtent = {dest->texture_info.size_2d.output_width + 1,
+                             dest->texture_info.size_2d.output_height + 1,
                              dest->texture_info.depth + 1};
-  vkCmdCopyBufferToImage(command_buffer, staging_buffer_, dest->image,
-                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &copy_region);
+  vkCmdCopyBufferToImage(command_buffer, staging_buffer_.gpu_buffer(),
+                         dest->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1,
+                         &copy_region);
 
   // Now transition the texture into a shader readonly source.
   barrier.srcAccessMask = barrier.dstAccessMask;
@@ -460,6 +614,7 @@ bool TextureCache::UploadTexture2D(VkCommandBuffer command_buffer,
 
 VkDescriptorSet TextureCache::PrepareTextureSet(
     VkCommandBuffer command_buffer,
+    std::shared_ptr<ui::vulkan::Fence> completion_fence,
     const std::vector<Shader::TextureBinding>& vertex_bindings,
     const std::vector<Shader::TextureBinding>& pixel_bindings) {
   // Clear state.
@@ -476,11 +631,11 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
   // This does things lazily and de-dupes fetch constants reused in both
   // shaders.
   bool any_failed = false;
-  any_failed =
-      !SetupTextureBindings(update_set_info, vertex_bindings, command_buffer) ||
+  any_failed = !SetupTextureBindings(command_buffer, completion_fence,
+                                     update_set_info, vertex_bindings) ||
                any_failed;
-  any_failed =
-      !SetupTextureBindings(update_set_info, pixel_bindings, command_buffer) ||
+  any_failed = !SetupTextureBindings(command_buffer, completion_fence,
+                                     update_set_info, pixel_bindings) ||
                any_failed;
   if (any_failed) {
     XELOGW("Failed to setup one or more texture bindings");
@@ -518,6 +673,7 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
     sampler_write.pImageInfo = update_set_info->sampler_infos;
   }
   */
+  // FIXME: These are not be lined up properly with tf binding points!!!!!
   if (update_set_info->image_1d_write_count) {
     auto& image_write = descriptor_writes[descriptor_write_count++];
     image_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
@@ -567,20 +723,22 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
                          0, nullptr);
   }
 
+  in_flight_sets_.push_back({descriptor_set, completion_fence});
   return descriptor_set;
 }
 
 bool TextureCache::SetupTextureBindings(
+    VkCommandBuffer command_buffer,
+    std::shared_ptr<ui::vulkan::Fence> completion_fence,
     UpdateSetInfo* update_set_info,
-    const std::vector<Shader::TextureBinding>& bindings,
-    VkCommandBuffer command_buffer) {
+    const std::vector<Shader::TextureBinding>& bindings) {
   bool any_failed = false;
   for (auto& binding : bindings) {
     uint32_t fetch_bit = 1 << binding.fetch_constant;
     if ((update_set_info->has_setup_fetch_mask & fetch_bit) == 0) {
       // Needs setup.
-      any_failed =
-          !SetupTextureBinding(update_set_info, binding, command_buffer) ||
+      any_failed = !SetupTextureBinding(command_buffer, completion_fence,
+                                        update_set_info, binding) ||
                    any_failed;
       update_set_info->has_setup_fetch_mask |= fetch_bit;
     }
@@ -588,9 +746,10 @@ bool TextureCache::SetupTextureBindings(
   return !any_failed;
 }
 
-bool TextureCache::SetupTextureBinding(UpdateSetInfo* update_set_info,
-                                       const Shader::TextureBinding& binding,
-                                       VkCommandBuffer command_buffer) {
+bool TextureCache::SetupTextureBinding(
+    VkCommandBuffer command_buffer,
+    std::shared_ptr<ui::vulkan::Fence> completion_fence,
+    UpdateSetInfo* update_set_info, const Shader::TextureBinding& binding) {
   auto& regs = *register_file_;
   int r = XE_GPU_REG_SHADER_CONSTANT_FETCH_00_0 + binding.fetch_constant * 6;
   auto group =
@@ -615,18 +774,48 @@ bool TextureCache::SetupTextureBinding(UpdateSetInfo* update_set_info,
     return false;  // invalid texture used
   }
 
-  auto texture = Demand(texture_info, command_buffer);
+  auto texture = Demand(texture_info, command_buffer, completion_fence);
   auto sampler = Demand(sampler_info);
   assert_true(texture != nullptr && sampler != nullptr);
+  if (texture == nullptr || sampler == nullptr) {
+    return false;
+  }
+
+  uint16_t swizzle = static_cast<uint16_t>(fetch.swizzle);
+  auto view = DemandView(texture, swizzle);
 
   trace_writer_->WriteMemoryRead(texture_info.guest_address,
                                  texture_info.input_length);
 
-  auto& image_write =
-      update_set_info->image_2d_infos[update_set_info->image_2d_write_count++];
-  image_write.imageView = texture->views[0]->view;
-  image_write.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-  image_write.sampler = sampler->sampler;
+  VkDescriptorImageInfo* image_write = nullptr;
+  switch (texture_info.dimension) {
+    case Dimension::k1D:
+      image_write =
+          &update_set_info
+               ->image_1d_infos[update_set_info->image_1d_write_count++];
+      break;
+    case Dimension::k2D:
+      image_write =
+          &update_set_info
+               ->image_2d_infos[update_set_info->image_2d_write_count++];
+      break;
+    case Dimension::k3D:
+      image_write =
+          &update_set_info
+               ->image_3d_infos[update_set_info->image_3d_write_count++];
+      break;
+    case Dimension::kCube:
+      image_write =
+          &update_set_info
+               ->image_cube_infos[update_set_info->image_cube_write_count++];
+      break;
+    default:
+      assert_unhandled_case(texture_info.dimension);
+      return false;
+  }
+  image_write->imageView = view->view;
+  image_write->imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+  image_write->sampler = sampler->sampler;
 
   return true;
 }
@@ -635,6 +824,22 @@ void TextureCache::ClearCache() {
   // TODO(benvanik): caching.
 }
 
+void TextureCache::Scavenge() {
+  // Free unused descriptor sets
+  for (auto it = in_flight_sets_.begin(); it != in_flight_sets_.end();) {
+    if (vkGetFenceStatus(*device_, *it->second) == VK_SUCCESS) {
+      // We can free this one.
+      vkFreeDescriptorSets(*device_, descriptor_pool_, 1, &it->first);
+      it = in_flight_sets_.erase(it);
+      continue;
+    }
+
+    ++it;
+  }
+
+  staging_buffer_.Scavenge();
+}
+
 }  // namespace vulkan
 }  // namespace gpu
 }  // namespace xe
@@ -17,7 +17,9 @@
 #include "xenia/gpu/shader.h"
 #include "xenia/gpu/texture_info.h"
 #include "xenia/gpu/trace_writer.h"
+#include "xenia/gpu/vulkan/vulkan_command_processor.h"
 #include "xenia/gpu/xenos.h"
+#include "xenia/ui/vulkan/circular_buffer.h"
 #include "xenia/ui/vulkan/vulkan.h"
 #include "xenia/ui/vulkan/vulkan_device.h"
 
@@ -38,22 +40,38 @@ class TextureCache {
     // True if we know all info about this texture, false otherwise.
     // (e.g. we resolve to system memory and may not know the full details about
    // this texture)
-    bool full_texture;
+    bool is_full_texture;
     VkFormat format;
     VkImage image;
     VkImageLayout image_layout;
     VkDeviceMemory image_memory;
     VkDeviceSize memory_offset;
     VkDeviceSize memory_size;
+
+    uintptr_t access_watch_handle;
+    bool pending_invalidation;
   };
 
   struct TextureView {
     Texture* texture;
     VkImageView view;
+
+    union {
+      struct {
+        // FIXME: This only applies on little-endian platforms!
+        uint16_t swiz_x : 3;
+        uint16_t swiz_y : 3;
+        uint16_t swiz_z : 3;
+        uint16_t swiz_w : 3;
+        uint16_t : 4;
+      };
+
+      uint16_t swizzle;
+    };
   };
 
-  TextureCache(RegisterFile* register_file, TraceWriter* trace_writer,
-               ui::vulkan::VulkanDevice* device);
+  TextureCache(Memory* memory, RegisterFile* register_file,
+               TraceWriter* trace_writer, ui::vulkan::VulkanDevice* device);
   ~TextureCache();
 
   // Descriptor set layout containing all possible texture bindings.
@@ -64,8 +82,11 @@ class TextureCache {
 
   // Prepares a descriptor set containing the samplers and images for all
   // bindings. The textures will be uploaded/converted/etc as needed.
+  // Requires a fence to be provided that will be signaled when finished
+  // using the returned descriptor set.
   VkDescriptorSet PrepareTextureSet(
-      VkCommandBuffer command_buffer,
+      VkCommandBuffer setup_command_buffer,
+      std::shared_ptr<ui::vulkan::Fence> completion_fence,
       const std::vector<Shader::TextureBinding>& vertex_bindings,
       const std::vector<Shader::TextureBinding>& pixel_bindings);
 
@@ -73,6 +94,16 @@ class TextureCache {
   // TODO(benvanik): Resolve.
   // TODO(benvanik): ReadTexture.
 
+  // Looks for a texture either containing or matching these parameters.
+  // Caller is responsible for checking if the texture returned is an exact
+  // match or just contains the texture given by the parameters.
+  // If offset_x and offset_y are not null, this may return a texture that
+  // contains this address at an offset.
+  Texture* LookupAddress(uint32_t guest_address, uint32_t width,
+                         uint32_t height, TextureFormat format,
+                         uint32_t* offset_x = nullptr,
+                         uint32_t* offset_y = nullptr);
+
   // Demands a texture for the purpose of resolving from EDRAM. This either
   // creates a new texture or returns a previously created texture. texture_info
   // is not required to be completely filled out, just guest_address and size.
@@ -89,6 +120,9 @@ class TextureCache {
   // Clears all cached content.
   void ClearCache();
 
+  // Frees any unused resources
+  void Scavenge();
+
  private:
   struct UpdateSetInfo;
 
@@ -104,31 +138,30 @@ class TextureCache {
 
   // Demands a texture. If command_buffer is null and the texture hasn't been
   // uploaded to graphics memory already, we will return null and bail.
-  Texture* Demand(const TextureInfo& texture_info,
-                  VkCommandBuffer command_buffer = nullptr);
+  Texture* Demand(
+      const TextureInfo& texture_info, VkCommandBuffer command_buffer = nullptr,
+      std::shared_ptr<ui::vulkan::Fence> completion_fence = nullptr);
+  TextureView* DemandView(Texture* texture, uint16_t swizzle);
   Sampler* Demand(const SamplerInfo& sampler_info);
 
-  // Looks for a texture either containing or matching these parameters.
-  // Caller is responsible for checking if the texture returned is an exact
-  // match or just contains the texture given by the parameters.
-  // If offset_x and offset_y are not null, this may return a texture that
-  // contains this image at an offset.
-  Texture* LookupAddress(uint32_t guest_address, uint32_t width,
-                         uint32_t height, TextureFormat format,
-                         uint32_t* offset_x, uint32_t* offset_y);
-
   // Queues commands to upload a texture from system memory, applying any
   // conversions necessary. This may flush the command buffer to the GPU if we
   // run out of staging memory.
-  bool UploadTexture2D(VkCommandBuffer command_buffer, Texture* dest,
-                       TextureInfo src);
+  bool UploadTexture2D(VkCommandBuffer command_buffer,
+                       std::shared_ptr<ui::vulkan::Fence> completion_fence,
+                       Texture* dest, TextureInfo src);
 
-  bool SetupTextureBindings(UpdateSetInfo* update_set_info,
-                            const std::vector<Shader::TextureBinding>& bindings,
-                            VkCommandBuffer command_buffer = nullptr);
-  bool SetupTextureBinding(UpdateSetInfo* update_set_info,
-                           const Shader::TextureBinding& binding,
-                           VkCommandBuffer command_buffer = nullptr);
+  bool SetupTextureBindings(
+      VkCommandBuffer command_buffer,
+      std::shared_ptr<ui::vulkan::Fence> completion_fence,
+      UpdateSetInfo* update_set_info,
+      const std::vector<Shader::TextureBinding>& bindings);
+  bool SetupTextureBinding(VkCommandBuffer command_buffer,
+                           std::shared_ptr<ui::vulkan::Fence> completion_fence,
+                           UpdateSetInfo* update_set_info,
+                           const Shader::TextureBinding& binding);
 
+  Memory* memory_ = nullptr;
+
   RegisterFile* register_file_ = nullptr;
   TraceWriter* trace_writer_ = nullptr;
@@ -136,10 +169,11 @@ class TextureCache {
 
   VkDescriptorPool descriptor_pool_ = nullptr;
   VkDescriptorSetLayout texture_descriptor_set_layout_ = nullptr;
+  std::vector<std::pair<VkDescriptorSet, std::shared_ptr<ui::vulkan::Fence>>>
+      in_flight_sets_;
 
   // Temporary until we have circular buffers.
-  VkBuffer staging_buffer_ = nullptr;
-  VkDeviceMemory staging_buffer_mem_ = nullptr;
+  ui::vulkan::CircularBuffer staging_buffer_;
   std::unordered_map<uint64_t, std::unique_ptr<Texture>> textures_;
   std::unordered_map<uint64_t, std::unique_ptr<Sampler>> samplers_;
   std::vector<std::unique_ptr<Texture>> resolve_textures_;