diff --git a/src/xenia/gpu/vulkan/texture_cache.cc b/src/xenia/gpu/vulkan/texture_cache.cc
index 2a67f1727..4e93a46ca 100644
--- a/src/xenia/gpu/vulkan/texture_cache.cc
+++ b/src/xenia/gpu/vulkan/texture_cache.cc
@@ -14,6 +14,8 @@
 #include "xenia/base/memory.h"
 #include "xenia/base/profiling.h"
 #include "xenia/gpu/gpu_flags.h"
+#include "xenia/gpu/sampler_info.h"
+#include "xenia/gpu/texture_info.h"
 #include "xenia/gpu/vulkan/vulkan_gpu_flags.h"
 
 namespace xe {
@@ -25,8 +27,11 @@ using xe::ui::vulkan::CheckResult;
 constexpr uint32_t kMaxTextureSamplers = 32;
 
 TextureCache::TextureCache(RegisterFile* register_file,
+                           TraceWriter* trace_writer,
                            ui::vulkan::VulkanDevice* device)
-    : register_file_(register_file), device_(*device) {
+    : register_file_(register_file),
+      trace_writer_(trace_writer),
+      device_(device) {
   // Descriptor pool used for all of our cached descriptors.
   VkDescriptorPoolCreateInfo descriptor_pool_info;
   descriptor_pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
@@ -41,13 +46,13 @@ TextureCache::TextureCache(RegisterFile* register_file,
   pool_sizes[1].descriptorCount = 32;
   descriptor_pool_info.poolSizeCount = 2;
   descriptor_pool_info.pPoolSizes = pool_sizes;
-  auto err = vkCreateDescriptorPool(device_, &descriptor_pool_info, nullptr,
+  auto err = vkCreateDescriptorPool(*device_, &descriptor_pool_info, nullptr,
                                     &descriptor_pool_);
   CheckResult(err, "vkCreateDescriptorPool");
 
   // Create the descriptor set layout used for rendering.
   // We always have the same number of samplers but only some are used.
-  VkDescriptorSetLayoutBinding bindings[2];
+  VkDescriptorSetLayoutBinding bindings[5];
   auto& sampler_binding = bindings[0];
   sampler_binding.binding = 0;
   sampler_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
@@ -55,13 +60,15 @@ TextureCache::TextureCache(RegisterFile* register_file,
   sampler_binding.stageFlags =
       VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
   sampler_binding.pImmutableSamplers = nullptr;
-  auto& texture_binding = bindings[1];
-  texture_binding.binding = 1;
-  texture_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
-  texture_binding.descriptorCount = kMaxTextureSamplers;
-  texture_binding.stageFlags =
-      VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
-  texture_binding.pImmutableSamplers = nullptr;
+  for (int i = 0; i < 4; ++i) {
+    auto& texture_binding = bindings[1 + i];
+    texture_binding.binding = 1 + i;
+    texture_binding.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+    texture_binding.descriptorCount = kMaxTextureSamplers;
+    texture_binding.stageFlags =
+        VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
+    texture_binding.pImmutableSamplers = nullptr;
+  }
   VkDescriptorSetLayoutCreateInfo descriptor_set_layout_info;
   descriptor_set_layout_info.sType =
       VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
@@ -70,15 +77,274 @@ TextureCache::TextureCache(RegisterFile* register_file,
   descriptor_set_layout_info.bindingCount =
       static_cast<uint32_t>(xe::countof(bindings));
   descriptor_set_layout_info.pBindings = bindings;
-  err = vkCreateDescriptorSetLayout(device_, &descriptor_set_layout_info,
+  err = vkCreateDescriptorSetLayout(*device_, &descriptor_set_layout_info,
                                     nullptr, &texture_descriptor_set_layout_);
   CheckResult(err, "vkCreateDescriptorSetLayout");
+
+  SetupGridImages();
 }
 
 TextureCache::~TextureCache() {
-  vkDestroyDescriptorSetLayout(device_, texture_descriptor_set_layout_,
+  vkDestroyImageView(*device_, grid_image_2d_view_, nullptr);
+  vkDestroyImage(*device_, grid_image_2d_, nullptr);
+  vkFreeMemory(*device_, grid_image_2d_memory_, nullptr);
+
+  vkDestroyDescriptorSetLayout(*device_, texture_descriptor_set_layout_,
                                nullptr);
-  vkDestroyDescriptorPool(device_, descriptor_pool_, nullptr);
+  vkDestroyDescriptorPool(*device_, descriptor_pool_, nullptr);
+}
+
+void TextureCache::SetupGridImages() {
+  VkImageCreateInfo image_info;
+  image_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
+  image_info.pNext = nullptr;
+  image_info.flags = 0;
+  image_info.imageType = VK_IMAGE_TYPE_2D;
+  image_info.format = VK_FORMAT_R8G8B8A8_UNORM;
+  image_info.extent = {8, 8, 1};
+  image_info.mipLevels = 1;
+  image_info.arrayLayers = 1;
+  image_info.samples = VK_SAMPLE_COUNT_1_BIT;
+  image_info.tiling = VK_IMAGE_TILING_LINEAR;
+  image_info.usage = VK_IMAGE_USAGE_SAMPLED_BIT;
+  image_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
+  image_info.queueFamilyIndexCount = 0;
+  image_info.pQueueFamilyIndices = nullptr;
+  image_info.initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+  auto err = vkCreateImage(*device_, &image_info, nullptr, &grid_image_2d_);
+  CheckResult(err, "vkCreateImage");
+
+  VkMemoryRequirements memory_requirements;
+  vkGetImageMemoryRequirements(*device_, grid_image_2d_, &memory_requirements);
+  grid_image_2d_memory_ = device_->AllocateMemory(
+      memory_requirements, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT);
+  err = vkBindImageMemory(*device_, grid_image_2d_, grid_image_2d_memory_, 0);
+  CheckResult(err, "vkBindImageMemory");
+
+  VkImageViewCreateInfo view_info;
+  view_info.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
+  view_info.pNext = nullptr;
+  view_info.flags = 0;
+  view_info.image = grid_image_2d_;
+  view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
+  view_info.format = VK_FORMAT_R8G8B8A8_UNORM;
+  view_info.components = {
+      VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B,
+      VK_COMPONENT_SWIZZLE_A,
+  };
+  view_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
+  err = vkCreateImageView(*device_, &view_info, nullptr, &grid_image_2d_view_);
+  CheckResult(err, "vkCreateImageView");
+
+  VkImageSubresource subresource;
+  subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+  subresource.mipLevel = 0;
+  subresource.arrayLayer = 0;
+  VkSubresourceLayout layout;
+  vkGetImageSubresourceLayout(*device_, grid_image_2d_, &subresource, &layout);
+
+  void* gpu_data = nullptr;
+  err = vkMapMemory(*device_, grid_image_2d_memory_, 0, layout.size, 0,
+                    &gpu_data);
+  CheckResult(err, "vkMapMemory");
+
+  uint32_t grid_pixels[8 * 8];
+  for (int y = 0; y < 8; ++y) {
+    for (int x = 0; x < 8; ++x) {
+      grid_pixels[y * 8 + x] =
+          ((y % 2 == 0) ^ (x % 2 != 0)) ? 0xFFFFFFFF : 0xFF0000FF;
+    }
+  }
+  std::memcpy(gpu_data, grid_pixels, sizeof(grid_pixels));
+
+  vkUnmapMemory(*device_, grid_image_2d_memory_);
+}
+
+VkDescriptorSet TextureCache::PrepareTextureSet(
+    VkCommandBuffer command_buffer,
+    const std::vector<Shader::TextureBinding>& vertex_bindings,
+    const std::vector<Shader::TextureBinding>& pixel_bindings) {
+  // Clear state.
+  auto update_set_info = &update_set_info_;
+  update_set_info->has_setup_fetch_mask = 0;
+  update_set_info->image_1d_write_count = 0;
+  update_set_info->image_2d_write_count = 0;
+  update_set_info->image_3d_write_count = 0;
+  update_set_info->image_cube_write_count = 0;
+
+  std::memset(update_set_info, 0, sizeof(update_set_info_));
+
+  // Process vertex and pixel shader bindings.
+  // This does things lazily and de-dupes fetch constants reused in both
+  // shaders.
+  bool any_failed = false;
+  any_failed =
+      !SetupTextureBindings(update_set_info, vertex_bindings) || any_failed;
+  any_failed =
+      !SetupTextureBindings(update_set_info, pixel_bindings) || any_failed;
+  if (any_failed) {
+    XELOGW("Failed to setup one or more texture bindings");
+    // TODO(benvanik): actually bail out here?
+  }
+
+  // TODO(benvanik): reuse.
+  VkDescriptorSet descriptor_set = nullptr;
+  VkDescriptorSetAllocateInfo set_alloc_info;
+  set_alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
+  set_alloc_info.pNext = nullptr;
+  set_alloc_info.descriptorPool = descriptor_pool_;
+  set_alloc_info.descriptorSetCount = 1;
+  set_alloc_info.pSetLayouts = &texture_descriptor_set_layout_;
+  auto err =
+      vkAllocateDescriptorSets(*device_, &set_alloc_info, &descriptor_set);
+  CheckResult(err, "vkAllocateDescriptorSets");
+
+  // Write all updated descriptors.
+  // TODO(benvanik): optimize? split into multiple sets? set per type?
+  VkWriteDescriptorSet descriptor_writes[4];
+  std::memset(descriptor_writes, 0, sizeof(descriptor_writes));
+  uint32_t descriptor_write_count = 0;
+  if (update_set_info->sampler_write_count) {
+    auto& sampler_write = descriptor_writes[descriptor_write_count++];
+    sampler_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    sampler_write.pNext = nullptr;
+    sampler_write.dstSet = descriptor_set;
+    sampler_write.dstBinding = 0;
+    sampler_write.dstArrayElement = 0;
+    sampler_write.descriptorCount = update_set_info->sampler_write_count;
+    sampler_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
+    sampler_write.pImageInfo = update_set_info->sampler_infos;
+  }
+  if (update_set_info->image_1d_write_count) {
+    auto& image_write = descriptor_writes[descriptor_write_count++];
+    image_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    image_write.pNext = nullptr;
+    image_write.dstSet = descriptor_set;
+    image_write.dstBinding = 1;
+    image_write.dstArrayElement = 0;
+    image_write.descriptorCount = update_set_info->image_1d_write_count;
+    image_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+    image_write.pImageInfo = update_set_info->image_1d_infos;
+  }
+  if (update_set_info->image_2d_write_count) {
+    auto& image_write = descriptor_writes[descriptor_write_count++];
+    image_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    image_write.pNext = nullptr;
+    image_write.dstSet = descriptor_set;
+    image_write.dstBinding = 2;
+    image_write.dstArrayElement = 0;
+    image_write.descriptorCount = update_set_info->image_2d_write_count;
+    image_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+    image_write.pImageInfo = update_set_info->image_2d_infos;
+  }
+  if (update_set_info->image_3d_write_count) {
+    auto& image_write = descriptor_writes[descriptor_write_count++];
+    image_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    image_write.pNext = nullptr;
+    image_write.dstSet = descriptor_set;
+    image_write.dstBinding = 3;
+    image_write.dstArrayElement = 0;
+    image_write.descriptorCount = update_set_info->image_3d_write_count;
+    image_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+    image_write.pImageInfo = update_set_info->image_3d_infos;
+  }
+  if (update_set_info->image_cube_write_count) {
+    auto& image_write = descriptor_writes[descriptor_write_count++];
+    image_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
+    image_write.pNext = nullptr;
+    image_write.dstSet = descriptor_set;
+    image_write.dstBinding = 4;
+    image_write.dstArrayElement = 0;
+    image_write.descriptorCount = update_set_info->image_cube_write_count;
+    image_write.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
+    image_write.pImageInfo = update_set_info->image_cube_infos;
+  }
+  if (descriptor_write_count) {
+    vkUpdateDescriptorSets(*device_, descriptor_write_count, descriptor_writes,
+                           0, nullptr);
+  }
+
+  return descriptor_set;
+}
+
+bool TextureCache::SetupTextureBindings(
+    UpdateSetInfo* update_set_info,
+    const std::vector<Shader::TextureBinding>& bindings) {
+  bool any_failed = false;
+  for (auto& binding : bindings) {
+    uint32_t fetch_bit = 1 << binding.fetch_constant;
+    if ((update_set_info->has_setup_fetch_mask & fetch_bit) == 0) {
+      // Needs setup.
+      any_failed = !SetupTextureBinding(update_set_info, binding) || any_failed;
+      update_set_info->has_setup_fetch_mask |= fetch_bit;
+    }
+  }
+  return !any_failed;
+}
+
+bool TextureCache::SetupTextureBinding(UpdateSetInfo* update_set_info,
+                                       const Shader::TextureBinding& binding) {
+  auto& regs = *register_file_;
+  int r = XE_GPU_REG_SHADER_CONSTANT_FETCH_00_0 + binding.fetch_constant * 6;
+  auto group =
+      reinterpret_cast<xe_gpu_fetch_group*>(&regs.values[r]);
+  auto& fetch = group->texture_fetch;
+
+  // Disabled?
+  // TODO(benvanik): reset sampler.
+  if (!fetch.type) {
+    return true;
+  }
+  assert_true(fetch.type == 0x2);
+
+  TextureInfo texture_info;
+  if (!TextureInfo::Prepare(fetch, &texture_info)) {
+    XELOGE("Unable to parse texture fetcher info");
+    return false;  // invalid texture used
+  }
+  SamplerInfo sampler_info;
+  if (!SamplerInfo::Prepare(fetch, binding.fetch_instr, &sampler_info)) {
+    XELOGE("Unable to parse sampler info");
+    return false;  // invalid texture used
+  }
+
+  trace_writer_->WriteMemoryRead(texture_info.guest_address,
+                                 texture_info.input_length);
+
+  // TODO(benvanik): reuse.
+  VkSamplerCreateInfo sampler_create_info;
+  sampler_create_info.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
+  sampler_create_info.pNext = nullptr;
+  sampler_create_info.flags = 0;
+  sampler_create_info.magFilter = VK_FILTER_NEAREST;
+  sampler_create_info.minFilter = VK_FILTER_NEAREST;
+  sampler_create_info.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
+  sampler_create_info.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+  sampler_create_info.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+  sampler_create_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
+  sampler_create_info.mipLodBias = 0.0f;
+  sampler_create_info.anisotropyEnable = VK_FALSE;
+  sampler_create_info.maxAnisotropy = 1.0f;
+  sampler_create_info.compareEnable = VK_FALSE;
+  sampler_create_info.compareOp = VK_COMPARE_OP_ALWAYS;
+  sampler_create_info.minLod = 0.0f;
+  sampler_create_info.maxLod = 0.0f;
+  sampler_create_info.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
+  sampler_create_info.unnormalizedCoordinates = VK_FALSE;
+  VkSampler sampler;
+  auto err = vkCreateSampler(*device_, &sampler_create_info, nullptr, &sampler);
+  CheckResult(err, "vkCreateSampler");
+
+  auto& sampler_write =
+      update_set_info->sampler_infos[update_set_info->sampler_write_count++];
+  sampler_write.sampler = sampler;
+
+  auto& image_write =
+      update_set_info->image_2d_infos[update_set_info->image_2d_write_count++];
+  image_write.imageView = grid_image_2d_view_;
+  image_write.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
+
+  return true;
 }
 
 void TextureCache::ClearCache() {
diff --git a/src/xenia/gpu/vulkan/texture_cache.h b/src/xenia/gpu/vulkan/texture_cache.h
index 34ae6f114..9ba3f3577 100644
--- a/src/xenia/gpu/vulkan/texture_cache.h
+++ b/src/xenia/gpu/vulkan/texture_cache.h
@@ -12,6 +12,7 @@
 #include "xenia/gpu/register_file.h"
 #include "xenia/gpu/shader.h"
+#include "xenia/gpu/trace_writer.h"
 #include "xenia/gpu/xenos.h"
 #include "xenia/ui/vulkan/vulkan.h"
 #include "xenia/ui/vulkan/vulkan_device.h"
 
@@ -23,7 +24,8 @@ namespace vulkan {
 //
 class TextureCache {
  public:
-  TextureCache(RegisterFile* register_file, ui::vulkan::VulkanDevice* device);
+  TextureCache(RegisterFile* register_file, TraceWriter* trace_writer,
+               ui::vulkan::VulkanDevice* device);
   ~TextureCache();
 
   // Descriptor set layout containing all possible texture bindings.
@@ -32,6 +34,13 @@ class TextureCache {
     return texture_descriptor_set_layout_;
   }
 
+  // Prepares a descriptor set containing the samplers and images for all
+  // bindings. The textures will be uploaded/converted/etc as needed.
+  VkDescriptorSet PrepareTextureSet(
+      VkCommandBuffer command_buffer,
+      const std::vector<Shader::TextureBinding>& vertex_bindings,
+      const std::vector<Shader::TextureBinding>& pixel_bindings);
+
   // TODO(benvanik): UploadTexture.
   // TODO(benvanik): Resolve.
   // TODO(benvanik): ReadTexture.
@@ -40,11 +49,42 @@ class TextureCache {
   void ClearCache();
 
  private:
+  struct UpdateSetInfo;
+
+  void SetupGridImages();
+
+  bool SetupTextureBindings(
+      UpdateSetInfo* update_set_info,
+      const std::vector<Shader::TextureBinding>& bindings);
+  bool SetupTextureBinding(UpdateSetInfo* update_set_info,
+                           const Shader::TextureBinding& binding);
+
   RegisterFile* register_file_ = nullptr;
-  VkDevice device_ = nullptr;
+  TraceWriter* trace_writer_ = nullptr;
+  ui::vulkan::VulkanDevice* device_ = nullptr;
 
   VkDescriptorPool descriptor_pool_ = nullptr;
   VkDescriptorSetLayout texture_descriptor_set_layout_ = nullptr;
+
+  VkDeviceMemory grid_image_2d_memory_ = nullptr;
+  VkImage grid_image_2d_ = nullptr;
+  VkImageView grid_image_2d_view_ = nullptr;
+
+  struct UpdateSetInfo {
+    // Bitmap of all 32 fetch constants and whether they have been setup yet.
+    // This prevents duplication across the vertex and pixel shader.
+    uint32_t has_setup_fetch_mask;
+    uint32_t sampler_write_count = 0;
+    VkDescriptorImageInfo sampler_infos[32];
+    uint32_t image_1d_write_count = 0;
+    VkDescriptorImageInfo image_1d_infos[32];
+    uint32_t image_2d_write_count = 0;
+    VkDescriptorImageInfo image_2d_infos[32];
+    uint32_t image_3d_write_count = 0;
+    VkDescriptorImageInfo image_3d_infos[32];
+    uint32_t image_cube_write_count = 0;
+    VkDescriptorImageInfo image_cube_infos[32];
+  } update_set_info_;
 };
 
 }  // namespace vulkan
diff --git a/src/xenia/gpu/vulkan/vulkan_command_processor.cc b/src/xenia/gpu/vulkan/vulkan_command_processor.cc
index 723fa8d07..f04ec1ad3 100644
--- a/src/xenia/gpu/vulkan/vulkan_command_processor.cc
+++ b/src/xenia/gpu/vulkan/vulkan_command_processor.cc
@@ -69,7 +69,8 @@ bool VulkanCommandProcessor::SetupContext() {
   // Initialize the state machine caches.
   buffer_cache_ = std::make_unique<BufferCache>(register_file_, device_,
                                                 kDefaultBufferCacheCapacity);
-  texture_cache_ = std::make_unique<TextureCache>(register_file_, device_);
+  texture_cache_ =
+      std::make_unique<TextureCache>(register_file_, &trace_writer_, device_);
   pipeline_cache_ = std::make_unique<PipelineCache>(
       register_file_, device_, buffer_cache_->constant_descriptor_set_layout(),
       texture_cache_->texture_descriptor_set_layout());
@@ -472,68 +473,18 @@ bool VulkanCommandProcessor::PopulateSamplers(VkCommandBuffer command_buffer,
   SCOPE_profile_cpu_f("gpu");
 #endif  // FINE_GRAINED_DRAW_SCOPES
 
-  bool any_failed = false;
-
-  // VS and PS samplers are shared, but may be used exclusively.
-  // We walk each and setup lazily.
-  bool has_setup_sampler[32] = {false};
-
-  // Vertex texture samplers.
-  for (auto& texture_binding : vertex_shader->texture_bindings()) {
-    if (has_setup_sampler[texture_binding.fetch_constant]) {
-      continue;
-    }
-    has_setup_sampler[texture_binding.fetch_constant] = true;
-    any_failed =
-        !PopulateSampler(command_buffer, texture_binding) || any_failed;
+  auto descriptor_set = texture_cache_->PrepareTextureSet(
+      command_buffer, vertex_shader->texture_bindings(),
+      pixel_shader->texture_bindings());
+  if (!descriptor_set) {
+    // Unable to bind set.
+    return false;
   }
 
-  // Pixel shader texture sampler.
-  for (auto& texture_binding : pixel_shader->texture_bindings()) {
-    if (has_setup_sampler[texture_binding.fetch_constant]) {
-      continue;
-    }
-    has_setup_sampler[texture_binding.fetch_constant] = true;
-    any_failed =
-        !PopulateSampler(command_buffer, texture_binding) || any_failed;
-  }
-
-  return !any_failed;
-}
-
-bool VulkanCommandProcessor::PopulateSampler(
-    VkCommandBuffer command_buffer,
-    const Shader::TextureBinding& texture_binding) {
-  auto& regs = *register_file_;
-  int r = XE_GPU_REG_SHADER_CONSTANT_FETCH_00_0 +
-          texture_binding.fetch_constant * 6;
-  auto group = reinterpret_cast<xe_gpu_fetch_group*>(&regs.values[r]);
-  auto& fetch = group->texture_fetch;
-
-  // Disabled?
-  // TODO(benvanik): reset sampler.
-  if (!fetch.type) {
-    return true;
-  }
-  assert_true(fetch.type == 0x2);
-
-  TextureInfo texture_info;
-  if (!TextureInfo::Prepare(fetch, &texture_info)) {
-    XELOGE("Unable to parse texture fetcher info");
-    return true;  // invalid texture used
-  }
-  SamplerInfo sampler_info;
-  if (!SamplerInfo::Prepare(fetch, texture_binding.fetch_instr,
-                            &sampler_info)) {
-    XELOGE("Unable to parse sampler info");
-    return true;  // invalid texture used
-  }
-
-  trace_writer_.WriteMemoryRead(texture_info.guest_address,
-                                texture_info.input_length);
-
-  // TODO(benvanik): texture cache lookup.
-  // TODO(benvanik): bind or return so PopulateSamplers can batch.
+  // Bind samplers/textures.
+  vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
+                          pipeline_cache_->pipeline_layout(), 1, 1,
+                          &descriptor_set, 0, nullptr);
 
   return true;
 }
diff --git a/src/xenia/gpu/vulkan/vulkan_command_processor.h b/src/xenia/gpu/vulkan/vulkan_command_processor.h
index 179c31a73..43aec9edd 100644
--- a/src/xenia/gpu/vulkan/vulkan_command_processor.h
+++ b/src/xenia/gpu/vulkan/vulkan_command_processor.h
@@ -76,8 +76,6 @@ class VulkanCommandProcessor : public CommandProcessor {
   bool PopulateSamplers(VkCommandBuffer command_buffer,
                         VulkanShader* vertex_shader,
                         VulkanShader* pixel_shader);
-  bool PopulateSampler(VkCommandBuffer command_buffer,
-                       const Shader::TextureBinding& texture_binding);
   bool IssueCopy() override;
 
   xe::ui::vulkan::VulkanDevice* device_ = nullptr;
diff --git a/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc b/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc
index aa9c84c72..23dffd6c6 100644
--- a/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc
+++ b/src/xenia/ui/vulkan/vulkan_immediate_drawer.cc
@@ -278,7 +278,8 @@ VulkanImmediateDrawer::VulkanImmediateDrawer(VulkanContext* graphics_context)
   sampler_info.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
   sampler_info.mipLodBias = 0.0f;
   sampler_info.anisotropyEnable = VK_FALSE;
-  sampler_info.maxAnisotropy = 1;
+  sampler_info.maxAnisotropy = 1.0f;
+  sampler_info.compareEnable = VK_FALSE;
   sampler_info.compareOp = VK_COMPARE_OP_NEVER;
   sampler_info.minLod = 0.0f;
   sampler_info.maxLod = 0.0f;
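Note on usage: the PopulateSamplers() hunk above binds the returned descriptor set at set index 1 of pipeline_cache_->pipeline_layout(), presumably alongside the constant set from BufferCache at index 0 (PipelineCache is constructed with both layouts). Below is a minimal sketch, not the actual PipelineCache code, of how such a two-set VkPipelineLayout could be assembled; the function name and the constant_set_layout/texture_set_layout parameters are hypothetical, with the second expected to come from TextureCache::texture_descriptor_set_layout().

#include <vulkan/vulkan.h>

// Sketch only: a two-set pipeline layout so that vkCmdBindDescriptorSets with
// firstSet = 1 (as in PopulateSamplers above) targets the texture set.
// constant_set_layout / texture_set_layout are hypothetical parameters here.
VkPipelineLayout CreateTwoSetPipelineLayout(
    VkDevice device, VkDescriptorSetLayout constant_set_layout,
    VkDescriptorSetLayout texture_set_layout) {
  // Set 0: vertex/pixel constants; set 1: samplers plus 1D/2D/3D/cube images.
  VkDescriptorSetLayout set_layouts[] = {constant_set_layout,
                                         texture_set_layout};
  VkPipelineLayoutCreateInfo layout_info;
  layout_info.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
  layout_info.pNext = nullptr;
  layout_info.flags = 0;
  layout_info.setLayoutCount = 2;
  layout_info.pSetLayouts = set_layouts;
  layout_info.pushConstantRangeCount = 0;
  layout_info.pPushConstantRanges = nullptr;
  VkPipelineLayout pipeline_layout = nullptr;
  vkCreatePipelineLayout(device, &layout_info, nullptr, &pipeline_layout);
  return pipeline_layout;
}

With this arrangement the constant set can stay bound across draws while the per-draw texture set produced by PrepareTextureSet() is rebound at index 1.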