[Legacy Vulkan] Add Vulkan prefix to Pipeline/TextureCache to avoid future name collisions

Triang3l 2022-05-11 21:21:33 +03:00
parent e6fb9883d2
commit af3158f1bf
6 changed files with 125 additions and 112 deletions
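
The rename frees the unprefixed PipelineCache/TextureCache names inside xe::gpu::vulkan. Below is a minimal sketch of the kind of collision the prefix avoids, assuming a future backend-agnostic xe::gpu::TextureCache (a hypothetical name here, not part of this commit): unqualified lookup inside the nested namespace finds the Vulkan class first, so the outer class could only be reached with full qualification.

// Minimal sketch of the collision the Vulkan prefix avoids; the outer
// xe::gpu::TextureCache is a hypothetical future cross-backend class.
namespace xe {
namespace gpu {

class TextureCache {};  // hypothetical backend-agnostic cache

namespace vulkan {

class TextureCache {};  // the legacy Vulkan cache, pre-rename

// Unqualified lookup inside xe::gpu::vulkan finds the nested class and
// shadows the outer one, so reaching it requires full qualification.
TextureCache vulkan_cache;             // resolves to xe::gpu::vulkan::TextureCache
::xe::gpu::TextureCache common_cache;  // explicit qualification required

}  // namespace vulkan
}  // namespace gpu
}  // namespace xe

int main() { return 0; }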

src/xenia/gpu/vulkan/vulkan_command_processor.cc

@@ -95,8 +95,8 @@ bool VulkanCommandProcessor::SetupContext() {
return false;
}
texture_cache_ = std::make_unique<TextureCache>(memory_, register_file_,
&trace_writer_, provider);
texture_cache_ = std::make_unique<VulkanTextureCache>(
memory_, register_file_, &trace_writer_, provider);
status = texture_cache_->Initialize();
if (status != VK_SUCCESS) {
XELOGE("Unable to initialize texture cache");
@@ -104,7 +104,8 @@ bool VulkanCommandProcessor::SetupContext() {
return false;
}
pipeline_cache_ = std::make_unique<PipelineCache>(register_file_, provider);
pipeline_cache_ =
std::make_unique<VulkanPipelineCache>(register_file_, provider);
status = pipeline_cache_->Initialize(
buffer_cache_->constant_descriptor_set_layout(),
texture_cache_->texture_descriptor_set_layout(),
@@ -714,9 +715,9 @@ bool VulkanCommandProcessor::IssueDraw(xenos::PrimitiveType primitive_type,
auto pipeline_status = pipeline_cache_->ConfigurePipeline(
command_buffer, current_render_state_, vertex_shader, pixel_shader,
primitive_type, &pipeline);
if (pipeline_status == PipelineCache::UpdateStatus::kError) {
if (pipeline_status == VulkanPipelineCache::UpdateStatus::kError) {
return false;
} else if (pipeline_status == PipelineCache::UpdateStatus::kMismatch ||
} else if (pipeline_status == VulkanPipelineCache::UpdateStatus::kMismatch ||
full_update) {
dfn.vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS,
pipeline);

src/xenia/gpu/vulkan/vulkan_command_processor.h

@@ -25,11 +25,11 @@
#include "xenia/gpu/command_processor.h"
#include "xenia/gpu/register_file.h"
#include "xenia/gpu/vulkan/buffer_cache.h"
#include "xenia/gpu/vulkan/pipeline_cache.h"
#include "xenia/gpu/vulkan/render_cache.h"
#include "xenia/gpu/vulkan/texture_cache.h"
#include "xenia/gpu/vulkan/vulkan_graphics_system.h"
#include "xenia/gpu/vulkan/vulkan_pipeline_cache.h"
#include "xenia/gpu/vulkan/vulkan_shader.h"
#include "xenia/gpu/vulkan/vulkan_texture_cache.h"
#include "xenia/gpu/xenos.h"
#include "xenia/kernel/xthread.h"
#include "xenia/memory.h"
@@ -43,7 +43,7 @@ namespace xe {
namespace gpu {
namespace vulkan {
class TextureCache;
class VulkanTextureCache;
class VulkanCommandProcessor : public CommandProcessor {
public:
@@ -114,9 +114,9 @@ class VulkanCommandProcessor : public CommandProcessor {
bool cache_clear_requested_ = false;
std::unique_ptr<BufferCache> buffer_cache_;
std::unique_ptr<PipelineCache> pipeline_cache_;
std::unique_ptr<VulkanPipelineCache> pipeline_cache_;
std::unique_ptr<RenderCache> render_cache_;
std::unique_ptr<TextureCache> texture_cache_;
std::unique_ptr<VulkanTextureCache> texture_cache_;
std::unique_ptr<ui::vulkan::Blitter> blitter_;
std::unique_ptr<ui::vulkan::CommandBufferPool> command_buffer_pool_;

src/xenia/gpu/vulkan/pipeline_cache.cc → src/xenia/gpu/vulkan/vulkan_pipeline_cache.cc

@@ -7,7 +7,7 @@
******************************************************************************
*/
#include "xenia/gpu/vulkan/pipeline_cache.h"
#include "xenia/gpu/vulkan/vulkan_pipeline_cache.h"
#include "xenia/base/logging.h"
#include "xenia/base/math.h"
@@ -36,15 +36,15 @@ namespace shaders {
#include "xenia/gpu/vulkan/shaders/bytecode/vulkan_spirv/rect_list_gs.h"
} // namespace shaders
PipelineCache::PipelineCache(RegisterFile* register_file,
const ui::vulkan::VulkanProvider& provider)
VulkanPipelineCache::VulkanPipelineCache(
RegisterFile* register_file, const ui::vulkan::VulkanProvider& provider)
: register_file_(register_file), provider_(provider) {
shader_translator_.reset(new SpirvShaderTranslator());
}
PipelineCache::~PipelineCache() { Shutdown(); }
VulkanPipelineCache::~VulkanPipelineCache() { Shutdown(); }
VkResult PipelineCache::Initialize(
VkResult VulkanPipelineCache::Initialize(
VkDescriptorSetLayout uniform_descriptor_set_layout,
VkDescriptorSetLayout texture_descriptor_set_layout,
VkDescriptorSetLayout vertex_descriptor_set_layout) {
@@ -170,7 +170,7 @@ VkResult PipelineCache::Initialize(
return VK_SUCCESS;
}
void PipelineCache::Shutdown() {
void VulkanPipelineCache::Shutdown() {
ClearCache();
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
@@ -209,10 +209,10 @@ void PipelineCache::Shutdown() {
}
}
VulkanShader* PipelineCache::LoadShader(xenos::ShaderType shader_type,
uint32_t guest_address,
const uint32_t* host_address,
uint32_t dword_count) {
VulkanShader* VulkanPipelineCache::LoadShader(xenos::ShaderType shader_type,
uint32_t guest_address,
const uint32_t* host_address,
uint32_t dword_count) {
// Hash the input memory and lookup the shader.
uint64_t data_hash =
XXH3_64bits(host_address, dword_count * sizeof(uint32_t));
@@ -232,7 +232,7 @@ VulkanShader* PipelineCache::LoadShader(xenos::ShaderType shader_type,
return shader;
}
PipelineCache::UpdateStatus PipelineCache::ConfigurePipeline(
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::ConfigurePipeline(
VkCommandBuffer command_buffer, const RenderState* render_state,
VulkanShader* vertex_shader, VulkanShader* pixel_shader,
xenos::PrimitiveType primitive_type, VkPipeline* pipeline_out) {
@@ -279,7 +279,7 @@ PipelineCache::UpdateStatus PipelineCache::ConfigurePipeline(
return update_status;
}
void PipelineCache::ClearCache() {
void VulkanPipelineCache::ClearCache() {
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
VkDevice device = provider_.device();
// Destroy all pipelines.
@@ -296,8 +296,8 @@ void PipelineCache::ClearCache() {
shader_map_.clear();
}
VkPipeline PipelineCache::GetPipeline(const RenderState* render_state,
uint64_t hash_key) {
VkPipeline VulkanPipelineCache::GetPipeline(const RenderState* render_state,
uint64_t hash_key) {
// Lookup the pipeline in the cache.
auto it = cached_pipelines_.find(hash_key);
if (it != cached_pipelines_.end()) {
@@ -374,7 +374,7 @@ VkPipeline PipelineCache::GetPipeline(const RenderState* render_state,
return pipeline;
}
bool PipelineCache::TranslateShader(
bool VulkanPipelineCache::TranslateShader(
VulkanShader::VulkanTranslation& translation) {
translation.shader().AnalyzeUcode(ucode_disasm_buffer_);
// Perform translation.
@@ -425,7 +425,7 @@ static void DumpShaderStatisticsAMD(const VkShaderStatisticsInfoAMD& stats) {
XELOGI("numAvailableSgprs: {}", stats.numAvailableSgprs);
}
void PipelineCache::DumpShaderDisasmAMD(VkPipeline pipeline) {
void VulkanPipelineCache::DumpShaderDisasmAMD(VkPipeline pipeline) {
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
VkDevice device = provider_.device();
VkResult status = VK_SUCCESS;
@@ -455,7 +455,7 @@ void PipelineCache::DumpShaderDisasmAMD(VkPipeline pipeline) {
// TODO(DrChat): Eventually dump the disasm...
}
void PipelineCache::DumpShaderDisasmNV(
void VulkanPipelineCache::DumpShaderDisasmNV(
const VkGraphicsPipelineCreateInfo& pipeline_info) {
// !! HACK !!: This only works on NVidia drivers. Dumps shader disasm.
// This code is super ugly. Update this when NVidia includes an official
@@ -548,7 +548,7 @@ void PipelineCache::DumpShaderDisasmNV(
dfn.vkDestroyPipelineCache(device, dummy_pipeline_cache, nullptr);
}
VkShaderModule PipelineCache::GetGeometryShader(
VkShaderModule VulkanPipelineCache::GetGeometryShader(
xenos::PrimitiveType primitive_type, bool is_line_mode) {
switch (primitive_type) {
case xenos::PrimitiveType::kLineList:
@@ -583,8 +583,8 @@ VkShaderModule PipelineCache::GetGeometryShader(
}
}
bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
bool full_update) {
bool VulkanPipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
bool full_update) {
#if FINE_GRAINED_DRAW_SCOPES
SCOPE_profile_cpu_f("gpu");
#endif // FINE_GRAINED_DRAW_SCOPES
@@ -938,7 +938,8 @@ bool PipelineCache::SetDynamicState(VkCommandBuffer command_buffer,
return true;
}
bool PipelineCache::SetShadowRegister(uint32_t* dest, uint32_t register_name) {
bool VulkanPipelineCache::SetShadowRegister(uint32_t* dest,
uint32_t register_name) {
uint32_t value = register_file_->values[register_name].u32;
if (*dest == value) {
return false;
@@ -947,7 +948,8 @@ bool PipelineCache::SetShadowRegister(uint32_t* dest, uint32_t register_name) {
return true;
}
bool PipelineCache::SetShadowRegister(float* dest, uint32_t register_name) {
bool VulkanPipelineCache::SetShadowRegister(float* dest,
uint32_t register_name) {
float value = register_file_->values[register_name].f32;
if (*dest == value) {
return false;
@@ -956,8 +958,8 @@ bool PipelineCache::SetShadowRegister(float* dest, uint32_t register_name) {
return true;
}
bool PipelineCache::SetShadowRegisterArray(uint32_t* dest, uint32_t num,
uint32_t register_name) {
bool VulkanPipelineCache::SetShadowRegisterArray(uint32_t* dest, uint32_t num,
uint32_t register_name) {
bool dirty = false;
for (uint32_t i = 0; i < num; i++) {
uint32_t value = register_file_->values[register_name + i].u32;
@@ -972,7 +974,7 @@ bool PipelineCache::SetShadowRegisterArray(uint32_t* dest, uint32_t num,
return dirty;
}
PipelineCache::UpdateStatus PipelineCache::UpdateState(
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateState(
VulkanShader* vertex_shader, VulkanShader* pixel_shader,
xenos::PrimitiveType primitive_type) {
bool mismatch = false;
@@ -1014,7 +1016,8 @@ PipelineCache::UpdateStatus PipelineCache::UpdateState(
return mismatch ? UpdateStatus::kMismatch : UpdateStatus::kCompatible;
}
PipelineCache::UpdateStatus PipelineCache::UpdateRenderTargetState() {
VulkanPipelineCache::UpdateStatus
VulkanPipelineCache::UpdateRenderTargetState() {
auto& regs = update_render_targets_regs_;
bool dirty = false;
@@ -1053,7 +1056,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateRenderTargetState() {
return UpdateStatus::kMismatch;
}
PipelineCache::UpdateStatus PipelineCache::UpdateShaderStages(
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateShaderStages(
VulkanShader* vertex_shader, VulkanShader* pixel_shader,
xenos::PrimitiveType primitive_type) {
auto& regs = update_shader_stages_regs_;
@@ -1159,7 +1162,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateShaderStages(
return UpdateStatus::kMismatch;
}
PipelineCache::UpdateStatus PipelineCache::UpdateVertexInputState(
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateVertexInputState(
VulkanShader* vertex_shader) {
auto& regs = update_vertex_input_state_regs_;
auto& state_info = update_vertex_input_state_info_;
@@ -1184,7 +1187,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateVertexInputState(
return UpdateStatus::kCompatible;
}
PipelineCache::UpdateStatus PipelineCache::UpdateInputAssemblyState(
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateInputAssemblyState(
xenos::PrimitiveType primitive_type) {
auto& regs = update_input_assembly_state_regs_;
auto& state_info = update_input_assembly_state_info_;
@@ -1260,7 +1263,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateInputAssemblyState(
return UpdateStatus::kMismatch;
}
PipelineCache::UpdateStatus PipelineCache::UpdateViewportState() {
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateViewportState() {
auto& state_info = update_viewport_state_info_;
state_info.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
@@ -1277,7 +1280,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateViewportState() {
return UpdateStatus::kCompatible;
}
PipelineCache::UpdateStatus PipelineCache::UpdateRasterizationState(
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateRasterizationState(
xenos::PrimitiveType primitive_type) {
auto& regs = update_rasterization_state_regs_;
auto& state_info = update_rasterization_state_info_;
@@ -1395,7 +1398,8 @@ PipelineCache::UpdateStatus PipelineCache::UpdateRasterizationState(
return UpdateStatus::kMismatch;
}
PipelineCache::UpdateStatus PipelineCache::UpdateMultisampleState() {
VulkanPipelineCache::UpdateStatus
VulkanPipelineCache::UpdateMultisampleState() {
auto& regs = update_multisample_state_regs_;
auto& state_info = update_multisample_state_info_;
@@ -1448,7 +1452,8 @@ PipelineCache::UpdateStatus PipelineCache::UpdateMultisampleState() {
return UpdateStatus::kMismatch;
}
PipelineCache::UpdateStatus PipelineCache::UpdateDepthStencilState() {
VulkanPipelineCache::UpdateStatus
VulkanPipelineCache::UpdateDepthStencilState() {
auto& regs = update_depth_stencil_state_regs_;
auto& state_info = update_depth_stencil_state_info_;
@@ -1530,7 +1535,7 @@ PipelineCache::UpdateStatus PipelineCache::UpdateDepthStencilState() {
return UpdateStatus::kMismatch;
}
PipelineCache::UpdateStatus PipelineCache::UpdateColorBlendState() {
VulkanPipelineCache::UpdateStatus VulkanPipelineCache::UpdateColorBlendState() {
auto& regs = update_color_blend_state_regs_;
auto& state_info = update_color_blend_state_info_;

src/xenia/gpu/vulkan/pipeline_cache.h → src/xenia/gpu/vulkan/vulkan_pipeline_cache.h

@@ -7,8 +7,8 @@
******************************************************************************
*/
#ifndef XENIA_GPU_VULKAN_PIPELINE_CACHE_H_
#define XENIA_GPU_VULKAN_PIPELINE_CACHE_H_
#ifndef XENIA_GPU_VULKAN_VULKAN_PIPELINE_CACHE_H_
#define XENIA_GPU_VULKAN_VULKAN_PIPELINE_CACHE_H_
#include <unordered_map>
@@ -29,7 +29,7 @@ namespace vulkan {
// Configures and caches pipelines based on render state.
// This is responsible for properly setting all state required for a draw
// including shaders, various blend/etc options, and input configuration.
class PipelineCache {
class VulkanPipelineCache {
public:
enum class UpdateStatus {
kCompatible,
@@ -37,9 +37,9 @@ class PipelineCache {
kError,
};
PipelineCache(RegisterFile* register_file,
const ui::vulkan::VulkanProvider& provider);
~PipelineCache();
VulkanPipelineCache(RegisterFile* register_file,
const ui::vulkan::VulkanProvider& provider);
~VulkanPipelineCache();
VkResult Initialize(VkDescriptorSetLayout uniform_descriptor_set_layout,
VkDescriptorSetLayout texture_descriptor_set_layout,
@@ -310,4 +310,4 @@ class PipelineCache {
} // namespace gpu
} // namespace xe
#endif // XENIA_GPU_VULKAN_PIPELINE_CACHE_H_
#endif // XENIA_GPU_VULKAN_VULKAN_PIPELINE_CACHE_H_

src/xenia/gpu/vulkan/texture_cache.cc → src/xenia/gpu/vulkan/vulkan_texture_cache.cc

@@ -7,7 +7,7 @@
******************************************************************************
*/
#include "xenia/gpu/vulkan/texture_cache.h"
#include "xenia/gpu/vulkan/vulkan_texture_cache.h"
#include <algorithm>
@@ -56,9 +56,10 @@ const char* get_dimension_name(xenos::DataDimension dimension) {
return "unknown";
}
TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
TraceWriter* trace_writer,
ui::vulkan::VulkanProvider& provider)
VulkanTextureCache::VulkanTextureCache(Memory* memory,
RegisterFile* register_file,
TraceWriter* trace_writer,
ui::vulkan::VulkanProvider& provider)
: memory_(memory),
register_file_(register_file),
trace_writer_(trace_writer),
@@ -68,9 +69,9 @@ TextureCache::TextureCache(Memory* memory, RegisterFile* register_file,
wb_staging_buffer_(provider, VK_BUFFER_USAGE_TRANSFER_DST_BIT,
kStagingBufferSize) {}
TextureCache::~TextureCache() { Shutdown(); }
VulkanTextureCache::~VulkanTextureCache() { Shutdown(); }
VkResult TextureCache::Initialize() {
VkResult VulkanTextureCache::Initialize() {
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
VkDevice device = provider_.device();
VkResult status = VK_SUCCESS;
@@ -164,7 +165,7 @@ VkResult TextureCache::Initialize() {
return VK_SUCCESS;
}
void TextureCache::Shutdown() {
void VulkanTextureCache::Shutdown() {
if (memory_invalidation_callback_handle_ != nullptr) {
memory_->UnregisterPhysicalMemoryInvalidationCallback(
memory_invalidation_callback_handle_);
@@ -185,7 +186,7 @@ void TextureCache::Shutdown() {
nullptr);
}
TextureCache::Texture* TextureCache::AllocateTexture(
VulkanTextureCache::Texture* VulkanTextureCache::AllocateTexture(
const TextureInfo& texture_info, VkFormatFeatureFlags required_flags) {
auto format_info = texture_info.format_info();
assert_not_null(format_info);
@@ -313,7 +314,7 @@ TextureCache::Texture* TextureCache::AllocateTexture(
return texture;
}
bool TextureCache::FreeTexture(Texture* texture) {
bool VulkanTextureCache::FreeTexture(Texture* texture) {
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
VkDevice device = provider_.device();
@@ -354,7 +355,7 @@ bool TextureCache::FreeTexture(Texture* texture) {
return true;
}
void TextureCache::WatchTexture(Texture* texture) {
void VulkanTextureCache::WatchTexture(Texture* texture) {
uint32_t address, size;
{
@@ -424,7 +425,7 @@ void TextureCache::WatchTexture(Texture* texture) {
memory_->EnablePhysicalMemoryAccessCallbacks(address, size, true, false);
}
void TextureCache::TextureTouched(Texture* texture) {
void VulkanTextureCache::TextureTouched(Texture* texture) {
if (texture->pending_invalidation) {
return;
}
@@ -438,7 +439,7 @@ void TextureCache::TextureTouched(Texture* texture) {
texture->pending_invalidation = true;
}
std::pair<uint32_t, uint32_t> TextureCache::MemoryInvalidationCallback(
std::pair<uint32_t, uint32_t> VulkanTextureCache::MemoryInvalidationCallback(
uint32_t physical_address_start, uint32_t length, bool exact_range) {
global_critical_region_.Acquire();
if (watched_textures_.empty()) {
@@ -478,14 +479,15 @@ std::pair<uint32_t, uint32_t> TextureCache::MemoryInvalidationCallback(
return std::make_pair(previous_end, next_start - previous_end);
}
std::pair<uint32_t, uint32_t> TextureCache::MemoryInvalidationCallbackThunk(
std::pair<uint32_t, uint32_t>
VulkanTextureCache::MemoryInvalidationCallbackThunk(
void* context_ptr, uint32_t physical_address_start, uint32_t length,
bool exact_range) {
return reinterpret_cast<TextureCache*>(context_ptr)
return reinterpret_cast<VulkanTextureCache*>(context_ptr)
->MemoryInvalidationCallback(physical_address_start, length, exact_range);
}
TextureCache::Texture* TextureCache::DemandResolveTexture(
VulkanTextureCache::Texture* VulkanTextureCache::DemandResolveTexture(
const TextureInfo& texture_info) {
auto texture_hash = texture_info.hash();
for (auto it = textures_.find(texture_hash); it != textures_.end(); ++it) {
@@ -544,9 +546,9 @@ TextureCache::Texture* TextureCache::DemandResolveTexture(
return texture;
}
TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
VkCommandBuffer command_buffer,
VkFence completion_fence) {
VulkanTextureCache::Texture* VulkanTextureCache::Demand(
const TextureInfo& texture_info, VkCommandBuffer command_buffer,
VkFence completion_fence) {
// Run a tight loop to scan for an exact match existing texture.
auto texture_hash = texture_info.hash();
for (auto it = textures_.find(texture_hash); it != textures_.end(); ++it) {
@@ -629,8 +631,8 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
return texture;
}
TextureCache::TextureView* TextureCache::DemandView(Texture* texture,
uint16_t swizzle) {
VulkanTextureCache::TextureView* VulkanTextureCache::DemandView(
Texture* texture, uint16_t swizzle) {
for (auto it = texture->views.begin(); it != texture->views.end(); ++it) {
if ((*it)->swizzle == swizzle) {
return (*it).get();
@@ -717,7 +719,8 @@ TextureCache::TextureView* TextureCache::DemandView(Texture* texture,
return nullptr;
}
TextureCache::Sampler* TextureCache::Demand(const SamplerInfo& sampler_info) {
VulkanTextureCache::Sampler* VulkanTextureCache::Demand(
const SamplerInfo& sampler_info) {
#if FINE_GRAINED_DRAW_SCOPES
SCOPE_profile_cpu_f("gpu");
#endif // FINE_GRAINED_DRAW_SCOPES
@@ -875,7 +878,8 @@ bool TextureFormatIsSimilar(xenos::TextureFormat left,
#undef COMPARE_FORMAT
}
TextureCache::Texture* TextureCache::Lookup(const TextureInfo& texture_info) {
VulkanTextureCache::Texture* VulkanTextureCache::Lookup(
const TextureInfo& texture_info) {
auto texture_hash = texture_info.hash();
for (auto it = textures_.find(texture_hash); it != textures_.end(); ++it) {
if (it->second->texture_info == texture_info) {
@@ -918,11 +922,9 @@ TextureCache::Texture* TextureCache::Lookup(const TextureInfo& texture_info) {
return nullptr;
}
TextureCache::Texture* TextureCache::LookupAddress(uint32_t guest_address,
uint32_t width,
uint32_t height,
xenos::TextureFormat format,
VkOffset2D* out_offset) {
VulkanTextureCache::Texture* VulkanTextureCache::LookupAddress(
uint32_t guest_address, uint32_t width, uint32_t height,
xenos::TextureFormat format, VkOffset2D* out_offset) {
for (auto it = textures_.begin(); it != textures_.end(); ++it) {
const auto& texture_info = it->second->texture_info;
if (guest_address >= texture_info.memory.base_address &&
@@ -958,8 +960,8 @@ TextureCache::Texture* TextureCache::LookupAddress(uint32_t guest_address,
return nullptr;
}
void TextureCache::FlushPendingCommands(VkCommandBuffer command_buffer,
VkFence completion_fence) {
void VulkanTextureCache::FlushPendingCommands(VkCommandBuffer command_buffer,
VkFence completion_fence) {
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
VkDevice device = provider_.device();
auto status = dfn.vkEndCommandBuffer(command_buffer);
@@ -992,8 +994,9 @@ void TextureCache::FlushPendingCommands(VkCommandBuffer command_buffer,
dfn.vkBeginCommandBuffer(command_buffer, &begin_info);
}
bool TextureCache::ConvertTexture(uint8_t* dest, VkBufferImageCopy* copy_region,
uint32_t mip, const TextureInfo& src) {
bool VulkanTextureCache::ConvertTexture(uint8_t* dest,
VkBufferImageCopy* copy_region,
uint32_t mip, const TextureInfo& src) {
#if FINE_GRAINED_DRAW_SCOPES
SCOPE_profile_cpu_f("gpu");
#endif // FINE_GRAINED_DRAW_SCOPES
@@ -1064,9 +1067,9 @@ bool TextureCache::ConvertTexture(uint8_t* dest, VkBufferImageCopy* copy_region,
return true;
}
bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
VkFence completion_fence, Texture* dest,
const TextureInfo& src) {
bool VulkanTextureCache::UploadTexture(VkCommandBuffer command_buffer,
VkFence completion_fence, Texture* dest,
const TextureInfo& src) {
#if FINE_GRAINED_DRAW_SCOPES
SCOPE_profile_cpu_f("gpu");
#endif // FINE_GRAINED_DRAW_SCOPES
@@ -1103,7 +1106,8 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
if (!staging_buffer_.CanAcquire(unpack_length)) {
// The staging buffer isn't big enough to hold this texture.
XELOGE(
"TextureCache staging buffer is too small! (uploading 0x{:X} bytes)",
"VulkanTextureCache staging buffer is too small! (uploading 0x{:X} "
"bytes)",
unpack_length);
assert_always();
return false;
@@ -1226,7 +1230,8 @@ bool TextureCache::UploadTexture(VkCommandBuffer command_buffer,
return true;
}
const FormatInfo* TextureCache::GetFormatInfo(xenos::TextureFormat format) {
const FormatInfo* VulkanTextureCache::GetFormatInfo(
xenos::TextureFormat format) {
switch (format) {
case xenos::TextureFormat::k_CTX1:
return FormatInfo::Get(xenos::TextureFormat::k_8_8);
@@ -1237,7 +1242,7 @@ const FormatInfo* TextureCache::GetFormatInfo(xenos::TextureFormat format) {
}
}
texture_conversion::CopyBlockCallback TextureCache::GetFormatCopyBlock(
texture_conversion::CopyBlockCallback VulkanTextureCache::GetFormatCopyBlock(
xenos::TextureFormat format) {
switch (format) {
case xenos::TextureFormat::k_CTX1:
@@ -1249,7 +1254,8 @@ texture_conversion::CopyBlockCallback TextureCache::GetFormatCopyBlock(
}
}
TextureExtent TextureCache::GetMipExtent(const TextureInfo& src, uint32_t mip) {
TextureExtent VulkanTextureCache::GetMipExtent(const TextureInfo& src,
uint32_t mip) {
auto format_info = GetFormatInfo(src.format);
uint32_t width = src.width + 1;
uint32_t height = src.height + 1;
@@ -1267,9 +1273,9 @@ TextureExtent TextureCache::GetMipExtent(const TextureInfo& src, uint32_t mip) {
return extent;
}
uint32_t TextureCache::ComputeMipStorage(const FormatInfo* format_info,
uint32_t width, uint32_t height,
uint32_t depth, uint32_t mip) {
uint32_t VulkanTextureCache::ComputeMipStorage(const FormatInfo* format_info,
uint32_t width, uint32_t height,
uint32_t depth, uint32_t mip) {
assert_not_null(format_info);
TextureExtent extent;
if (mip == 0) {
@@ -1285,14 +1291,15 @@ uint32_t TextureCache::ComputeMipStorage(const FormatInfo* format_info,
return extent.all_blocks() * bytes_per_block;
}
uint32_t TextureCache::ComputeMipStorage(const TextureInfo& src, uint32_t mip) {
uint32_t VulkanTextureCache::ComputeMipStorage(const TextureInfo& src,
uint32_t mip) {
uint32_t size = ComputeMipStorage(GetFormatInfo(src.format), src.width + 1,
src.height + 1, src.depth + 1, mip);
// ensure 4-byte alignment
return (size + 3) & (~3u);
}
uint32_t TextureCache::ComputeTextureStorage(const TextureInfo& src) {
uint32_t VulkanTextureCache::ComputeTextureStorage(const TextureInfo& src) {
auto format_info = GetFormatInfo(src.format);
uint32_t width = src.width + 1;
uint32_t height = src.height + 1;
@@ -1309,7 +1316,7 @@ uint32_t TextureCache::ComputeTextureStorage(const TextureInfo& src) {
return length;
}
void TextureCache::WritebackTexture(Texture* texture) {
void VulkanTextureCache::WritebackTexture(Texture* texture) {
const ui::vulkan::VulkanProvider::DeviceFunctions& dfn = provider_.dfn();
VkResult status = VK_SUCCESS;
VkFence fence = wb_command_pool_->BeginBatch();
@@ -1391,7 +1398,7 @@ void TextureCache::WritebackTexture(Texture* texture) {
wb_staging_buffer_.Scavenge();
}
void TextureCache::HashTextureBindings(
void VulkanTextureCache::HashTextureBindings(
XXH3_state_t* hash_state, uint32_t& fetch_mask,
const std::vector<Shader::TextureBinding>& bindings) {
for (auto& binding : bindings) {
@@ -1412,7 +1419,7 @@ void TextureCache::HashTextureBindings(
}
}
VkDescriptorSet TextureCache::PrepareTextureSet(
VkDescriptorSet VulkanTextureCache::PrepareTextureSet(
VkCommandBuffer command_buffer, VkFence completion_fence,
const std::vector<Shader::TextureBinding>& vertex_bindings,
const std::vector<Shader::TextureBinding>& pixel_bindings) {
@@ -1478,7 +1485,7 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
return descriptor_set;
}
bool TextureCache::SetupTextureBindings(
bool VulkanTextureCache::SetupTextureBindings(
VkCommandBuffer command_buffer, VkFence completion_fence,
UpdateSetInfo* update_set_info,
const std::vector<Shader::TextureBinding>& bindings) {
@@ -1496,10 +1503,9 @@ bool TextureCache::SetupTextureBindings(
return !any_failed;
}
bool TextureCache::SetupTextureBinding(VkCommandBuffer command_buffer,
VkFence completion_fence,
UpdateSetInfo* update_set_info,
const Shader::TextureBinding& binding) {
bool VulkanTextureCache::SetupTextureBinding(
VkCommandBuffer command_buffer, VkFence completion_fence,
UpdateSetInfo* update_set_info, const Shader::TextureBinding& binding) {
#if FINE_GRAINED_DRAW_SCOPES
SCOPE_profile_cpu_f("gpu");
#endif // FINE_GRAINED_DRAW_SCOPES
@@ -1590,7 +1596,7 @@ bool TextureCache::SetupTextureBinding(VkCommandBuffer command_buffer,
return true;
}
void TextureCache::RemoveInvalidatedTextures() {
void VulkanTextureCache::RemoveInvalidatedTextures() {
std::unordered_set<Texture*>& invalidated_textures = *invalidated_textures_;
// Clean up any invalidated textures.
@@ -1619,7 +1625,7 @@ void TextureCache::RemoveInvalidatedTextures() {
}
}
void TextureCache::ClearCache() {
void VulkanTextureCache::ClearCache() {
RemoveInvalidatedTextures();
for (auto it = textures_.begin(); it != textures_.end(); ++it) {
while (!FreeTexture(it->second)) {
@@ -1639,7 +1645,7 @@ void TextureCache::ClearCache() {
samplers_.clear();
}
void TextureCache::Scavenge() {
void VulkanTextureCache::Scavenge() {
SCOPE_profile_cpu_f("gpu");
// Close any open descriptor pool batches

src/xenia/gpu/vulkan/texture_cache.h → src/xenia/gpu/vulkan/vulkan_texture_cache.h

@@ -7,8 +7,8 @@
******************************************************************************
*/
#ifndef XENIA_GPU_VULKAN_TEXTURE_CACHE_H_
#define XENIA_GPU_VULKAN_TEXTURE_CACHE_H_
#ifndef XENIA_GPU_VULKAN_VULKAN_TEXTURE_CACHE_H_
#define XENIA_GPU_VULKAN_VULKAN_TEXTURE_CACHE_H_
#include <algorithm>
#include <list>
@@ -34,7 +34,7 @@ namespace gpu {
namespace vulkan {
//
class TextureCache {
class VulkanTextureCache {
public:
struct TextureView;
@@ -75,9 +75,10 @@ class TextureCache {
};
};
TextureCache(Memory* memory, RegisterFile* register_file,
TraceWriter* trace_writer, ui::vulkan::VulkanProvider& provider);
~TextureCache();
VulkanTextureCache(Memory* memory, RegisterFile* register_file,
TraceWriter* trace_writer,
ui::vulkan::VulkanProvider& provider);
~VulkanTextureCache();
VkResult Initialize();
void Shutdown();
@@ -237,4 +238,4 @@ class TextureCache {
} // namespace gpu
} // namespace xe
#endif // XENIA_GPU_VULKAN_TEXTURE_CACHE_H_
#endif // XENIA_GPU_VULKAN_VULKAN_TEXTURE_CACHE_H_