Merge branch 'master' of github.com:benvanik/xenia

commit 5507f83e8b
Author: DrChat
Date:   2017-03-16 15:03:02 -05:00

9 changed files with 185 additions and 258 deletions

View File

@@ -42,6 +42,9 @@ bool SamplerInfo::Prepare(const xenos::xe_gpu_texture_fetch_t& fetch,
? static_cast<AnisoFilter>(fetch.aniso_filter)
: fetch_instr.attributes.aniso_filter;
out_info->border_color = static_cast<BorderColor>(fetch.border_color);
out_info->lod_bias = (fetch.lod_bias) / 32.f;
return true;
}
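The new lod_bias field carries the fetch constant's LOD bias as a float. The divide by 32.f reads as a signed fixed-point value with five fractional bits; below is a minimal standalone sketch of that conversion (ConvertLodBias is a hypothetical helper name, not part of this commit):

#include <cstdint>
#include <cstdio>

// Hypothetical helper illustrating the conversion above: one raw step is
// assumed to be 1/32 of a mip level, which is what the divide by 32.f implies.
float ConvertLodBias(int32_t raw_lod_bias) {
  return static_cast<float>(raw_lod_bias) / 32.f;
}

int main() {
  std::printf("%+.3f\n", ConvertLodBias(16));   // +0.500 mip levels
  std::printf("%+.3f\n", ConvertLodBias(-32));  // -1.000 mip level
  return 0;
}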

View File

@@ -24,6 +24,8 @@ struct SamplerInfo {
ClampMode clamp_v;
ClampMode clamp_w;
AnisoFilter aniso_filter;
BorderColor border_color;
float lod_bias;
static bool Prepare(const xenos::xe_gpu_texture_fetch_t& fetch,
const ParsedTextureFetchInstruction& fetch_instr,

View File

@@ -1635,13 +1635,6 @@ void SpirvShaderTranslator::ProcessVectorAluInstruction(
const ParsedAluInstruction& instr) {
auto& b = *builder_;
// TODO: If we have identical operands, reuse previous one.
Id sources[3] = {0};
Id dest = vec4_float_zero_;
for (size_t i = 0; i < instr.operand_count; i++) {
sources[i] = LoadFromOperand(instr.operands[i]);
}
// Close the open predicated block if this instr isn't predicated or the
// conditions do not match.
if (open_predicated_block_ &&
@@ -1669,6 +1662,13 @@ void SpirvShaderTranslator::ProcessVectorAluInstruction(
b.setBuildPoint(block);
}
// TODO: If we have identical operands, reuse previous one.
Id sources[3] = {0};
Id dest = vec4_float_zero_;
for (size_t i = 0; i < instr.operand_count; i++) {
sources[i] = LoadFromOperand(instr.operands[i]);
}
bool close_predicated_block = false;
switch (instr.vector_opcode) {
case AluVectorOpcode::kAdd: {
@@ -2048,22 +2048,6 @@ void SpirvShaderTranslator::ProcessScalarAluInstruction(
const ParsedAluInstruction& instr) {
auto& b = *builder_;
// TODO: If we have identical operands, reuse previous one.
Id sources[3] = {0};
Id dest = b.makeFloatConstant(0);
for (size_t i = 0, x = 0; i < instr.operand_count; i++) {
auto src = LoadFromOperand(instr.operands[i]);
// Pull components out of the vector operands and use them as sources.
if (instr.operands[i].component_count > 1) {
for (int j = 0; j < instr.operands[i].component_count; j++) {
sources[x++] = b.createCompositeExtract(src, float_type_, j);
}
} else {
sources[x++] = src;
}
}
// Close the open predicated block if this instr isn't predicated or the
// conditions do not match.
if (open_predicated_block_ &&
@@ -2091,6 +2075,22 @@ void SpirvShaderTranslator::ProcessScalarAluInstruction(
b.setBuildPoint(block);
}
// TODO: If we have identical operands, reuse previous one.
Id sources[3] = {0};
Id dest = b.makeFloatConstant(0);
for (size_t i = 0, x = 0; i < instr.operand_count; i++) {
auto src = LoadFromOperand(instr.operands[i]);
// Pull components out of the vector operands and use them as sources.
if (instr.operands[i].component_count > 1) {
for (int j = 0; j < instr.operands[i].component_count; j++) {
sources[x++] = b.createCompositeExtract(src, float_type_, j);
}
} else {
sources[x++] = src;
}
}
bool close_predicated_block = false;
switch (instr.scalar_opcode) {
case AluScalarOpcode::kAdds:

View File

@@ -145,29 +145,66 @@ bool TextureInfo::Prepare(const xe_gpu_texture_fetch_t& fetch,
}
// Must be called here when we know the format.
info.input_length = 0; // Populated below.
info.output_length = 0;
switch (info.dimension) {
case Dimension::k1D:
info.CalculateTextureSizes1D(fetch);
break;
case Dimension::k2D:
info.CalculateTextureSizes2D(fetch);
break;
case Dimension::k3D:
case Dimension::k1D: {
info.CalculateTextureSizes1D(fetch.size_1d.width + 1);
} break;
case Dimension::k2D: {
info.CalculateTextureSizes2D(fetch.size_2d.width + 1,
fetch.size_2d.height + 1);
// DEBUG: Make sure our calculated pitch is equal to the fetch pitch.
uint32_t bytes_per_block = info.format_info->block_width *
info.format_info->block_height *
info.format_info->bits_per_pixel / 8;
assert_true(info.size_2d.input_pitch ==
(bytes_per_block * fetch.pitch << 5) /
info.format_info->block_width);
} break;
case Dimension::k3D: {
// TODO(benvanik): calculate size.
return false;
case Dimension::kCube:
info.CalculateTextureSizesCube(fetch);
break;
}
case Dimension::kCube: {
info.CalculateTextureSizesCube(fetch.size_stack.width + 1,
fetch.size_stack.height + 1,
fetch.size_stack.depth + 1);
} break;
}
return true;
}
void TextureInfo::CalculateTextureSizes1D(const xe_gpu_texture_fetch_t& fetch) {
bool TextureInfo::PrepareResolve(uint32_t physical_address,
TextureFormat texture_format, Endian endian,
uint32_t width, uint32_t height,
TextureInfo* out_info) {
std::memset(out_info, 0, sizeof(TextureInfo));
auto& info = *out_info;
info.guest_address = physical_address;
info.dimension = Dimension::k2D;
info.width = width - 1;
info.height = height - 1;
info.format_info = FormatInfo::Get(static_cast<uint32_t>(texture_format));
info.endianness = endian;
info.is_tiled = true;
info.has_packed_mips = false;
info.input_length = 0;
info.output_length = 0;
if (info.format_info->format == TextureFormat::kUnknown) {
assert_true("Unsupported texture format");
return false;
}
info.CalculateTextureSizes2D(width, height);
return true;
}
void TextureInfo::CalculateTextureSizes1D(uint32_t width) {
// ?
size_1d.logical_width = 1 + fetch.size_1d.width;
size_1d.logical_width = width;
uint32_t block_width =
xe::round_up(size_1d.logical_width, format_info->block_width) /
@@ -186,25 +223,24 @@ void TextureInfo::CalculateTextureSizes1D(const xe_gpu_texture_fetch_t& fetch) {
}
size_1d.input_width = tile_width * 32 * format_info->block_width;
size_1d.output_width = block_width * format_info->block_width;
size_1d.input_pitch = byte_pitch;
size_1d.output_pitch = block_width * bytes_per_block;
input_length = size_1d.input_pitch;
// TODO(DrChat): Remove this, leave it up to the backend.
size_1d.output_width = block_width * format_info->block_width;
size_1d.output_pitch = block_width * bytes_per_block;
output_length = size_1d.output_pitch;
}
void TextureInfo::CalculateTextureSizes2D(const xe_gpu_texture_fetch_t& fetch) {
size_2d.logical_width = 1 + fetch.size_2d.width;
size_2d.logical_height = 1 + fetch.size_2d.height;
void TextureInfo::CalculateTextureSizes2D(uint32_t width, uint32_t height) {
size_2d.logical_width = width;
size_2d.logical_height = height;
// Here be dragons. The values here are used in texture_cache.cc to copy
// images and create GL textures. Changes here will impact that code.
// TODO(benvanik): generic texture copying utility.
// w/h in blocks must be a multiple of block size.
// w/h in blocks.
uint32_t block_width =
xe::round_up(size_2d.logical_width, format_info->block_width) /
format_info->block_width;
@@ -212,15 +248,11 @@ void TextureInfo::CalculateTextureSizes2D(const xe_gpu_texture_fetch_t& fetch) {
xe::round_up(size_2d.logical_height, format_info->block_height) /
format_info->block_height;
// Tiles are 32x32 blocks. All textures must be multiples of tile dimensions.
// ...except textures don't seem to need a multiple of 32 for height.
// Tiles are 32x32 blocks. The pitch of all textures must be a multiple of
// tile dimensions.
uint32_t tile_width = uint32_t(std::ceil(block_width / 32.0f));
uint32_t tile_height = uint32_t(std::ceil(block_height / 32.0f));
size_2d.block_width = tile_width * 32;
size_2d.block_height =
/*format_info->type == FormatType::kCompressed
? tile_height * 32
:*/ block_height;
size_2d.block_height = block_height;
uint32_t bytes_per_block = format_info->block_width *
format_info->block_height *
@@ -234,22 +266,23 @@ void TextureInfo::CalculateTextureSizes2D(const xe_gpu_texture_fetch_t& fetch) {
size_2d.input_width = size_2d.block_width * format_info->block_width;
size_2d.input_height = size_2d.block_height * format_info->block_height;
size_2d.output_width = block_width * format_info->block_width;
size_2d.output_height = block_height * format_info->block_height;
size_2d.input_pitch = byte_pitch;
size_2d.output_pitch = block_width * bytes_per_block;
input_length = size_2d.input_pitch * size_2d.block_height;
// TODO(DrChat): Remove this, leave it up to the backend.
size_2d.output_width = block_width * format_info->block_width;
size_2d.output_height = block_height * format_info->block_height;
size_2d.output_pitch = block_width * bytes_per_block;
output_length = size_2d.output_pitch * block_height;
}
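A standalone sketch of the tiled 2D pitch math above, worked for a 128x128 DXT1 texture (4x4 blocks, 8 bytes per block). The helper is illustrative only: it assumes the byte pitch is block_width * bytes_per_block, as in the cube path below, and it omits the linear-texture case, which additionally rounds the pitch up to 256 bytes:

#include <cmath>
#include <cstdint>
#include <cstdio>

struct Size2D {
  uint32_t block_width;   // # of horizontal blocks, padded to a 32-block tile
  uint32_t block_height;  // # of vertical blocks (not padded)
  uint32_t input_pitch;   // bytes per row of blocks
  uint32_t input_length;  // total guest bytes for the mip
};

// Illustrative-only mirror of the tiled path in CalculateTextureSizes2D.
Size2D CalculateTiled2D(uint32_t width, uint32_t height, uint32_t block_w,
                        uint32_t block_h, uint32_t bits_per_pixel) {
  Size2D s;
  uint32_t blocks_x = (width + block_w - 1) / block_w;
  uint32_t blocks_y = (height + block_h - 1) / block_h;
  uint32_t tile_width = uint32_t(std::ceil(blocks_x / 32.0f));
  s.block_width = tile_width * 32;  // pitch padded to whole 32-block tiles
  s.block_height = blocks_y;        // height is not padded
  uint32_t bytes_per_block = block_w * block_h * bits_per_pixel / 8;
  s.input_pitch = s.block_width * bytes_per_block;
  s.input_length = s.input_pitch * s.block_height;
  return s;
}

int main() {
  // 128x128 DXT1: 32x32 blocks, 8 bytes/block -> 256-byte pitch, 8 KiB total.
  Size2D s = CalculateTiled2D(128, 128, 4, 4, 4);
  std::printf("pitch=%u length=%u\n", s.input_pitch, s.input_length);
  return 0;
}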
void TextureInfo::CalculateTextureSizesCube(
const xe_gpu_texture_fetch_t& fetch) {
assert_true(fetch.size_stack.depth + 1 == 6);
size_cube.logical_width = 1 + fetch.size_stack.width;
size_cube.logical_height = 1 + fetch.size_stack.height;
void TextureInfo::CalculateTextureSizesCube(uint32_t width, uint32_t height,
uint32_t depth) {
assert_true(depth == 6);
size_cube.logical_width = width;
size_cube.logical_height = height;
// w/h in blocks must be a multiple of block size.
uint32_t block_width =
@@ -268,23 +301,24 @@ void TextureInfo::CalculateTextureSizesCube(
uint32_t bytes_per_block = format_info->block_width *
format_info->block_height *
format_info->bits_per_pixel / 8;
uint32_t byte_pitch = tile_width * 32 * bytes_per_block;
uint32_t byte_pitch = size_cube.block_width * bytes_per_block;
if (!is_tiled) {
// Each row must be a multiple of 256 in linear textures.
byte_pitch = xe::round_up(byte_pitch, 256);
}
size_cube.input_width = tile_width * 32 * format_info->block_width;
size_cube.input_height = tile_height * 32 * format_info->block_height;
size_cube.output_width = block_width * format_info->block_width;
size_cube.output_height = block_height * format_info->block_height;
size_cube.input_width = size_cube.block_width * format_info->block_width;
size_cube.input_height = size_cube.block_height * format_info->block_height;
size_cube.input_pitch = byte_pitch;
size_cube.output_pitch = block_width * bytes_per_block;
size_cube.input_face_length = size_cube.input_pitch * size_cube.block_height;
input_length = size_cube.input_face_length * 6;
// TODO(DrChat): Remove this, leave it up to the backend.
size_cube.output_width = block_width * format_info->block_width;
size_cube.output_height = block_height * format_info->block_height;
size_cube.output_pitch = block_width * bytes_per_block;
size_cube.output_face_length = size_cube.output_pitch * block_height;
output_length = size_cube.output_face_length * 6;
}
@@ -346,7 +380,7 @@ bool TextureInfo::GetPackedTileOffset(const TextureInfo& texture_info,
return true;
}
// https://code.google.com/p/crunch/source/browse/trunk/inc/crn_decomp.h#4104
// https://github.com/BinomialLLC/crunch/blob/ea9b8d8c00c8329791256adafa8cf11e4e7942a2/inc/crn_decomp.h#L4108
uint32_t TextureInfo::TiledOffset2DOuter(uint32_t y, uint32_t width,
uint32_t log_bpp) {
uint32_t macro = ((y >> 5) * (width >> 5)) << (log_bpp + 7);
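TiledOffset2DOuter computes the row-dependent part of the swizzled address once per row; a per-texel inner helper finishes it. A hedged sketch of how the pair is typically combined when untiling follows. TiledOffset2DInner's signature is assumed from the existing code and does not appear in this hunk, and the returned offsets are treated here as byte offsets:

// Hedged sketch (lives alongside TextureInfo, needs <cstring>/<cstdint>):
// copy one mip level out of a tiled guest texture block by block.
void UntileBlocks(uint8_t* dest, const uint8_t* src, uint32_t width_in_blocks,
                  uint32_t height_in_blocks, uint32_t bytes_per_block,
                  uint32_t log_bpp, uint32_t output_pitch) {
  for (uint32_t y = 0; y < height_in_blocks; y++) {
    // Row-dependent part of the swizzled address, computed once per row.
    uint32_t base_offset =
        TextureInfo::TiledOffset2DOuter(y, width_in_blocks, log_bpp);
    for (uint32_t x = 0; x < width_in_blocks; x++) {
      uint32_t src_offset =
          TextureInfo::TiledOffset2DInner(x, y, log_bpp, base_offset);
      std::memcpy(dest + y * output_pitch + x * bytes_per_block,
                  src + src_offset, bytes_per_block);
    }
  }
}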

View File

@@ -34,7 +34,7 @@ enum class TextureFormat : uint32_t {
k_8_8 = 10,
k_Cr_Y1_Cb_Y0 = 11,
k_Y1_Cr_Y0_Cb = 12,
// ? hole
k_Shadow = 13,
k_8_8_8_8_A = 14,
k_4_4_4_4 = 15,
k_10_11_11 = 16,
@@ -42,7 +42,7 @@ enum class TextureFormat : uint32_t {
k_DXT1 = 18,
k_DXT2_3 = 19,
k_DXT4_5 = 20,
// ? hole
k_DXV = 21,
k_24_8 = 22,
k_24_8_FLOAT = 23,
k_16 = 24,
@@ -233,20 +233,24 @@ struct TextureInfo {
union {
struct {
uint32_t logical_width;
uint32_t block_width;
uint32_t input_width;
uint32_t input_pitch;
uint32_t block_width; // # of horizontal blocks
uint32_t input_width; // pixel pitch
uint32_t input_pitch; // pitch in bytes
// DEPRECATED: Do not use.
uint32_t output_width;
uint32_t output_pitch;
} size_1d;
struct {
uint32_t logical_width;
uint32_t logical_height;
uint32_t block_width;
uint32_t block_height;
uint32_t input_width;
uint32_t input_height;
uint32_t input_pitch;
uint32_t block_width; // # of horizontal blocks
uint32_t block_height; // # of vertical blocks
uint32_t input_width; // pixel pitch
uint32_t input_height; // pixel height
uint32_t input_pitch; // pitch in bytes
// DEPRECATED: Do not use.
uint32_t output_width;
uint32_t output_height;
uint32_t output_pitch;
@@ -256,15 +260,17 @@ struct TextureInfo {
struct {
uint32_t logical_width;
uint32_t logical_height;
uint32_t block_width;
uint32_t block_height;
uint32_t input_width;
uint32_t input_height;
uint32_t input_pitch;
uint32_t block_width; // # of horizontal blocks
uint32_t block_height; // # of vertical blocks
uint32_t input_width; // pixel pitch
uint32_t input_height; // pixel height
uint32_t input_pitch; // pitch in bytes
uint32_t input_face_length; // pitch of face in bytes
// DEPRECATED: Do not use.
uint32_t output_width;
uint32_t output_height;
uint32_t output_pitch;
uint32_t input_face_length;
uint32_t output_face_length;
} size_cube;
};
@@ -272,6 +278,11 @@ struct TextureInfo {
static bool Prepare(const xenos::xe_gpu_texture_fetch_t& fetch,
TextureInfo* out_info);
static bool PrepareResolve(uint32_t physical_address,
TextureFormat texture_format, Endian endian,
uint32_t width, uint32_t height,
TextureInfo* out_info);
static bool GetPackedTileOffset(const TextureInfo& texture_info,
uint32_t* out_offset_x,
uint32_t* out_offset_y);
@@ -286,9 +297,10 @@ struct TextureInfo {
}
private:
void CalculateTextureSizes1D(const xenos::xe_gpu_texture_fetch_t& fetch);
void CalculateTextureSizes2D(const xenos::xe_gpu_texture_fetch_t& fetch);
void CalculateTextureSizesCube(const xenos::xe_gpu_texture_fetch_t& fetch);
void CalculateTextureSizes1D(uint32_t width);
void CalculateTextureSizes2D(uint32_t width, uint32_t height);
void CalculateTextureSizesCube(uint32_t width, uint32_t height,
uint32_t depth);
};
} // namespace gpu

View File

@@ -419,10 +419,7 @@ void BufferCache::Flush(VkCommandBuffer command_buffer) {
vkFlushMappedMemoryRanges(device_, 1, &dirty_range);
}
void BufferCache::InvalidateCache() {
// TODO(benvanik): caching.
}
void BufferCache::InvalidateCache() { transient_cache_.clear(); }
void BufferCache::ClearCache() { transient_cache_.clear(); }
void BufferCache::Scavenge() {

View File

@@ -430,19 +430,26 @@ bool TextureCache::FreeTexture(Texture* texture) {
}
TextureCache::Texture* TextureCache::DemandResolveTexture(
const TextureInfo& texture_info, TextureFormat format,
VkOffset2D* out_offset) {
// Check to see if we've already used a texture at this location.
auto texture = LookupAddress(
texture_info.guest_address, texture_info.size_2d.block_width,
texture_info.size_2d.block_height, format, out_offset);
if (texture) {
return texture;
const TextureInfo& texture_info, TextureFormat format) {
auto texture_hash = texture_info.hash();
for (auto it = textures_.find(texture_hash); it != textures_.end(); ++it) {
if (it->second->texture_info == texture_info) {
if (it->second->pending_invalidation) {
// This texture has been invalidated!
RemoveInvalidatedTextures();
break;
}
// Tell the trace writer to "cache" this memory (but not read it)
trace_writer_->WriteMemoryReadCachedNop(texture_info.guest_address,
texture_info.input_length);
return it->second;
}
}
// No texture at this location. Make a new one.
texture = AllocateTexture(texture_info);
texture->is_full_texture = false;
auto texture = AllocateTexture(texture_info);
// Set up a debug name for the texture.
device_->DbgSetObjectName(
@@ -461,16 +468,18 @@ TextureCache::Texture* TextureCache::DemandResolveTexture(
auto touched_texture = reinterpret_cast<Texture*>(data_ptr);
// Clear watch handle first so we don't redundantly
// remove.
assert_not_zero(touched_texture->access_watch_handle);
touched_texture->access_watch_handle = 0;
touched_texture->pending_invalidation = true;
// Add to pending list so Scavenge will clean it up.
self->invalidated_resolve_textures_mutex_.lock();
self->invalidated_resolve_textures_.push_back(touched_texture);
self->invalidated_resolve_textures_mutex_.unlock();
self->invalidated_textures_mutex_.lock();
self->invalidated_textures_->push_back(touched_texture);
self->invalidated_textures_mutex_.unlock();
},
this, texture);
resolve_textures_.push_back(texture);
textures_[texture_hash] = texture;
return texture;
}
@@ -494,58 +503,6 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
}
}
// Check resolve textures.
for (auto it = resolve_textures_.begin(); it != resolve_textures_.end();
++it) {
auto texture = (*it);
if (texture_info.guest_address == texture->texture_info.guest_address &&
texture_info.size_2d.logical_width ==
texture->texture_info.size_2d.logical_width &&
texture_info.size_2d.logical_height ==
texture->texture_info.size_2d.logical_height) {
if (texture->pending_invalidation) {
// Texture invalidated! Remove.
RemoveInvalidatedTextures();
break;
}
// Exact match.
// TODO: Lazy match (at an offset)
// Upgrade this texture to a full texture.
texture->is_full_texture = true;
texture->texture_info = texture_info;
if (texture->access_watch_handle) {
memory_->CancelAccessWatch(texture->access_watch_handle);
}
// Tell the trace writer to cache this memory but don't read it
trace_writer_->WriteMemoryReadCachedNop(texture_info.guest_address,
texture_info.input_length);
texture->access_watch_handle = memory_->AddPhysicalAccessWatch(
texture_info.guest_address, texture_info.input_length,
cpu::MMIOHandler::kWatchWrite,
[](void* context_ptr, void* data_ptr, uint32_t address) {
auto self = reinterpret_cast<TextureCache*>(context_ptr);
auto touched_texture = reinterpret_cast<Texture*>(data_ptr);
// Clear watch handle first so we don't redundantly
// remove.
touched_texture->access_watch_handle = 0;
touched_texture->pending_invalidation = true;
// Add to pending list so Scavenge will clean it up.
self->invalidated_textures_mutex_.lock();
self->invalidated_textures_->push_back(touched_texture);
self->invalidated_textures_mutex_.unlock();
},
this, texture);
textures_[texture_hash] = *it;
it = resolve_textures_.erase(it);
return textures_[texture_hash];
}
}
if (!command_buffer) {
// Texture not found and no command buffer was passed, preventing us from
// uploading a new one.
@@ -598,22 +555,6 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
"0x%.8X - 0x%.8X", texture_info.guest_address,
texture_info.guest_address + texture_info.output_length));
// Copy in overlapping resolve textures.
// FIXME: RDR appears to take textures from small chunks of a resolve texture?
if (texture_info.dimension == Dimension::k2D) {
for (auto it = resolve_textures_.begin(); it != resolve_textures_.end();
++it) {
auto texture = (*it);
if (texture_info.guest_address >= texture->texture_info.guest_address &&
texture_info.guest_address < texture->texture_info.guest_address +
texture->texture_info.input_length) {
// Lazy matched a resolve texture. Copy it in and destroy it.
// Future resolves will just copy directly into this texture.
// assert_always();
}
}
}
// Though we didn't find an exact match, that doesn't mean we're out of the
// woods yet. This texture could either be a portion of another texture or
// vice versa. Copy any overlapping textures into this texture.
@@ -634,6 +575,7 @@ TextureCache::Texture* TextureCache::Demand(const TextureInfo& texture_info,
auto touched_texture = reinterpret_cast<Texture*>(data_ptr);
// Clear watch handle first so we don't redundantly
// remove.
assert_not_zero(touched_texture->access_watch_handle);
touched_texture->access_watch_handle = 0;
touched_texture->pending_invalidation = true;
// Add to pending list so Scavenge will clean it up.
@@ -693,8 +635,9 @@ TextureCache::TextureView* TextureCache::DemandView(Texture* texture,
swiz_component_map[(swizzle >> 9) & 0x7],
};
view_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
if (texture->format == VK_FORMAT_D24_UNORM_S8_UINT) {
// This applies to any depth/stencil format, but we only use D24S8.
if (texture->format == VK_FORMAT_D24_UNORM_S8_UINT ||
texture->format == VK_FORMAT_D32_SFLOAT_S8_UINT) {
// This applies to any depth/stencil format, but we only use D24S8 / D32FS8.
view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
}
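Vulkan requires an image view used to sample a combined depth/stencil image to select a single aspect, which is why the aspect mask is forced to depth here. If more depth formats ever show up, a format-based check could replace the explicit list; FormatHasDepth below is a hypothetical helper, not part of this change:

// Hypothetical helper, not part of this change: true for any VkFormat that
// carries a depth aspect, so the view code would not need to enumerate the
// formats it happens to use.
bool FormatHasDepth(VkFormat format) {
  switch (format) {
    case VK_FORMAT_D16_UNORM:
    case VK_FORMAT_X8_D24_UNORM_PACK32:
    case VK_FORMAT_D32_SFLOAT:
    case VK_FORMAT_D16_UNORM_S8_UINT:
    case VK_FORMAT_D24_UNORM_S8_UINT:
    case VK_FORMAT_D32_SFLOAT_S8_UINT:
      return true;
    default:
      return false;
  }
}

// Usage in DemandView (sketch):
//   if (FormatHasDepth(texture->format)) {
//     view_info.subresourceRange.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
//   }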
@@ -804,7 +747,7 @@ TextureCache::Sampler* TextureCache::Demand(const SamplerInfo& sampler_info) {
sampler_create_info.addressModeW =
address_mode_map[static_cast<int>(sampler_info.clamp_w)];
sampler_create_info.mipLodBias = 0.0f;
sampler_create_info.mipLodBias = sampler_info.lod_bias;
float aniso = 0.f;
switch (sampler_info.aniso_filter) {
@@ -895,23 +838,6 @@ TextureCache::Texture* TextureCache::LookupAddress(uint32_t guest_address,
}
}
// Check resolve textures
for (auto it = resolve_textures_.begin(); it != resolve_textures_.end();
++it) {
const auto& texture_info = (*it)->texture_info;
if (texture_info.guest_address == guest_address &&
texture_info.dimension == Dimension::k2D &&
texture_info.size_2d.input_width == width &&
texture_info.size_2d.input_height == height) {
if (out_offset) {
out_offset->x = 0;
out_offset->y = 0;
}
return (*it);
}
}
return nullptr;
}
@@ -1239,8 +1165,7 @@ bool TextureCache::UploadTexture2D(VkCommandBuffer command_buffer,
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = nullptr;
barrier.srcAccessMask = 0;
barrier.dstAccessMask =
VK_ACCESS_TRANSFER_WRITE_BIT | VK_ACCESS_HOST_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.oldLayout = dest->image_layout;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
@@ -1419,7 +1344,6 @@ VkDescriptorSet TextureCache::PrepareTextureSet(
descriptor_pool_->BeginBatch(completion_fence);
}
// TODO(benvanik): reuse.
auto descriptor_set =
descriptor_pool_->AcquireEntry(texture_descriptor_set_layout_);
if (!descriptor_set) {
@@ -1557,24 +1481,6 @@ void TextureCache::RemoveInvalidatedTextures() {
invalidated_textures.clear();
}
// Invalidated resolve textures.
invalidated_resolve_textures_mutex_.lock();
if (!invalidated_resolve_textures_.empty()) {
for (auto it = invalidated_resolve_textures_.begin();
it != invalidated_resolve_textures_.end(); ++it) {
pending_delete_textures_.push_back(*it);
auto tex =
std::find(resolve_textures_.begin(), resolve_textures_.end(), *it);
if (tex != resolve_textures_.end()) {
resolve_textures_.erase(tex);
}
}
invalidated_resolve_textures_.clear();
}
invalidated_resolve_textures_mutex_.unlock();
}
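With the separate resolve-texture list gone, every access-watch callback funnels invalidations through the same invalidated_textures_ path. The members kept in the header further down (a mutex, an active-list pointer, and invalidated_textures_sets_[2]) imply a double-buffered queue that Scavenge swaps and drains; the class below is a generic sketch of that pattern under those assumptions, not xenia's actual implementation:

#include <mutex>
#include <vector>

struct Texture;

// Generic sketch of a double-buffered invalidation queue. Push() is safe to
// call from the access-watch callback; Drain() swaps the active buffer under
// the lock so the drained set can be processed without holding it.
class InvalidationQueue {
 public:
  InvalidationQueue() : active_(&sets_[0]) {}

  void Push(Texture* texture) {
    std::lock_guard<std::mutex> lock(mutex_);
    active_->push_back(texture);
  }

  void Drain(std::vector<Texture*>* out) {
    std::lock_guard<std::mutex> lock(mutex_);
    std::vector<Texture*>* drained = active_;
    active_ = (active_ == &sets_[0]) ? &sets_[1] : &sets_[0];
    out->swap(*drained);
    drained->clear();
  }

 private:
  std::mutex mutex_;
  std::vector<Texture*> sets_[2];
  std::vector<Texture*>* active_;
};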
void TextureCache::ClearCache() {

View File

@@ -38,10 +38,6 @@ class TextureCache {
TextureInfo texture_info;
std::vector<std::unique_ptr<TextureView>> views;
// True if we know all info about this texture, false otherwise.
// (e.g. we resolve to system memory and may not know the full details about
// this texture)
bool is_full_texture;
VkFormat format;
VkImage image;
VkImageLayout image_layout;
@@ -93,8 +89,6 @@ class TextureCache {
const std::vector<Shader::TextureBinding>& vertex_bindings,
const std::vector<Shader::TextureBinding>& pixel_bindings);
// TODO(benvanik): UploadTexture.
// TODO(benvanik): Resolve.
// TODO(benvanik): ReadTexture.
// Looks for a texture either containing or matching these parameters.
@@ -107,17 +101,9 @@ class TextureCache {
VkOffset2D* out_offset = nullptr);
// Demands a texture for the purpose of resolving from EDRAM. This either
// creates a new texture or returns a previously created texture. texture_info
// is not required to be completely filled out, just guest_address and all
// sizes.
//
// It's possible that this may return an image that is larger than the
// requested size (e.g. resolving into a bigger texture) or an image that
// must have an offset applied. If so, the caller must handle this.
// At the very least, it's guaranteed that the image will be large enough to
// hold the requested size.
// creates a new texture or returns a previously created texture.
Texture* DemandResolveTexture(const TextureInfo& texture_info,
TextureFormat format, VkOffset2D* out_offset);
TextureFormat format);
// Clears all cached content.
void ClearCache();
@@ -203,16 +189,12 @@ class TextureCache {
ui::vulkan::CircularBuffer staging_buffer_;
std::unordered_map<uint64_t, Texture*> textures_;
std::unordered_map<uint64_t, Sampler*> samplers_;
std::vector<Texture*> resolve_textures_;
std::list<Texture*> pending_delete_textures_;
std::mutex invalidated_textures_mutex_;
std::vector<Texture*>* invalidated_textures_;
std::vector<Texture*> invalidated_textures_sets_[2];
std::mutex invalidated_resolve_textures_mutex_;
std::vector<Texture*> invalidated_resolve_textures_;
struct UpdateSetInfo {
// Bitmap of all 32 fetch constants and whether they have been set up yet.
// This prevents duplication across the vertex and pixel shader.

View File

@@ -850,8 +850,6 @@ bool VulkanCommandProcessor::IssueCopy() {
// but I can't seem to find something similar.
uint32_t dest_logical_width = copy_dest_pitch;
uint32_t dest_logical_height = copy_dest_height;
uint32_t dest_block_width = xe::round_up(dest_logical_width, 32);
uint32_t dest_block_height = /*xe::round_up(*/ dest_logical_height /*, 32)*/;
uint32_t window_offset = regs[XE_GPU_REG_PA_SC_WINDOW_OFFSET].u32;
int16_t window_offset_x = window_offset & 0x7FFF;
@@ -944,26 +942,19 @@ bool VulkanCommandProcessor::IssueCopy() {
}
}
Endian resolve_endian = Endian::k8in32;
if (copy_dest_endian <= Endian128::k16in32) {
resolve_endian = static_cast<Endian>(copy_dest_endian);
}
// Demand a resolve texture from the texture cache.
TextureInfo tex_info = {};
tex_info.guest_address = copy_dest_base;
tex_info.width = dest_logical_width - 1;
tex_info.height = dest_logical_height - 1;
tex_info.dimension = gpu::Dimension::k2D;
tex_info.input_length =
dest_block_width * dest_block_height * dest_texel_size;
tex_info.format_info = FormatInfo::Get(uint32_t(copy_dest_format));
tex_info.endianness = Endian::k8in32;
tex_info.is_tiled = true;
tex_info.size_2d.logical_width = dest_logical_width;
tex_info.size_2d.logical_height = dest_logical_height;
tex_info.size_2d.block_width = dest_block_width;
tex_info.size_2d.block_height = dest_block_height;
tex_info.size_2d.input_width = dest_block_width;
tex_info.size_2d.input_height = dest_block_height;
tex_info.size_2d.input_pitch = copy_dest_pitch * dest_texel_size;
TextureInfo tex_info;
TextureInfo::PrepareResolve(copy_dest_base, copy_dest_format, resolve_endian,
dest_logical_width, dest_logical_height,
&tex_info);
auto texture =
texture_cache_->DemandResolveTexture(tex_info, copy_dest_format, nullptr);
texture_cache_->DemandResolveTexture(tex_info, copy_dest_format);
assert_not_null(texture);
texture->in_flight_fence = current_batch_fence_;