Merge pull request #7716 from stenzek/stereo
Stereoscopy regression fixes
commit 8aaebfa2b3
@@ -61,6 +61,7 @@ void main()
       gl_Position = gl_in[i].gl_Position;
+      gl_Layer = layer;
       EmitVertex();
     }
   }
   EndPrimitive();
 }
@@ -416,7 +416,10 @@ TextureCacheBase::DoPartialTextureUpdates(TCacheEntry* entry_to_update, u8* pale
       dstrect.top = dst_y;
       dstrect.right = (dst_x + copy_width);
       dstrect.bottom = (dst_y + copy_height);
-      for (u32 layer = 0; layer < entry->texture->GetConfig().layers; layer++)
+
+      // If one copy is stereo, and the other isn't... not much we can do here :/
+      const u32 layers_to_copy = std::min(entry->GetNumLayers(), entry_to_update->GetNumLayers());
+      for (u32 layer = 0; layer < layers_to_copy; layer++)
       {
         entry_to_update->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer,
                                                            0, dstrect, layer, 0);
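A note on the hunk above: clamping the loop to layers_to_copy matters when a partial update pairs a mono (single-layer) texture with a stereo (two-layer) EFB copy; iterating over the source's full layer count could index a layer the destination does not have. A minimal standalone sketch of the same clamping idea, using hypothetical stand-in types rather than Dolphin's real TCacheEntry/AbstractTexture API:

#include <algorithm>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for a cached texture; Dolphin's real entries wrap an
// AbstractTexture and carry far more state.
struct FakeTextureEntry
{
  std::vector<std::vector<uint8_t>> layer_data;  // one blob per layer
  uint32_t NumLayers() const { return static_cast<uint32_t>(layer_data.size()); }
};

// Copy only the layers both textures actually have. If one copy is stereo
// (2 layers) and the other is mono (1 layer), only the shared layer is copied
// instead of indexing past the end of the smaller texture.
void CopyCommonLayers(const FakeTextureEntry& src, FakeTextureEntry& dst)
{
  const uint32_t layers_to_copy = std::min(src.NumLayers(), dst.NumLayers());
  for (uint32_t layer = 0; layer < layers_to_copy; layer++)
    dst.layer_data[layer] = src.layer_data[layer];
}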
@@ -426,7 +429,8 @@ TextureCacheBase::DoPartialTextureUpdates(TCacheEntry* entry_to_update, u8* pale
       {
         // Remove the temporary converted texture, it won't be used anywhere else
         // TODO: It would be nice to convert and copy in one step, but this code path isn't common
-        InvalidateTexture(GetTexCacheIter(entry));
+        iter.first = InvalidateTexture(iter.first);
+        continue;
       }
       else
       {
@@ -1140,37 +1144,25 @@ TextureCacheBase::GetXFBTexture(u32 address, u32 width, u32 height, TextureForma
                                             texture_cache_safety_color_sample_size, false, 0, 0, 0,
                                             TLUTFormat::IA8, 1);
   if (!tex_info)
   {
     return nullptr;
   }
-
+  // Try a direct lookup by address/hash.
   const TextureLookupInformation tex_info_value = tex_info.value();
-
   TCacheEntry* entry = GetXFBFromCache(tex_info_value);
-  if (entry != nullptr)
-  {
+  if (entry)
     return entry;
-  }
-
-  entry = CreateNormalTexture(tex_info.value());
-
-  // XFBs created for the purpose of being a container for textures from memory
-  // or as a container for overlapping textures, never need to be combined
-  // with other textures
-  entry->may_have_overlapping_textures = false;
-
-  // At this point, the XFB wasn't found in cache
-  // this means the address is most likely not pointing at an xfb copy but instead
-  // an area of memory. Let's attempt to stitch all entries in this memory space
-  // together
-  bool loaded_from_overlapping = LoadTextureFromOverlappingTextures(entry, tex_info_value);
-
-  if (!loaded_from_overlapping)
+  // At this point, the XFB wasn't found in cache. This means the address is most likely not
+  // pointing at an xfb copy but instead an area of memory. Let's attempt to stitch all entries in
+  // this memory space together
+  bool loaded_from_overlapping = true;
+  entry = GetTextureFromOverlappingTextures(tex_info_value);
+  if (!entry)
   {
-    // At this point, the xfb address is truly "bogus"
-    // it likely is an area of memory defined by the CPU
-    // so load it from memory
-    LoadTextureFromMemory(entry, tex_info_value);
+    // At this point, the xfb address is truly "bogus" it likely is an area of memory defined by the
+    // CPU, so load it from memory.
+    entry = GetTextureFromMemory(tex_info_value);
+    loaded_from_overlapping = false;
   }
 
   if (g_ActiveConfig.bDumpXFBTarget)
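Taken together, the rewritten GetXFBTexture reduces to a three-step fallback: direct cache lookup, then stitching overlapping EFB/XFB copies, then decoding from CPU memory. A simplified, hypothetical outline of that control flow (the names below are placeholders, not Dolphin's actual API):

#include <cstdio>

// Hypothetical, simplified outline of the lookup order in the new GetXFBTexture.
struct Entry
{
  int id;
};

static Entry* FindInCache() { return nullptr; }                                // cache miss
static Entry* StitchFromOverlappingCopies() { static Entry e{1}; return &e; }  // stitched container
static Entry* DecodeFromMemory() { static Entry e{2}; return &e; }             // RAM fallback

static Entry* LookupXFB(bool* loaded_from_overlapping)
{
  // 1. Direct lookup by address/hash.
  if (Entry* entry = FindInCache())
    return entry;

  // 2. Not cached: try to stitch together the copies that overlap this memory
  //    range (this path can produce a multi-layer, i.e. stereo, texture).
  *loaded_from_overlapping = true;
  if (Entry* entry = StitchFromOverlappingCopies())
    return entry;

  // 3. Truly "bogus" address: decode the region from CPU memory instead,
  //    which can only ever yield a single layer.
  *loaded_from_overlapping = false;
  return DecodeFromMemory();
}

int main()
{
  bool from_overlapping = false;
  Entry* e = LookupXFB(&from_overlapping);
  std::printf("entry %d, from overlapping copies: %d\n", e->id, from_overlapping);
  return 0;
}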
@@ -1299,12 +1291,17 @@ TextureCacheBase::GetXFBFromCache(const TextureLookupInformation& tex_info)
   return nullptr;
 }
 
-bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_update,
-                                                          const TextureLookupInformation& tex_info)
+TextureCacheBase::TCacheEntry*
+TextureCacheBase::GetTextureFromOverlappingTextures(const TextureLookupInformation& tex_info)
 {
-  bool updated_entry = false;
+  u32 numBlocksX = tex_info.native_width / tex_info.block_width;
 
-  u32 numBlocksX = entry_to_update->native_width / tex_info.block_width;
+  // XFBs created for the purpose of being a container for textures from memory
+  // or as a container for overlapping textures, never need to be combined
+  // with other textures
+  TCacheEntry* stitched_entry =
+      CreateNormalTexture(tex_info, FramebufferManagerBase::GetEFBLayers());
+  stitched_entry->may_have_overlapping_textures = false;
 
   // It is possible that some of the overlapping textures overlap each other.
   // This behavior has been seen with XFB copies in Rogue Leader.
@@ -1315,14 +1312,13 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
   // instead, which would reduce the amount of copying work here.
   std::vector<TCacheEntry*> candidates;
 
-  auto iter = FindOverlappingTextures(entry_to_update->addr, entry_to_update->size_in_bytes);
+  auto iter = FindOverlappingTextures(tex_info.address, tex_info.total_bytes);
   while (iter.first != iter.second)
   {
     TCacheEntry* entry = iter.first->second;
-    if (entry != entry_to_update && entry->IsCopy() && !entry->tmem_only &&
-        entry->references.count(entry_to_update) == 0 &&
-        entry->OverlapsMemoryRange(entry_to_update->addr, entry_to_update->size_in_bytes) &&
-        entry->memory_stride == entry_to_update->memory_stride)
+    if (entry->IsCopy() && !entry->tmem_only &&
+        entry->OverlapsMemoryRange(tex_info.address, tex_info.total_bytes) &&
+        entry->memory_stride == stitched_entry->memory_stride)
     {
       if (entry->hash == entry->CalculateHash())
       {
@@ -1341,6 +1337,7 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
   std::sort(candidates.begin(), candidates.end(),
             [](const TCacheEntry* a, const TCacheEntry* b) { return a->id < b->id; });
 
+  bool updated_entry = false;
   for (TCacheEntry* entry : candidates)
   {
     if (tex_info.is_palette_texture)
@@ -1351,7 +1348,7 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
     {
       // Link the efb copy with the partially updated texture, so we won't apply this partial
       // update again
-      entry->CreateReference(entry_to_update);
+      entry->CreateReference(stitched_entry);
       // Mark the texture update as used, as if it was loaded directly
       entry->frameCount = FRAMECOUNT_INVALID;
       entry = decoded_entry;
@@ -1366,9 +1363,9 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
 
     // Note for understanding the math:
     // Normal textures can't be strided, so the 2 missing cases with src_x > 0 don't exist
-    if (entry->addr >= entry_to_update->addr)
+    if (entry->addr >= stitched_entry->addr)
     {
-      s32 block_offset = (entry->addr - entry_to_update->addr) / tex_info.bytes_per_block;
+      s32 block_offset = (entry->addr - stitched_entry->addr) / tex_info.bytes_per_block;
       s32 block_x = block_offset % numBlocksX;
       s32 block_y = block_offset / numBlocksX;
       src_x = 0;
@@ -1379,7 +1376,7 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
     else
     {
       s32 srcNumBlocksX = entry->native_width / tex_info.block_width;
-      s32 block_offset = (entry_to_update->addr - entry->addr) / tex_info.bytes_per_block;
+      s32 block_offset = (stitched_entry->addr - entry->addr) / tex_info.bytes_per_block;
       s32 block_x = block_offset % srcNumBlocksX;
       s32 block_y = block_offset / srcNumBlocksX;
       src_x = block_x * tex_info.block_width;
@@ -1388,18 +1385,17 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
       dst_y = 0;
     }
 
-    u32 copy_width = std::min(entry->native_width - src_x, entry_to_update->native_width - dst_x);
-    u32 copy_height =
-        std::min(entry->native_height - src_y, entry_to_update->native_height - dst_y);
+    u32 copy_width = std::min(entry->native_width - src_x, stitched_entry->native_width - dst_x);
+    u32 copy_height = std::min(entry->native_height - src_y, stitched_entry->native_height - dst_y);
 
     // If one of the textures is scaled, scale both with the current efb scaling factor
-    if (entry_to_update->native_width != entry_to_update->GetWidth() ||
-        entry_to_update->native_height != entry_to_update->GetHeight() ||
+    if (stitched_entry->native_width != stitched_entry->GetWidth() ||
+        stitched_entry->native_height != stitched_entry->GetHeight() ||
         entry->native_width != entry->GetWidth() || entry->native_height != entry->GetHeight())
     {
-      ScaleTextureCacheEntryTo(entry_to_update,
-                               g_renderer->EFBToScaledX(entry_to_update->native_width),
-                               g_renderer->EFBToScaledY(entry_to_update->native_height));
+      ScaleTextureCacheEntryTo(stitched_entry,
+                               g_renderer->EFBToScaledX(stitched_entry->native_width),
+                               g_renderer->EFBToScaledY(stitched_entry->native_height));
       ScaleTextureCacheEntryTo(entry, g_renderer->EFBToScaledX(entry->native_width),
                                g_renderer->EFBToScaledY(entry->native_height));
 
@@ -1422,10 +1418,12 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
     dstrect.right = (dst_x + copy_width);
     dstrect.bottom = (dst_y + copy_height);
 
-    for (u32 layer = 0; layer < entry->texture->GetConfig().layers; layer++)
+    // If one copy is stereo, and the other isn't... not much we can do here :/
+    const u32 layers_to_copy = std::min(entry->GetNumLayers(), stitched_entry->GetNumLayers());
+    for (u32 layer = 0; layer < layers_to_copy; layer++)
     {
-      entry_to_update->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer, 0,
-                                                         dstrect, layer, 0);
+      stitched_entry->texture->CopyRectangleFromTexture(entry->texture.get(), srcrect, layer, 0,
+                                                        dstrect, layer, 0);
     }
     updated_entry = true;
 
@@ -1438,17 +1436,25 @@ bool TextureCacheBase::LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_
     else
     {
       // Link the two textures together, so we won't apply this partial update again
-      entry->CreateReference(entry_to_update);
+      entry->CreateReference(stitched_entry);
       // Mark the texture update as used, as if it was loaded directly
       entry->frameCount = FRAMECOUNT_INVALID;
     }
   }
 
-  return updated_entry;
+  if (!updated_entry)
+  {
+    // Kinda annoying that we have to throw away the texture we just created, but with the above
+    // code requiring the TCacheEntry object exists, can't do much at the moment.
+    InvalidateTexture(GetTexCacheIter(stitched_entry));
+    return nullptr;
+  }
+
+  return stitched_entry;
 }
 
 TextureCacheBase::TCacheEntry*
-TextureCacheBase::CreateNormalTexture(const TextureLookupInformation& tex_info)
+TextureCacheBase::CreateNormalTexture(const TextureLookupInformation& tex_info, u32 layers)
 {
   // create the entry/texture
   TextureConfig config;
@@ -1457,6 +1463,7 @@ TextureCacheBase::CreateNormalTexture(const TextureLookupInformation& tex_info)
   config.levels = tex_info.computed_levels;
   config.format = AbstractTextureFormat::RGBA8;
   config.rendertarget = true;
+  config.layers = layers;
 
   TCacheEntry* entry = AllocateCacheEntry(config);
   GFX_DEBUGGER_PAUSE_AT(NEXT_NEW_TEXTURE, true);
@@ -1485,8 +1492,8 @@ TextureCacheBase::CreateNormalTexture(const TextureLookupInformation& tex_info)
   return entry;
 }
 
-void TextureCacheBase::LoadTextureFromMemory(TCacheEntry* entry_to_update,
-                                             const TextureLookupInformation& tex_info)
+TextureCacheBase::TCacheEntry*
+TextureCacheBase::GetTextureFromMemory(const TextureLookupInformation& tex_info)
 {
   // We can decode on the GPU if it is a supported format and the flag is enabled.
   // Currently we don't decode RGBA8 textures from Tmem, as that would require copying from both
@@ -1498,7 +1505,11 @@ void TextureCacheBase::LoadTextureFromMemory(TCacheEntry* entry_to_update,
                          tex_info.full_format.tlutfmt) &&
       !(tex_info.from_tmem && tex_info.full_format.texfmt == TextureFormat::RGBA8);
 
-  LoadTextureLevelZeroFromMemory(entry_to_update, tex_info, decode_on_gpu);
+  // Since it's coming from RAM, it can only have one layer (no stereo).
+  TCacheEntry* entry = CreateNormalTexture(tex_info, 1);
+  entry->may_have_overlapping_textures = false;
+  LoadTextureLevelZeroFromMemory(entry, tex_info, decode_on_gpu);
+  return entry;
 }
 
 void TextureCacheBase::LoadTextureLevelZeroFromMemory(TCacheEntry* entry_to_update,
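With CreateNormalTexture now taking an explicit layer count, the two call paths diverge: stitched XFB containers are created with FramebufferManagerBase::GetEFBLayers() so they can hold both eyes, while textures decoded from RAM are always single-layer. A rough, hypothetical sketch of how the layer count would be chosen, with a stand-in TextureConfig and an assumed two-layer stereo EFB:

#include <cstdint>

// Stand-in for Dolphin's TextureConfig; only the fields relevant here.
struct TextureConfig
{
  uint32_t width = 0, height = 0, levels = 1, layers = 1;
  bool rendertarget = false;
};

// Assumption: with stereoscopy enabled the EFB has two layers (one per eye),
// otherwise one.
static uint32_t EFBLayerCount(bool stereoscopy_enabled)
{
  return stereoscopy_enabled ? 2 : 1;
}

static TextureConfig MakeConfig(uint32_t width, uint32_t height, uint32_t layers)
{
  TextureConfig config;
  config.width = width;
  config.height = height;
  config.rendertarget = true;
  config.layers = layers;
  return config;
}

// Container that stitches overlapping EFB/XFB copies: match the EFB layer count.
static TextureConfig MakeStitchedXFBConfig(uint32_t w, uint32_t h, bool stereo)
{
  return MakeConfig(w, h, EFBLayerCount(stereo));
}

// Texture decoded from CPU RAM: there is no second eye's worth of data.
static TextureConfig MakeRAMTextureConfig(uint32_t w, uint32_t h)
{
  return MakeConfig(w, h, 1);
}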
@@ -250,11 +250,9 @@ public:
                                   u32 tmem_address_even, u32 tmem_address_odd, u32 tlutaddr,
                                   TLUTFormat tlutfmt, u32 levels);
   TCacheEntry* GetXFBFromCache(const TextureLookupInformation& tex_info);
-  bool LoadTextureFromOverlappingTextures(TCacheEntry* entry_to_update,
-                                          const TextureLookupInformation& tex_info);
-  TCacheEntry* CreateNormalTexture(const TextureLookupInformation& tex_info);
-  void LoadTextureFromMemory(TCacheEntry* entry_to_update,
-                             const TextureLookupInformation& tex_info);
+  TCacheEntry* GetTextureFromOverlappingTextures(const TextureLookupInformation& tex_info);
+  TCacheEntry* GetTextureFromMemory(const TextureLookupInformation& tex_info);
+  TCacheEntry* CreateNormalTexture(const TextureLookupInformation& tex_info, u32 layers);
   void LoadTextureLevelZeroFromMemory(TCacheEntry* entry_to_update,
                                       const TextureLookupInformation& tex_info, bool decode_on_gpu);
   virtual void BindTextures();