GS/HW: Round source lookup rect to block boundary and improve overlap check.

- also clean up formatting in GSTextureCache.cpp
refractionpcsx2 2024-03-26 08:18:31 +00:00
parent a959d33f85
commit 545fbdeed1
2 changed files with 63 additions and 59 deletions
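For quick reference before the diff: a minimal standalone sketch of the rounding that the new block_boundary_rect computation in LookupSource() performs, assuming a power-of-two block size (as all GS formats have). The Rect type and helper name below are illustrative only, not part of this commit.

#include <algorithm>

struct Rect { int x, y, z, w; }; // left, top, right, bottom

// Round the right/bottom edges of a source rect up to the next block
// boundary. Using (bs - 2) instead of (bs - 1) means an edge sitting
// exactly one texel past a boundary -- common with inclusive rects and
// bilinear sampling -- rounds back down instead of dragging in a whole
// extra block. std::max keeps the rect at least one texel wide/tall.
static Rect RoundToBlockBoundary(Rect r, int bs_x, int bs_y)
{
    r.z = std::max(r.x + 1, (r.z + (bs_x - 2)) & ~(bs_x - 1));
    r.w = std::max(r.y + 1, (r.w + (bs_y - 2)) & ~(bs_y - 1));
    return r;
}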

GSRendererHW.cpp

@@ -2389,7 +2389,7 @@ void GSRendererHW::Draw()
FRAME_TEX0.TBW = m_cached_ctx.FRAME.FBW;
FRAME_TEX0.PSM = m_cached_ctx.FRAME.PSM;
GSTextureCache::Target* tgt = g_texture_cache->LookupTarget(FRAME_TEX0, GSVector2i(m_vt.m_max.p.x, m_vt.m_max.p.y), GetTextureScaleFactor(), GSTextureCache::RenderTarget, true,
GSTextureCache::Target* tgt = g_texture_cache->LookupTarget(FRAME_TEX0, GSVector2i(m_vt.m_max.p.x, m_vt.m_max.p.y), GetTextureScaleFactor(), GSTextureCache::RenderTarget, false,
fm);
if (tgt)

GSTextureCache.cpp

@@ -195,13 +195,13 @@ void GSTextureCache::AddDirtyRectTarget(Target* target, GSVector4i rect, u32 psm
}
}
void GSTextureCache::ResizeTarget(Target* t, GSVector4i rect, u32 tbp, u32 psm, u32 tbw)
void GSTextureCache::ResizeTarget(Target* t, GSVector4i rect, u32 tbp, u32 psm, u32 tbw)
{
// Valid area isn't the whole texture anyway, no point in expanding.
if (t->m_valid.z < t->m_unscaled_size.x || t->m_valid.w < t->m_unscaled_size.y)
return;
const GSVector2i size_delta = { std::max(0, (rect.z - t->m_valid.z)), std::max(0, (rect.w - t->m_valid.w)) };
const GSVector2i size_delta = {std::max(0, (rect.z - t->m_valid.z)), std::max(0, (rect.w - t->m_valid.w))};
// If it's 1 row, it's probably the texture bounds accounting for bilinear, ignore it.
if (size_delta.x > 1 || size_delta.y > 1)
{
@@ -210,7 +210,7 @@ void GSTextureCache::ResizeTarget(Target* t, GSVector4i rect, u32 tbp, u32 psm,
// Dirty the expanded areas.
AddDirtyRectTarget(t, GSVector4i(t->m_valid.x, t->m_valid.w, t->m_valid.z + std::max(0, size_delta.x), t->m_valid.w + std::max(0, size_delta.y)), t->m_TEX0.PSM, t->m_TEX0.TBW, rgba);
AddDirtyRectTarget(t, GSVector4i(t->m_valid.z, t->m_valid.y, t->m_valid.z + std::max(0, size_delta.x), t->m_valid.w), t->m_TEX0.PSM, t->m_TEX0.TBW, rgba);
const GSVector4i valid_rect = { t->m_valid.x, t->m_valid.y, t->m_valid.z + std::max(0, size_delta.x), t->m_valid.w + std::max(0, size_delta.y) };
const GSVector4i valid_rect = {t->m_valid.x, t->m_valid.y, t->m_valid.z + std::max(0, size_delta.x), t->m_valid.w + std::max(0, size_delta.y)};
// Resizes of edges due to bilinear filtering and tex is rt could cause bad valid rects.
t->UpdateValidity(valid_rect, size_delta.x > 2 || size_delta.y > 2);
GetTargetSize(tbp, tbw, psm, valid_rect.z, valid_rect.w);
@@ -441,7 +441,7 @@ void GSTextureCache::DirtyRectByPage(u32 sbp, u32 spsm, u32 sbw, Target* t, GSVe
{
const int src_bpp = src_info->bpp;
if(block_offset)
if (block_offset)
in_rect = in_rect.ralign<Align_Outside>(src_info->bs);
else
in_rect = in_rect.ralign<Align_Outside>(src_info->pgs);
@@ -603,7 +603,6 @@ void GSTextureCache::DirtyRectByPage(u32 sbp, u32 spsm, u32 sbw, Target* t, GSVe
in_rect.y = in_rect.y & ~(src_info->pgs.y - 1);
in_rect.w = (in_rect.w + (src_info->pgs.y - 1)) & ~(src_info->pgs.y - 1);
}
}
else
{
@@ -638,7 +637,7 @@ void GSTextureCache::DirtyRectByPage(u32 sbp, u32 spsm, u32 sbw, Target* t, GSVe
const int page_skip = src_pg_width - page_draw;
const int vertical_pages = (std::max(src_info->pgs.y, in_rect.height()) / src_info->pgs.y);
const int horisontal_pages = (std::max(src_info->pgs.x, in_rect.width()) / src_info->pgs.x);
const int totalpages = vertical_pages * horisontal_pages + (page_skip * (vertical_pages-1));
const int totalpages = vertical_pages * horisontal_pages + (page_skip * (vertical_pages - 1));
const bool single_width = page_draw == 1;
// We can use the offset here for the X to pick the right function.
@@ -648,7 +647,7 @@ void GSTextureCache::DirtyRectByPage(u32 sbp, u32 spsm, u32 sbw, Target* t, GSVe
const int block_y = in_rect.y & (src_info->pgs.y - 1);
x_offset += block_x;
y_offset += block_y;
if(block_x)
if (block_x)
in_rect = GSVector4i(in_rect.x - block_x, in_rect.y, in_rect.z - block_x, in_rect.w);
if (block_y)
in_rect = GSVector4i(in_rect.x, in_rect.y - block_y, in_rect.z, in_rect.w - block_y);
@@ -688,7 +687,7 @@ void GSTextureCache::DirtyRectByPage(u32 sbp, u32 spsm, u32 sbw, Target* t, GSVe
{
if (in_rect.x & (dst_info->pgs.x - 1))
{
const u32 rect_off = (in_rect.x & (dst_info->pgs.x-1));
const u32 rect_off = (in_rect.x & (dst_info->pgs.x - 1));
in_rect.x -= rect_off;
in_rect.z -= rect_off;
x_offset += rect_off;
@@ -776,7 +775,7 @@ __ri static GSTextureCache::Source* FindSourceInMap(const GIFRegTEX0& TEX0, cons
// We request a palette texture (psm_s.pal). If the texture was
// converted by the CPU (!s->m_palette), we need to ensure
// palette content is the same.
if (!s->m_palette && !s->ClutMatch({ clut, psm_s.pal }))
if (!s->m_palette && !s->ClutMatch({clut, psm_s.pal}))
continue;
}
else
@@ -1071,6 +1070,11 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
req_rect.x = region.HasX() ? region.GetMinX() : 0;
req_rect.y = region.HasY() ? region.GetMinY() : 0;
GSVector4i block_boundary_rect = req_rect;
// Round up to the nearest block boundary for lookup to avoid problems due to bilinear and inclusive rects.
block_boundary_rect.z = std::max(req_rect.x + 1, (block_boundary_rect.z + (psm_s.bs.x - 2)) & ~(psm_s.bs.x - 1));
block_boundary_rect.w = std::max(req_rect.y + 1, (block_boundary_rect.w + (psm_s.bs.y - 2)) & ~(psm_s.bs.y - 1));
// Arc the Lad finds the wrong surface here when looking for a depth stencil.
// Since we're currently not caching depth stencils (check ToDo in CreateSource) we should not look for it here.
@@ -1091,14 +1095,14 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
if (((bp & (BLOCKS_PER_PAGE - 1)) != (t->m_TEX0.TBP0 & (BLOCKS_PER_PAGE - 1))) && (bp & (BLOCKS_PER_PAGE - 1)))
continue;
const bool overlaps = t->Overlaps(bp, bw, psm, req_rect);
const bool overlaps = t->Overlaps(bp, bw, psm, block_boundary_rect);
// Try to make sure the target has available what we need, be careful of self referencing frames with font in the alpha.
if (!overlaps)
continue;
const bool width_match = (std::max(64U, bw * 64U) >> GSLocalMemory::m_psm[psm].info.pageShiftX()) ==
(std::max(64U, t->m_TEX0.TBW * 64U) >> GSLocalMemory::m_psm[t->m_TEX0.PSM].info.pageShiftX());
(std::max(64U, t->m_TEX0.TBW * 64U) >> GSLocalMemory::m_psm[t->m_TEX0.PSM].info.pageShiftX());
if (bp == t->m_TEX0.TBP0 && !t->m_dirty.empty() && overlaps && GSUtil::GetChannelMask(psm) == GSUtil::GetChannelMask(t->m_TEX0.PSM) && GSRendererHW::GetInstance()->m_draw_transfers.size() > 0)
{
@@ -1110,7 +1114,7 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
const int start_draw = GSRendererHW::GetInstance()->m_draw_transfers.back().draw;
for (iter = GSRendererHW::GetInstance()->m_draw_transfers.rbegin(); iter != GSRendererHW::GetInstance()->m_draw_transfers.rend(); )
for (iter = GSRendererHW::GetInstance()->m_draw_transfers.rbegin(); iter != GSRendererHW::GetInstance()->m_draw_transfers.rend();)
{
if (TEX0.TBP0 == iter->blit.DBP && GSUtil::HasCompatibleBits(iter->blit.DPSM, TEX0.PSM) && req_rect.rintersect(iter->rect).eq(req_rect))
{
@@ -1223,7 +1227,7 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
if (!dirty_rect.rintersect(new_rect).rempty())
{
rect_clean = false;
partial |= !new_rect.rintersect(dirty_rect).eq(new_rect);
partial |= !new_rect.rintersect(dirty_rect).eq(new_rect) || dirty_rect.eq(new_rect);
break;
}
}
@@ -1240,7 +1244,10 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
}
// If not all channels are clean/dirty or only part of the rect is dirty, we need to update the target.
if (((channels & channel_mask) != channel_mask || partial))
{
t->Update();
rect_clean = true;
}
if (linear)
{
@@ -1269,7 +1276,7 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
}
const GSLocalMemory::psm_t* src_info = &GSLocalMemory::m_psm[psm];
const int block_offset = static_cast<int>(bp) - static_cast<int>(t->m_TEX0.TBP0);
const int block_offset = static_cast<int>(bp) - static_cast<int>(t->m_TEX0.TBP0);
const int page_offset = (block_offset) >> 5;
const int start_page = page_offset + (new_rect.x / src_info->pgs.x) + ((new_rect.y / src_info->pgs.y) * std::max(static_cast<int>(bw), 1));
const int src_page_width = std::max(static_cast<int>((bw * 64) / src_info->pgs.x), 1);
@@ -1294,8 +1301,8 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
if (!rect_clean)
{
const u32 read_start = GSLocalMemory::GetStartBlockAddress(bp, bw, psm, req_rect);
const u32 read_end = GSLocalMemory::GetUnwrappedEndBlockAddress(bp, bw, psm, req_rect);
const u32 read_start = GSLocalMemory::GetStartBlockAddress(bp, bw, psm, block_boundary_rect);
const u32 read_end = GSLocalMemory::GetUnwrappedEndBlockAddress(bp, bw, psm, block_boundary_rect);
const GSVector4i dirty_rect = t->m_dirty.GetTotalRect(t->m_TEX0, t->m_unscaled_size);
const u32 dirty_start = GSLocalMemory::GetStartBlockAddress(t->m_TEX0.TBP0, t->m_TEX0.TBW, t->m_TEX0.PSM, dirty_rect);
const u32 dirty_end = GSLocalMemory::GetUnwrappedEndBlockAddress(t->m_TEX0.TBP0, t->m_TEX0.TBW, t->m_TEX0.PSM, dirty_rect);
@@ -1307,7 +1314,7 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
const u32 color_psm = ((psm & 0x30) == 0x30) ? (psm & ~0x30) : psm;
const u32 tex_color_psm = ((t->m_TEX0.PSM & 0x30) == 0x30) ? (t->m_TEX0.PSM & ~0x30) : t->m_TEX0.PSM;
const bool can_convert = (GSUtil::HasCompatibleBits(psm, t_psm) && ((bw == t->m_TEX0.TBW) || (bw <= 1 && req_rect.w < GSLocalMemory::m_psm[psm].pgs.y))) ||
(possible_shuffle && ((bw == t->m_TEX0.TBW) || (bw == (t->m_TEX0.TBW * 2) || bw <= 2)) && GSLocalMemory::m_psm[t->m_TEX0.PSM].bpp == 32);
(possible_shuffle && ((bw == t->m_TEX0.TBW) || (bw == (t->m_TEX0.TBW * 2) || bw <= 2)) && GSLocalMemory::m_psm[t->m_TEX0.PSM].bpp == 32);
// Match if we haven't already got a tex in rt
if (((!t_clean && can_convert) || t_clean || !overlapping_dirty) && GSUtil::HasSharedBits(bp, psm, t->m_TEX0.TBP0, t_psm))
@@ -1367,7 +1374,6 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
t->m_valid_rgb = true;
break;
}
}
if (overlaps && !t->HasValidBitsForFormat(psm, req_color, req_alpha) && !(possible_shuffle && GSLocalMemory::m_psm[psm].bpp == 16 && GSLocalMemory::m_psm[t->m_TEX0.PSM].bpp == 32))
@@ -1409,12 +1415,10 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
// Make sure the texture actually is INSIDE the RT, it's possibly not valid if it isn't.
// Also check BP >= TBP, create source isn't equipped to expand it backwards and all data comes from the target. (GH3)
else if (GSConfig.UserHacks_TextureInsideRt >= GSTextureInRtMode::InsideTargets && color_psm >= PSMCT32 &&
color_psm <= PSMCT16S && (GSUtil::HasCompatibleBits(tex_color_psm, color_psm) ||
(possible_shuffle && tex_color_psm <= PSMCT24 && ((((t->UnwrappedEndBlock() + 1) - t->m_TEX0.TBP0) >> 1) + t->m_TEX0.TBP0) == bp)) &&
(t->Overlaps(bp, bw, psm, req_rect) || t->Wraps()) &&
t->m_age <= 1 && (!found_t || dst->m_TEX0.TBW < bw))
color_psm <= PSMCT16S && t->m_age <= 1 && (!found_t || t->m_last_draw > dst->m_last_draw) && CanTranslate(bp, bw, psm, block_boundary_rect, t->m_TEX0.TBP0, t->m_TEX0.PSM, t->m_TEX0.TBW))
{
if (overlaps && !t->HasValidBitsForFormat(psm, req_color, req_alpha) && !(possible_shuffle && GSLocalMemory::m_psm[psm].bpp == 16 && GSLocalMemory::m_psm[t->m_TEX0.PSM].bpp == 32))
if (!t->HasValidBitsForFormat(psm, req_color, req_alpha) && !(possible_shuffle && GSLocalMemory::m_psm[psm].bpp == 16 && GSLocalMemory::m_psm[t->m_TEX0.PSM].bpp == 32))
continue;
// PSM equality needed because CreateSource does not handle PSM conversion.
@@ -1734,7 +1738,7 @@ GSTextureCache::Source* GSTextureCache::LookupSource(const bool is_color, const
if (gpu_clut)
AttachPaletteToSource(src, gpu_clut);
else if (src->m_palette && (!src->m_palette_obj || !src->ClutMatch({ clut, psm_s.pal })))
else if (src->m_palette && (!src->m_palette_obj || !src->ClutMatch({clut, psm_s.pal})))
AttachPaletteToSource(src, psm_s.pal, true);
}
@@ -1831,7 +1835,8 @@ GSTextureCache::Target* GSTextureCache::LookupTarget(GIFRegTEX0 TEX0, const GSVe
if (can_use)
{
list.MoveFront(i.Index());
if (used)
list.MoveFront(i.Index());
dst = t;
@@ -2152,7 +2157,7 @@ GSTextureCache::Target* GSTextureCache::LookupTarget(GIFRegTEX0 TEX0, const GSVe
GIFRegTEX0 new_TEX0;
new_TEX0.TBP0 = TEX0.TBP0;
new_TEX0.TBW = (!half_width) ? dst_match->m_TEX0.TBW : TEX0.TBW;
new_TEX0.PSM = is_shuffle ? dst_match->m_TEX0.PSM :TEX0.PSM;
new_TEX0.PSM = is_shuffle ? dst_match->m_TEX0.PSM : TEX0.PSM;
dst = Target::Create(new_TEX0, new_size.x, new_size.y, scale, type, clear);
if (!dst)
@@ -2162,12 +2167,12 @@ GSTextureCache::Target* GSTextureCache::LookupTarget(GIFRegTEX0 TEX0, const GSVe
dst->OffsetHack_modxy = dst_match->OffsetHack_modxy;
dst->m_end_block = dst_match->m_end_block; // If we're copying the size, we need to keep the end block.
dst->m_valid = dst_match->m_valid;
dst->m_valid_alpha_low = dst_match->m_valid_alpha_low;//&& psm_s.trbpp != 24;
dst->m_valid_alpha_high = dst_match->m_valid_alpha_high;//&& psm_s.trbpp != 24;
dst->m_valid_alpha_low = dst_match->m_valid_alpha_low; //&& psm_s.trbpp != 24;
dst->m_valid_alpha_high = dst_match->m_valid_alpha_high; //&& psm_s.trbpp != 24;
dst->m_valid_rgb = dst_match->m_valid_rgb;
dst->m_was_dst_matched = true;
if(GSLocalMemory::m_psm[dst->m_TEX0.PSM].bpp == 16 && GSLocalMemory::m_psm[dst_match->m_TEX0.PSM].bpp > 16)
if (GSLocalMemory::m_psm[dst->m_TEX0.PSM].bpp == 16 && GSLocalMemory::m_psm[dst_match->m_TEX0.PSM].bpp > 16)
dst->m_TEX0.TBW = dst_match->m_TEX0.TBW; // Be careful of shuffles of the depth as C16, but using a buffer width of 16 (Mercenaries).
else if (GSLocalMemory::m_psm[dst->m_TEX0.PSM].bpp == 32 && GSLocalMemory::m_psm[dst_match->m_TEX0.PSM].bpp == 16)
{
@@ -2301,7 +2306,7 @@ GSTextureCache::Target* GSTextureCache::CreateTarget(GIFRegTEX0 TEX0, const GSVe
Target* const rev_t = *j;
if (rev_t->m_TEX0.TBP0 == dst->m_TEX0.TBP0 && GSLocalMemory::m_psm[rev_t->m_TEX0.PSM].bpp == GSLocalMemory::m_psm[dst->m_TEX0.PSM].bpp)
{
if(!rev_t->m_valid_rgb)
if (!rev_t->m_valid_rgb)
rev_t->m_was_dst_matched = true;
break;
}
@@ -2331,7 +2336,7 @@ GSTextureCache::Target* GSTextureCache::CreateTarget(GIFRegTEX0 TEX0, const GSVe
std::vector<GSState::GSUploadQueue>::reverse_iterator iter;
const int start_transfer = g_gs_renderer->s_transfer_n;
const u32 tex_end = GSLocalMemory::GetUnwrappedEndBlockAddress(TEX0.TBP0, TEX0.TBW, TEX0.PSM, GSVector4i::loadh(size));
for (iter = GSRendererHW::GetInstance()->m_draw_transfers.rbegin(); iter != GSRendererHW::GetInstance()->m_draw_transfers.rend(); )
for (iter = GSRendererHW::GetInstance()->m_draw_transfers.rbegin(); iter != GSRendererHW::GetInstance()->m_draw_transfers.rend();)
{
const u32 transfer_end = GSLocalMemory::GetUnwrappedEndBlockAddress(iter->blit.DBP, iter->blit.DBW, iter->blit.DPSM, iter->rect);
// If the format, and location doesn't overlap
@@ -2534,7 +2539,7 @@ bool GSTextureCache::PreloadTarget(GIFRegTEX0 TEX0, const GSVector2i& size, cons
return hw_clear.value_or(false);
}
// The new texture is behind it but engulfs the whole thing, shrink the new target so it grows in the HW Draw resize.
else if (((((dst->UnwrappedEndBlock() + 1) - dst->m_TEX0.TBP0) >> 1) + dst->m_TEX0.TBP0) == t->m_TEX0.TBP0)
else if (dst->m_TEX0.TBP0 < t->m_TEX0.TBP0 && (dst->UnwrappedEndBlock() + 1) > t->m_TEX0.TBP0 && dst->m_TEX0.TBP0 < (t->UnwrappedEndBlock() + 1))
{
const int rt_pages = ((t->UnwrappedEndBlock() + 1) - t->m_TEX0.TBP0) >> 5;
const int overlapping_pages = std::min(rt_pages, static_cast<int>((dst->UnwrappedEndBlock() + 1) - t->m_TEX0.TBP0) >> 5);
@@ -2555,7 +2560,7 @@ bool GSTextureCache::PreloadTarget(GIFRegTEX0 TEX0, const GSVector2i& size, cons
GL_INS("RT double buffer copy from FBP 0x%x, %dx%d => %d,%d", t->m_TEX0.TBP0, copy_width, copy_height, 0, dst_offset);
pxAssert(copy_width <= dst->GetTexture()->GetWidth() && copy_height <= dst->GetTexture()->GetHeight() &&
copy_width <= t->GetTexture()->GetWidth() && copy_height <= t->GetTexture()->GetHeight());
copy_width <= t->GetTexture()->GetWidth() && copy_height <= t->GetTexture()->GetHeight());
pxAssert(dst_offset > 0);
@@ -2620,7 +2625,7 @@ GSTextureCache::Target* GSTextureCache::LookupDisplayTarget(GIFRegTEX0 TEX0, con
GSVector4i eerect = GSVector4i::zero();
const int last_draw = GSRendererHW::GetInstance()->m_draw_transfers.back().draw;
for (iter = GSRendererHW::GetInstance()->m_draw_transfers.rbegin(); iter != GSRendererHW::GetInstance()->m_draw_transfers.rend(); )
for (iter = GSRendererHW::GetInstance()->m_draw_transfers.rbegin(); iter != GSRendererHW::GetInstance()->m_draw_transfers.rend();)
{
// Would be nice to make this 100, but B-Boy seems to rely on data uploaded ~200 draws ago. Making it bigger for now to be safe.
if (last_draw - iter->draw > 500)
@@ -3551,16 +3556,14 @@ void GSTextureCache::InvalidateLocalMem(const GSOffset& off, const GSVector4i& r
{
t->m_drawn_since_read = GSVector4i::zero();
}
else if (targetr.xzxz().eq(t->m_drawn_since_read.xzxz())
&& targetr.w >= t->m_drawn_since_read.y)
else if (targetr.xzxz().eq(t->m_drawn_since_read.xzxz()) && targetr.w >= t->m_drawn_since_read.y)
{
if (targetr.y <= t->m_drawn_since_read.y)
t->m_drawn_since_read.y = targetr.w;
else if (targetr.w >= t->m_drawn_since_read.w)
t->m_drawn_since_read.w = targetr.y;
}
else if (targetr.ywyw().eq(t->m_drawn_since_read.ywyw())
&& targetr.z >= t->m_drawn_since_read.x)
else if (targetr.ywyw().eq(t->m_drawn_since_read.ywyw()) && targetr.z >= t->m_drawn_since_read.x)
{
if (targetr.x <= t->m_drawn_since_read.x)
t->m_drawn_since_read.x = targetr.z;
@@ -3729,7 +3732,7 @@ bool GSTextureCache::Move(u32 SBP, u32 SBW, u32 SPSM, int sx, int sy, u32 DBP, u
return false;
}
if(tmp_texture->IsDepthStencil())
if (tmp_texture->IsDepthStencil())
{
const GSVector4 src_rect = GSVector4(scaled_sx, scaled_sy, scaled_sx + scaled_w, scaled_sy + scaled_h);
const GSVector4 tmp_rect = src_rect / (GSVector4(tmp_texture->GetSize()).xyxy());
@@ -3788,7 +3791,7 @@ bool GSTextureCache::Move(u32 SBP, u32 SBW, u32 SPSM, int sx, int sy, u32 DBP, u
if (GSUtil::GetChannelMask(DPSM) & 0x8)
{
if(DPSM != PSMT4HH)
if (DPSM != PSMT4HH)
dst->m_valid_alpha_low |= src->m_valid_alpha_low;
if (DPSM != PSMT4HL)
dst->m_valid_alpha_high |= src->m_valid_alpha_high;
@@ -4857,7 +4860,7 @@ GSTextureCache::Source* GSTextureCache::CreateMergedSource(GIFRegTEX0 TEX0, GIFR
// Helper to preload a page.
auto preload_page = [&TEXA, scale, &psm, &lm_off, &lmtex, &lmtex_map, &lmtex_mapped,
page_width, page_height, tex_width, tex_height, copy_queue, &copy_count](int dst_x, int dst_y) {
page_width, page_height, tex_width, tex_height, copy_queue, &copy_count](int dst_x, int dst_y) {
if (!lmtex)
{
lmtex = g_gs_device->CreateTexture(tex_width, tex_height, 1, GSTexture::Format::Color, false);
@@ -5015,7 +5018,7 @@ GSTextureCache::Source* GSTextureCache::CreateMergedSource(GIFRegTEX0 TEX0, GIFR
GSVector4(t->m_texture->GetSize()).xyxy(),
GSVector4(dst_x, dst_y, dst_x + copy_width, dst_y + copy_height) *
GSVector4(scale).xyxy(),
t->m_texture, linear, 0xf };
t->m_texture, linear, 0xf};
}
row_page++;
@@ -5121,7 +5124,7 @@ GSTextureCache::HashCacheEntry* GSTextureCache::LookupHashCache(const GIFRegTEX0
// need the hash either for replacing, dumping or caching.
// if dumping/replacing is on, we compute the clut hash regardless, since replacements aren't indexed
HashCacheKey key{ HashCacheKey::Create(TEX0, TEXA, (dump || replace || !paltex) ? clut : nullptr, lod, region) };
HashCacheKey key{HashCacheKey::Create(TEX0, TEXA, (dump || replace || !paltex) ? clut : nullptr, lod, region)};
// handle dumping first, this is mostly isolated.
if (dump)
@@ -5136,7 +5139,7 @@ GSTextureCache::HashCacheEntry* GSTextureCache::LookupHashCache(const GIFRegTEX0
const int nmips = lod->y - lod->x + 1;
for (int mip = 1; mip < nmips; mip++)
{
const GIFRegTEX0 MIP_TEX0{ g_gs_renderer->GetTex0Layer(basemip + mip) };
const GIFRegTEX0 MIP_TEX0{g_gs_renderer->GetTex0Layer(basemip + mip)};
GSTextureReplacements::DumpTexture(key, MIP_TEX0, TEXA, region, g_gs_renderer->m_mem, mip);
}
}
@@ -5175,7 +5178,7 @@ GSTextureCache::HashCacheEntry* GSTextureCache::LookupHashCache(const GIFRegTEX0
{
// found a replacement texture! insert it into the hash cache, and clear paltex (since it's not indexed)
paltex = false;
const HashCacheEntry entry{ replacement_tex, 1u, 0u, alpha_minmax, true };
const HashCacheEntry entry{replacement_tex, 1u, 0u, alpha_minmax, true};
m_hash_cache_replacement_memory_usage += entry.texture->GetMemUsage();
return &m_hash_cache.emplace(key, entry).first->second;
}
@@ -5331,8 +5334,8 @@ GSTextureCache::Target* GSTextureCache::Target::Create(GIFRegTEX0 TEX0, int w, i
const int scaled_w = static_cast<int>(std::ceil(static_cast<float>(w) * scale));
const int scaled_h = static_cast<int>(std::ceil(static_cast<float>(h) * scale));
GSTexture* texture = (type == RenderTarget) ?
g_gs_device->CreateRenderTarget(scaled_w, scaled_h, GSTexture::Format::Color, clear, PreferReusedLabelledTexture()) :
g_gs_device->CreateDepthStencil(scaled_w, scaled_h, GSTexture::Format::DepthStencil, clear, PreferReusedLabelledTexture());
g_gs_device->CreateRenderTarget(scaled_w, scaled_h, GSTexture::Format::Color, clear, PreferReusedLabelledTexture()) :
g_gs_device->CreateDepthStencil(scaled_w, scaled_h, GSTexture::Format::DepthStencil, clear, PreferReusedLabelledTexture());
if (!texture)
return nullptr;
@@ -5594,8 +5597,9 @@ bool GSTextureCache::Surface::Inside(u32 bp, u32 bw, u32 psm, const GSVector4i&
{
// Valid only for color formats.
const GSOffset off(GSLocalMemory::m_psm[psm].info, bp, bw, psm);
const u32 start_block = off.bnNoWrap(rect.x, rect.y);
const u32 end_block = off.bnNoWrap(rect.z - 1, rect.w - 1);
return bp >= m_TEX0.TBP0 && end_block <= UnwrappedEndBlock();
return start_block >= m_TEX0.TBP0 && end_block <= UnwrappedEndBlock();
}
bool GSTextureCache::Surface::Overlaps(u32 bp, u32 bw, u32 psm, const GSVector4i& rect)