* Simplified VIFunpack's C-based interpreters (removed ODD size handlers and unused data)

* Fixed V2/V3 unpacks to behave same as the SSE unpacks (matches undefined PS2 behaviors)
* Removed legacy vifUnpacker (it hasn't been needed for any regression testing in a long time).
* Moved some VIF MARK console spam to DevCon (Ape Escape 3)

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@3746 96395faa-99c1-11dd-bbfe-3dabce05a288
This commit is contained in:
Jake.Stine 2010-09-10 13:16:50 +00:00
parent 14513cecb9
commit da9c955135
9 changed files with 54 additions and 347 deletions

View File

@ -225,7 +225,6 @@ set(pcsx2Headers
Vif_Dma.h
Vif.h
Vif_Unpack.h
Vif_Unpack.inl
vtlb.h
VUflags.h
VUmicro.h

View File

@ -394,7 +394,6 @@
<Unit filename="../Vif_Transfer.cpp" />
<Unit filename="../Vif_Unpack.cpp" />
<Unit filename="../Vif_Unpack.h" />
<Unit filename="../Vif_Unpack.inl" />
<Unit filename="../ZipTools/ThreadedZipTools.h" />
<Unit filename="../ZipTools/thread_gzip.cpp" />
<Unit filename="../ZipTools/thread_lzma.cpp" />

View File

@ -25,7 +25,7 @@
// Doesn't stall if the next vifCode is the Mark command
_vifT bool runMark(u32* &data) {
if (((vifXRegs.code >> 24) & 0x7f) == 0x7) {
Console.WriteLn("Vif%d: Running Mark with I-bit", idx);
DevCon.WriteLn("Vif%d: Running Mark with I-bit", idx);
return 1; // No Stall?
}
return 1; // Stall

View File

@ -89,7 +89,7 @@ static __ri void writeXYZW(u32 offnum, u32 &dest, u32 data) {
}
template < bool doMask, class T >
static __fi void __fastcall UNPACK_S(u32 *dest, const T *data, int size)
static void __fastcall UNPACK_S(u32 *dest, const T *data)
{
//S-# will always be a complete packet, no matter what. So we can skip the offset bits
writeXYZW<doMask>(OFFSET_X, *dest++, *data);
@ -98,99 +98,31 @@ static __fi void __fastcall UNPACK_S(u32 *dest, const T *data, int size)
writeXYZW<doMask>(OFFSET_W, *dest , *data);
}
// The PS2 console actually writes v1v0v1v0 for all V2 unpacks -- the second v1v0 pair
// being officially "indeterminate" but some games very much depend on it.
template <bool doMask, class T>
static __ri void __fastcall UNPACK_V2(u32 *dest, const T *data, int size)
static void __fastcall UNPACK_V2(u32 *dest, const T *data)
{
if (vifRegs->offset == OFFSET_X)
{
if (size > 0)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *data++);
vifRegs->offset = OFFSET_Y;
size--;
}
}
if (vifRegs->offset == OFFSET_Y)
{
if (size > 0)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *data);
vifRegs->offset = OFFSET_Z;
size--;
}
}
if (vifRegs->offset == OFFSET_Z)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *dest-2);
vifRegs->offset = OFFSET_W;
}
if (vifRegs->offset == OFFSET_W)
{
writeXYZW<doMask>(vifRegs->offset, *dest, *data);
vifRegs->offset = OFFSET_X;
}
writeXYZW<doMask>(0, *dest++, *data);
writeXYZW<doMask>(1, *dest++, *(data+1));
writeXYZW<doMask>(2, *dest++, *data);
writeXYZW<doMask>(3, *dest++, *(data+1));
}
// V3 and V4 unpacks both use the V4 unpack logic, even though most of the OFFSET_W fields
// during V3 unpacking end up being overwritten by the next unpack. This is confirmed real
// hardware behavior that games such as Ape Escape 3 depend on.
template <bool doMask, class T>
static __ri void __fastcall UNPACK_V3(u32 *dest, const T *data, int size)
static void __fastcall UNPACK_V4(u32 *dest, const T *data)
{
if(vifRegs->offset == OFFSET_X)
{
if (size > 0)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *data++);
vifRegs->offset = OFFSET_Y;
size--;
}
}
if(vifRegs->offset == OFFSET_Y)
{
if (size > 0)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *data++);
vifRegs->offset = OFFSET_Z;
size--;
}
}
if(vifRegs->offset == OFFSET_Z)
{
if (size > 0)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *data++);
vifRegs->offset = OFFSET_W;
size--;
}
}
if(vifRegs->offset == OFFSET_W)
{
// V3-# does some bizarre thing with alignment, every 6qw of data the W becomes 0 (strange console!)
// Ape Escape doesn't seem to like it tho (what the hell?) gonna have to investigate
writeXYZW<doMask>(vifRegs->offset, *dest, *data);
vifRegs->offset = OFFSET_X;
}
}
template <bool doMask, class T>
static __fi void __fastcall UNPACK_V4(u32 *dest, const T *data , int size)
{
while (size > 0)
{
writeXYZW<doMask>(vifRegs->offset, *dest++, *data++);
vifRegs->offset++;
size--;
}
if (vifRegs->offset > OFFSET_W) vifRegs->offset = OFFSET_X;
writeXYZW<doMask>(OFFSET_X, *dest++, *data++);
writeXYZW<doMask>(OFFSET_Y, *dest++, *data++);
writeXYZW<doMask>(OFFSET_Z, *dest++, *data++);
writeXYZW<doMask>(OFFSET_W, *dest , *data);
}
template< bool doMask >
static __ri void __fastcall UNPACK_V4_5(u32 *dest, const u32 *data, int size)
static void __fastcall UNPACK_V4_5(u32 *dest, const u32 *data)
{
//As with S-#, this will always be a complete packet
writeXYZW<doMask>(OFFSET_X, *dest++, ((*data & 0x001f) << 3));
@ -201,36 +133,6 @@ static __ri void __fastcall UNPACK_V4_5(u32 *dest, const u32 *data, int size)
// =====================================================================================================
template < bool doMask, int size, class T >
static void __fastcall fUNPACK_S(u32 *dest, const T *data)
{
UNPACK_S<doMask>( dest, data, size );
}
template <bool doMask, int size, class T>
static void __fastcall fUNPACK_V2(u32 *dest, const T *data)
{
UNPACK_V2<doMask>( dest, data, size );
}
template <bool doMask, int size, class T>
static void __fastcall fUNPACK_V3(u32 *dest, const T *data)
{
UNPACK_V3<doMask>( dest, data, size );
}
template <bool doMask, int size, class T>
static void __fastcall fUNPACK_V4(u32 *dest, const T *data)
{
UNPACK_V4<doMask>( dest, data, size );
}
template< bool doMask >
static void __fastcall fUNPACK_V4_5(u32 *dest, const u32 *data)
{
UNPACK_V4_5<doMask>(dest, data, 0); // size is ignored.
}
// --------------------------------------------------------------------------------------
// Main table for function unpacking.
// --------------------------------------------------------------------------------------
@ -248,45 +150,38 @@ static void __fastcall fUNPACK_V4_5(u32 *dest, const u32 *data)
#define _upk (UNPACKFUNCTYPE)
#define _odd (UNPACKFUNCTYPE_ODD)
#define _unpk_s(bits) (UNPACKFUNCTYPE_S##bits)
#define _odd_s(bits) (UNPACKFUNCTYPE_ODD_S##bits)
#define _unpk_u(bits) (UNPACKFUNCTYPE_U##bits)
#define _odd_u(bits) (UNPACKFUNCTYPE_ODD_U##bits)
// 32-bits versions are unsigned-only!!
#define UnpackFuncPair32( sizefac, vt, doMask ) \
(UNPACKFUNCTYPE)_unpk_u(32) fUNPACK_##vt<doMask, sizefac, u32>, \
(UNPACKFUNCTYPE)_unpk_u(32) fUNPACK_##vt<doMask, sizefac, u32>, \
(UNPACKFUNCTYPE_ODD)_odd_u(32) UNPACK_##vt<doMask, u32>, \
(UNPACKFUNCTYPE_ODD)_odd_u(32) UNPACK_##vt<doMask, u32>,
#define UnpackFuncPair32( vt, doMask ) \
(UNPACKFUNCTYPE)_unpk_u(32) UNPACK_##vt<doMask, u32>, \
(UNPACKFUNCTYPE)_unpk_u(32) UNPACK_##vt<doMask, u32>
#define UnpackFuncPair( vt, bits, doMask ) \
(UNPACKFUNCTYPE)_unpk_u(bits) UNPACK_##vt<doMask, u##bits>, \
(UNPACKFUNCTYPE)_unpk_s(bits) UNPACK_##vt<doMask, s##bits>
#define UnpackFuncPair( sizefac, vt, bits, doMask ) \
(UNPACKFUNCTYPE)_unpk_u(bits) fUNPACK_##vt<doMask, sizefac, u##bits>, \
(UNPACKFUNCTYPE)_unpk_s(bits) fUNPACK_##vt<doMask, sizefac, s##bits>, \
(UNPACKFUNCTYPE_ODD)_odd_u(bits) UNPACK_##vt<doMask, u##bits>, \
(UNPACKFUNCTYPE_ODD)_odd_s(bits) UNPACK_##vt<doMask, s##bits>,
#define UnpackFuncSet( doMask ) \
{ UnpackFuncPair32( 4, S, doMask ) 1, 4, 4, 4 }, /* 0x0 - S-32 */ \
{ UnpackFuncPair ( 4, S, 16, doMask ) 2, 2, 2, 4 }, /* 0x1 - S-16 */ \
{ UnpackFuncPair ( 4, S, 8, doMask ) 4, 1, 1, 4 }, /* 0x2 - S-8 */ \
{ NULL, NULL, NULL, NULL, 0, 0, 0, 0 }, /* 0x3 (NULL) */ \
{ UnpackFuncPair32( 2, V2, doMask ) 24, 4, 8, 2 }, /* 0x4 - V2-32 */ \
{ UnpackFuncPair ( 2, V2, 16, doMask ) 12, 2, 4, 2 }, /* 0x5 - V2-16 */ \
{ UnpackFuncPair ( 2, V2, 8, doMask ) 6, 1, 2, 2 }, /* 0x6 - V2-8 */ \
{ NULL, NULL, NULL, NULL,0, 0, 0, 0 }, /* 0x7 (NULL) */ \
{ UnpackFuncPair32( 3, V3, doMask ) 36, 4, 12, 3 }, /* 0x8 - V3-32 */ \
{ UnpackFuncPair ( 3, V3, 16, doMask ) 18, 2, 6, 3 }, /* 0x9 - V3-16 */ \
{ UnpackFuncPair ( 3, V3, 8, doMask ) 9, 1, 3, 3 }, /* 0xA - V3-8 */ \
{ NULL, NULL, NULL, NULL,0, 0, 0, 0 }, /* 0xB (NULL) */ \
{ UnpackFuncPair32( 4, V4, doMask ) 48, 4, 16, 4 }, /* 0xC - V4-32 */ \
{ UnpackFuncPair ( 4, V4, 16, doMask ) 24, 2, 8, 4 }, /* 0xD - V4-16 */ \
{ UnpackFuncPair ( 4, V4, 8, doMask ) 12, 1, 4, 4 }, /* 0xE - V4-8 */ \
{ /* 0xF - V4-5 */ \
(UNPACKFUNCTYPE)_unpk_u(32) fUNPACK_V4_5<doMask>, \
(UNPACKFUNCTYPE)_unpk_u(32) fUNPACK_V4_5<doMask>, \
(UNPACKFUNCTYPE_ODD)_odd_u(32) UNPACK_V4_5<doMask>, \
(UNPACKFUNCTYPE_ODD)_odd_u(32) UNPACK_V4_5<doMask>, \
6, 2, 2, 4 },
#define UnpackFuncSet( doMask ) \
{ UnpackFuncPair32( S, doMask ), 4, 4 }, /* 0x0 - S-32 */ \
{ UnpackFuncPair ( S, 16, doMask ), 2, 4 }, /* 0x1 - S-16 */ \
{ UnpackFuncPair ( S, 8, doMask ), 1, 4 }, /* 0x2 - S-8 */ \
{ NULL, NULL, 0, 0 }, /* 0x3 (NULL) */ \
{ UnpackFuncPair32( V2, doMask ), 8, 2 }, /* 0x4 - V2-32 */ \
{ UnpackFuncPair ( V2, 16, doMask ), 4, 2 }, /* 0x5 - V2-16 */ \
{ UnpackFuncPair ( V2, 8, doMask ), 2, 2 }, /* 0x6 - V2-8 */ \
{ NULL, NULL, 0, 0 }, /* 0x7 (NULL) */ \
{ UnpackFuncPair32( V4, doMask ), 12, 3 }, /* 0x8 - V3-32 */ \
{ UnpackFuncPair ( V4, 16, doMask ), 6, 3 }, /* 0x9 - V3-16 */ \
{ UnpackFuncPair ( V4, 8, doMask ), 3, 3 }, /* 0xA - V3-8 */ \
{ NULL, NULL, 0, 0 }, /* 0xB (NULL) */ \
{ UnpackFuncPair32( V4, doMask ), 16, 4 }, /* 0xC - V4-32 */ \
{ UnpackFuncPair ( V4, 16, doMask ), 8, 4 }, /* 0xD - V4-16 */ \
{ UnpackFuncPair ( V4, 8, doMask ), 4, 4 }, /* 0xE - V4-8 */ \
{ /* 0xF - V4-5 */ \
(UNPACKFUNCTYPE)_unpk_u(32)UNPACK_V4_5<doMask>, \
(UNPACKFUNCTYPE)_unpk_u(32)UNPACK_V4_5<doMask>, \
2, 4 \
},
const __aligned16 VIFUnpackFuncTable VIFfuncTable[32] =
{
@ -337,7 +232,6 @@ _vifT void vifUnpackSetup(const u32 *data) {
vifX.cl = 0;
vifX.tag.cmd = vifX.cmd;
vifXRegs.offset = 0;
}
template void vifUnpackSetup<0>(const u32 *data);

View File

@ -16,7 +16,6 @@
#pragma once
typedef void (__fastcall *UNPACKFUNCTYPE)(u32 *dest, const u32 *data);
typedef void (__fastcall *UNPACKFUNCTYPE_ODD)(u32 *dest, const u32 *data, int size);
typedef int (*UNPACKPARTFUNCTYPESSE)(u32 *dest, const u32 *data, int size);
#define create_unpack_u_type(bits) typedef void (__fastcall *UNPACKFUNCTYPE_U##bits)(u32 *dest, const u##bits *data);
@ -39,11 +38,6 @@ struct VIFUnpackFuncTable
UNPACKFUNCTYPE funcU;
UNPACKFUNCTYPE funcS;
UNPACKFUNCTYPE_ODD oddU; // needed for old-style vif only, remove when old vif is removed.
UNPACKFUNCTYPE_ODD oddS; // needed for old-style vif only, remove when old vif is removed.
u8 bsize; // currently unused
u8 dsize; // byte size of one channel
u8 gsize; // size of data in bytes used for each write cycle
u8 qsize; // used for unpack parts, num of vectors that
// will be decompressed from data for 1 cycle

View File

@ -1,159 +0,0 @@
/* PCSX2 - PS2 Emulator for PCs
* Copyright (C) 2002-2010 PCSX2 Dev Team
*
* PCSX2 is free software: you can redistribute it and/or modify it under the terms
* of the GNU Lesser General Public License as published by the Free Software Found-
* ation, either version 3 of the License, or (at your option) any later version.
*
* PCSX2 is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with PCSX2.
* If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
// Old Vif Unpack Code
// Only here for testing/reference
// Legacy C-interpreter VIF UNPACK (the old pre-SSE path), templated on the VIF
// unit index: 0 = VIF0 -> VU0 memory, 1 = VIF1 -> VU1 memory.
// Decompresses 'size' (given in 32-bit words; see the <<= 2 below) of packed
// data from 'data' into VU micro memory at v->addr, honoring the CL/WL cycle
// registers (skipping vs filling writes).
// NOTE(review): per the header comment, kept for testing/reference only; the
// newVif/SSE path is the maintained implementation.
template<const u32 VIFdmanum> void VIFunpack(u32 *data, vifCode *v, u32 size) {
//if (!VIFdmanum) DevCon.WriteLn("vif#%d, size = %d [%x]", VIFdmanum, size, data);
VURegs * VU;
u8 *cdata = (u8*)data;
u32 tempsize = 0;
// VU0 has 0x1000 bytes of data memory, VU1 has 0x4000; used below to wrap
// destination addresses that run past the end.
const u32 memlimit = (VIFdmanum == 0) ? 0x1000 : 0x4000;
if (VIFdmanum == 0) {
VU = &VU0;
vifRegs = &vif0Regs;
vif = &vif0;
}
else {
VU = &VU1;
vifRegs = &vif1Regs;
vif = &vif1;
}
u32 *dest = (u32*)(VU->Mem + v->addr);
// Select the unpack handler from the low 5 bits of the vifCode command
// (format/bit-depth), and the signed vs unsigned variant per the USN flag.
const VIFUnpackFuncTable& ft( VIFfuncTable[ v->cmd & 0x1f ] );
UNPACKFUNCTYPE func = vif->usn ? ft.funcU : ft.funcS;
// Convert the incoming word count to a byte count.
size <<= 2;
if (vifRegs->cycle.cl >= vifRegs->cycle.wl) { // skipping write
if (v->addr >= memlimit) {
DevCon.Warning("Overflown at the start");
v->addr &= (memlimit - 1);
dest = (u32*)(VU->Mem + v->addr);
}
size = std::min<u32>(size, vifRegs->num * ft.gsize); //size will always be the same or smaller
// Projected end address in VU mem: num qwords written plus (cl - wl)
// skipped qwords for every completed write cycle.  Used only as an
// overflow sanity check below.
tempsize = v->addr + ((((vifRegs->num-1) / vifRegs->cycle.wl) *
(vifRegs->cycle.cl - vifRegs->cycle.wl)) * 16) + (vifRegs->num * 16);
//Sanity Check (memory overflow)
if (tempsize > memlimit) {
if (((vifRegs->cycle.cl != vifRegs->cycle.wl) &&
((memlimit + (vifRegs->cycle.cl - vifRegs->cycle.wl) * 16) == tempsize))) {
// Overshoot is exactly one cycle's worth of skipped qwords past the
// end, which never gets written:
//It's a red herring, so ignore it! SSE unpacks will be much quicker.
DevCon.WriteLn("what!!!!!!!!!");
//tempsize = 0;
tempsize = size;
size = 0;
}
else {
DevCon.Warning("VIF%x Unpack ending %x > %x", VIFdmanum, tempsize, VIFdmanum ? 0x4000 : 0x1000);
tempsize = size;
size = 0;
}
}
else {
// No overflow risk; route everything through the wrapping loop below.
tempsize = size;
size = 0;
}
if (tempsize) {
// Destination stride (in u32s) applied when a write cycle completes:
// the qword just written plus the (cl - wl) skipped qwords.
int incdest = ((vifRegs->cycle.cl - vifRegs->cycle.wl) << 2) + 4;
size = 0;
int addrstart = v->addr;
//if((tempsize >> 2) != v->size) DevCon.Warning("split when size != tagsize");
//DbgCon.WriteLn("sorting tempsize :p, size %d, vifnum %d, addr %x", tempsize, vifRegs->num, v->addr);
// Unpack whole vectors while both input bytes and the vector count remain.
while ((tempsize >= ft.gsize) && (vifRegs->num > 0)) {
if(v->addr >= memlimit) {
DevCon.Warning("Mem limit overflow");
v->addr &= (memlimit - 1);
dest = (u32*)(VU->Mem + v->addr);
}
func(dest, (u32*)cdata);
cdata += ft.gsize;
tempsize -= ft.gsize;
vifRegs->num--;
vif->cl++;
if (vif->cl == vifRegs->cycle.wl) {
// End of a write cycle: jump over the (cl - wl) unwritten qwords.
dest += incdest;
v->addr +=(incdest * 4);
vif->cl = 0;
}
else {
dest += 4;
v->addr += 16;
}
}
if (v->addr >= memlimit) {
v->addr &=(memlimit - 1);
dest = (u32*)(VU->Mem + v->addr);
}
v->addr = addrstart;
// Bytes that didn't form a whole vector fall through to the
// partial-vector ("odd") handler below.
if(tempsize > 0) size = tempsize;
}
if (size >= ft.dsize && vifRegs->num > 0) { //Else write what we do have
VIF_LOG("warning, end with size = %d", size);
// unpack one qword
//v->addr += (size / ft.dsize) * 4;
(vif->usn ? ft.oddU : ft.oddS)(dest, (u32*)cdata, size / ft.dsize);
size = 0;
//DbgCon.WriteLn("leftover done, size %d, vifnum %d, addr %x", size, vifRegs->num, v->addr);
}
}
else { // filling write
// WL > CL here: each cycle writes WL qwords but only consumes CL vectors
// of fresh input; the fill portion re-runs the unpack without advancing
// the source pointer (see the inner else below).
if(vifRegs->cycle.cl > 0) // Quicker and avoids zero division :P
if((u32)(((size / ft.gsize) / vifRegs->cycle.cl) * vifRegs->cycle.wl) < vifRegs->num)
DevCon.Warning("Filling write warning! %x < %x and CL = %x WL = %x", (size / ft.gsize), vifRegs->num, vifRegs->cycle.cl, vifRegs->cycle.wl);
DevCon.Warning("filling write %d cl %d, wl %d mask %x mode %x unpacktype %x addr %x", vifRegs->num, vifRegs->cycle.cl, vifRegs->cycle.wl, vifRegs->mask, vifRegs->mode, v->cmd & 0xf, vif->tag.addr);
while (vifRegs->num > 0) {
if (vif->cl == vifRegs->cycle.wl) {
vif->cl = 0;
}
// unpack one qword
if (vif->cl < vifRegs->cycle.cl) {
// Normal portion of the cycle: consume fresh input data.
if(size < ft.gsize) { DevCon.WriteLn("Out of Filling write data!"); break; }
func(dest, (u32*)cdata);
cdata += ft.gsize;
size -= ft.gsize;
vif->cl++;
vifRegs->num--;
if (vif->cl == vifRegs->cycle.wl) {
vif->cl = 0;
}
}
else {
// Fill portion: unpack again from the same (last-read) input data.
func(dest, (u32*)cdata);
v->addr += 16;
vifRegs->num--;
vif->cl++;
}
dest += 4;
if (vifRegs->num == 0) break;
}
}
}

View File

@ -831,10 +831,6 @@
RelativePath="..\..\Vif_Unpack.h"
>
</File>
<File
RelativePath="..\..\Vif_Unpack.inl"
>
</File>
<Filter
Name="newVif"
>

View File

@ -107,5 +107,4 @@ extern __aligned16 const u8 nVifT[16];
extern __aligned16 nVifCall nVifUpk[(2*2*16)*4]; // ([USN][Masking][Unpack Type]) [curCycle]
extern __aligned16 u32 nVifMask[3][4][4]; // [MaskNumber][CycleNumber][Vector]
static const bool useOldUnpack = 0; // Use code in newVif_OldUnpack.inl
static const bool newVifDynaRec = 1; // Use code in newVif_Dynarec.inl

View File

@ -21,7 +21,6 @@
#include "Common.h"
#include "Vif_Dma.h"
#include "newVif.h"
#include "Vif_Unpack.inl"
__aligned16 nVifStruct nVif[2];
__aligned16 nVifCall nVifUpk[(2*2*16) *4]; // ([USN][Masking][Unpack Type]) [curCycle]
@ -47,7 +46,7 @@ __aligned16 const u8 nVifT[16] = {
};
// ----------------------------------------------------------------------------
template< int idx, bool doMode, bool isFill, bool singleUnpack >
template< int idx, bool doMode, bool isFill >
__ri void __fastcall _nVifUnpackLoop(const u8 *data, u32 size);
typedef void __fastcall FnType_VifUnpackLoop(const u8 *data, u32 size);
@ -55,18 +54,10 @@ typedef FnType_VifUnpackLoop* Fnptr_VifUnpackLoop;
// Unpacks Until 'Num' is 0
static const __aligned16 Fnptr_VifUnpackLoop UnpackLoopTable[2][2][2] = {
{{ _nVifUnpackLoop<0,0,0,0>, _nVifUnpackLoop<0,0,1,0> },
{ _nVifUnpackLoop<0,1,0,0>, _nVifUnpackLoop<0,1,1,0> },},
{{ _nVifUnpackLoop<1,0,0,0>, _nVifUnpackLoop<1,0,1,0> },
{ _nVifUnpackLoop<1,1,0,0>, _nVifUnpackLoop<1,1,1,0> },},
};
// Unpacks until 1 normal write cycle unpack has been written to VU mem
static const __aligned16 Fnptr_VifUnpackLoop UnpackSingleTable[2][2][2] = {
{{ _nVifUnpackLoop<0,0,0,1>, _nVifUnpackLoop<0,0,1,1> },
{ _nVifUnpackLoop<0,1,0,1>, _nVifUnpackLoop<0,1,1,1> },},
{{ _nVifUnpackLoop<1,0,0,1>, _nVifUnpackLoop<1,0,1,1> },
{ _nVifUnpackLoop<1,1,0,1>, _nVifUnpackLoop<1,1,1,1> },},
{{ _nVifUnpackLoop<0,0,0>, _nVifUnpackLoop<0,0,1> },
{ _nVifUnpackLoop<0,1,0>, _nVifUnpackLoop<0,1,1> },},
{{ _nVifUnpackLoop<1,0,0>, _nVifUnpackLoop<1,0,1> },
{ _nVifUnpackLoop<1,1,0>, _nVifUnpackLoop<1,1,1> },},
};
// ----------------------------------------------------------------------------
@ -195,7 +186,7 @@ static void setMasks(int idx, const VIFregisters& v) {
// elimination that it would probably be a win in most cases (and for sure in many
// "slow" games that need it most). --air
template< int idx, bool doMode, bool isFill, bool singleUnpack >
template< int idx, bool doMode, bool isFill >
__ri void __fastcall _nVifUnpackLoop(const u8 *data, u32 size) {
const int cycleSize = isFill ? vifRegs->cycle.cl : vifRegs->cycle.wl;
@ -219,7 +210,8 @@ __ri void __fastcall _nVifUnpackLoop(const u8 *data, u32 size) {
while (vifRegs->num) {
if (vif->cl < cycleSize) {
if (size < ft.gsize) break;
// This should always be true as per the _1mb buffer used to merge partial transfers.
pxAssume (size >= ft.gsize);
if (doMode) {
//DevCon.WriteLn("Non SSE; unpackNum = %d", upkNum);
func((u32*)dest, (u32*)data);
@ -233,7 +225,6 @@ __ri void __fastcall _nVifUnpackLoop(const u8 *data, u32 size) {
vifRegs->num--;
incVUptrBy16(idx, dest, vuMemBase);
if (++vif->cl == blockSize) vif->cl = 0;
if (singleUnpack) return;
}
else if (isFill) {
//DevCon.WriteLn("isFill!");
@ -251,12 +242,6 @@ __ri void __fastcall _nVifUnpackLoop(const u8 *data, u32 size) {
__fi void _nVifUnpack(int idx, const u8 *data, u32 size, bool isFill) {
if (useOldUnpack) {
if (!idx) VIFunpack<0>((u32*)data, &vif0.tag, size>>2);
else VIFunpack<1>((u32*)data, &vif1.tag, size>>2);
return;
}
const bool doMode = !!vifRegs->mode;
UnpackLoopTable[idx][doMode][isFill]( data, size );
}