diff --git a/pcsx2-codeblocks.workspace b/pcsx2-codeblocks.workspace
index 16d92eb170..b1dfb8dc1c 100644
--- a/pcsx2-codeblocks.workspace
+++ b/pcsx2-codeblocks.workspace
@@ -1,7 +1,7 @@
-
+
@@ -30,6 +30,6 @@
-
+
diff --git a/plugins/zzogl-pg/opengl/Mem.cpp b/plugins/zzogl-pg/opengl/Mem.cpp
index 3d67880f85..ce65e96a0b 100644
--- a/plugins/zzogl-pg/opengl/Mem.cpp
+++ b/plugins/zzogl-pg/opengl/Mem.cpp
@@ -146,9 +146,14 @@ End: \
return (nSize * TransmitPitch##TransSfx(2) + nLeftOver)/2; \
} \
-//#define NEW_TRANSFER
+#define NEW_TRANSFER
#ifdef NEW_TRANSFER
+u32 TransPitch(u32 pitch, u32 size)
+{
+ return pitch * size / 8;
+}
+
//DEFINE_TRANSFERLOCAL(32, u32, 2, 32, 8, 8, _, SwizzleBlock32);
int TransferHostLocal32(const void* pbyMem, u32 nQWordSize)
{
@@ -157,7 +162,8 @@ int TransferHostLocal32(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u32);
-// _SwizzleBlock swizzle;
+ const u32 transfersize = 32;
+ _SwizzleBlock swizzle;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -166,21 +172,25 @@ int TransferHostLocal32(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u32* pbuf = (const u32*)pbyMem;
- const int tp2 = TransmitPitch_(2);
+ const int tp2 = TransPitch(2, transfersize);
int nLeftOver = (nQWordSize*4*2)%tp2;
int nSize = (nQWordSize*4*2)/tp2;
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
- int endY = ROUND_UPPOW2(i, blockheight);
+ int endY = ROUND_UPPOW2(gs.imageY, blockheight);
int alignedY = ROUND_DOWNPOW2(gs.imageEndY, blockheight);
int alignedX = ROUND_DOWNPOW2(gs.imageEndX, blockwidth);
- bool bAligned, bCanAlign = MOD_POW2(gs.trxpos.dx, blockwidth) == 0 && (j == gs.trxpos.dx) && (alignedY > endY) && alignedX > gs.trxpos.dx;
+ bool bAligned;
+ bool bCanAlign = ((MOD_POW2(gs.trxpos.dx, blockwidth) == 0) && (gs.imageX == gs.trxpos.dx) && (alignedY > endY) && (alignedX > gs.trxpos.dx));
- if ((gs.imageEndX-gs.trxpos.dx)%widthlimit)
+ if ((gs.imageEndX - gs.trxpos.dx) % widthlimit)
{
/* hack */
- int testwidth = (int)nSize - (gs.imageEndY-i)*(gs.imageEndX-gs.trxpos.dx)+(j-gs.trxpos.dx);
+ int testwidth = (int)nSize -
+ (gs.imageEndY - gs.imageY) * (gs.imageEndX - gs.trxpos.dx)
+ + (gs.imageX - gs.trxpos.dx);
+
if((testwidth <= widthlimit) && (testwidth >= -widthlimit))
{
/* don't transfer */
@@ -191,7 +201,7 @@ int TransferHostLocal32(const void* pbyMem, u32 nQWordSize)
}
/* first align on block boundary */
- if ( MOD_POW2(i, blockheight) || !bCanAlign )
+ if ( MOD_POW2(gs.imageY, blockheight) || !bCanAlign )
{
if( !bCanAlign )
@@ -199,10 +209,10 @@ int TransferHostLocal32(const void* pbyMem, u32 nQWordSize)
else
assert( endY < gs.imageEndY); /* part of alignment condition */
- if (((gs.imageEndX-gs.trxpos.dx)%widthlimit) || ((gs.imageEndX-j)%widthlimit))
+ if (((gs.imageEndX - gs.trxpos.dx) % widthlimit) || ((gs.imageEndX - gs.imageX) % widthlimit))
{
/* transmit with a width of 1 */
- TRANSMIT_HOSTLOCAL_Y_(32, u32, (1+(DSTPSM == 0x14)), endY);
+ TRANSMIT_HOSTLOCAL_Y_(32, u32, (1 + (DSTPSM == 0x14)), endY);
}
else
{
@@ -212,76 +222,52 @@ int TransferHostLocal32(const void* pbyMem, u32 nQWordSize)
if( nSize == 0 || i == gs.imageEndY ) goto End;
}
- assert( MOD_POW2(i, blockheight) == 0 && j == gs.trxpos.dx);
+ //assert( MOD_POW2(i, blockheight) == 0 && j == gs.trxpos.dx);
/* can align! */
- pitch = gs.imageEndX-gs.trxpos.dx;
- area = pitch*blockheight;
- fracX = gs.imageEndX-alignedX;
+ pitch = gs.imageEndX - gs.trxpos.dx;
+ area = pitch * blockheight;
+ fracX = gs.imageEndX - alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch) & 0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize) & 0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
-
if ( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)))
- {
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
- {
- u8 *temp = pstart + getPixelAddress_0(32, tempj, i, gs.dstbuf.bw)*blockbits/8;
- SwizzleBlock32(temp, (u8*)pbuf, TransmitPitch_(pitch));
- }
- }
+ swizzle = SwizzleBlock32;
else
- {
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
- {
- u8 *temp = pstart + getPixelAddress_0(32, tempj, i, gs.dstbuf.bw)*blockbits/8;
- SwizzleBlock32u(temp, (u8*)pbuf, TransmitPitch_(pitch));
- }
- }
+ swizzle = SwizzleBlock32u;
-
-// if ( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)))
-// {
-// swizzle = SwizzleBlock32;
-// }
-// else
-// {
-// swizzle = SwizzleBlock32u;
-// }
-//
-// for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
-// {
-// u8 *temp = pstart + getPixelAddress_0(32, tempj, i, gs.dstbuf.bw)*blockbits/8;
-// swizzle(temp, (u8*)pbuf, TransmitPitch_(pitch), 0xffffffff);
-// }
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
+ {
+ u8 *temp = pstart + getPixelAddress_0(32, tempj, i, gs.dstbuf.bw)*blockbits/8;
+ swizzle(temp, (u8*)pbuf, TransPitch(pitch, transfersize), 0xffffffff);
+ }
/* transfer the rest */
if( alignedX < gs.imageEndX )
{
- TRANSMIT_HOSTLOCAL_X_(32, u32, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_((alignedX-gs.trxpos.dx))/TSize;
+ TRANSMIT_HOSTLOCAL_X_(32, u32, widthlimit, blockheight, alignedX);
+ pbuf -= TransPitch((alignedX - gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/TSize;
+ pbuf += (blockheight - 1)* TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if ( TransmitPitch_(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0)
{
TRANSMIT_HOSTLOCAL_Y_(32, u32, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
-End:
- if( i >= gs.imageEndY )
+End: if( i >= gs.imageEndY )
{
assert( gs.imageTransfer == -1 || i == gs.imageEndY );
gs.imageTransfer = -1;
@@ -296,7 +282,7 @@ End:
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(32Z, u32, 2, 32, 8, 8, _, SwizzleBlock32);
@@ -307,6 +293,7 @@ int TransferHostLocal32Z(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u32);
+ const u32 transfersize = 32;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -315,8 +302,8 @@ int TransferHostLocal32Z(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u32* pbuf = (const u32*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/ TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -368,23 +355,23 @@ int TransferHostLocal32Z(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/sizeof(u32))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u32))
{
- SwizzleBlock32(pstart + getPixelAddress_0(32Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock32(pstart + getPixelAddress_0(32Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/sizeof(u32))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u32))
{
- SwizzleBlock32u(pstart + getPixelAddress_0(32Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock32u(pstart + getPixelAddress_0(32Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -392,20 +379,20 @@ int TransferHostLocal32Z(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_( 32Z, u32, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_(alignedX - gs.trxpos.dx)/sizeof(u32);
+ pbuf -= TransPitch((alignedX - gs.trxpos.dx), transfersize)/sizeof(u32);
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/sizeof(u32);
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/sizeof(u32);
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_( 32Z, u32, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -421,7 +408,7 @@ int TransferHostLocal32Z(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(24, u8, 8, 32, 8, 8, _24, SwizzleBlock24);
@@ -432,6 +419,7 @@ int TransferHostLocal24(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 24;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -440,8 +428,8 @@ int TransferHostLocal24(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_24(2));
- int nSize = nQWordSize*4*2/TransmitPitch_24(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/ TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -493,23 +481,23 @@ int TransferHostLocal24(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_24(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize) & 0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_24(blockwidth)/sizeof(u8))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u8))
{
- SwizzleBlock24(pstart + getPixelAddress_0(24,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_24(pitch));
+ SwizzleBlock24(pstart + getPixelAddress_0(24,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_24(blockwidth)/sizeof(u8))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u8))
{
- SwizzleBlock24u(pstart + getPixelAddress_0(24,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_24(pitch));
+ SwizzleBlock24u(pstart + getPixelAddress_0(24,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -517,20 +505,20 @@ int TransferHostLocal24(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_24(24, T, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_24((alignedX-gs.trxpos.dx))/sizeof(u8);
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/sizeof(u8);
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_24(pitch)/sizeof(u8);
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/sizeof(u8);
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_24(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_24(24, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_24(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -546,7 +534,7 @@ int TransferHostLocal24(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_24(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(24Z, u8, 8, 32, 8, 8, _24, SwizzleBlock24);
@@ -557,6 +545,7 @@ int TransferHostLocal24Z(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 24;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -565,8 +554,8 @@ int TransferHostLocal24Z(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_24(2));
- int nSize = nQWordSize*4*2/TransmitPitch_24(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/ TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -618,23 +607,23 @@ int TransferHostLocal24Z(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_24(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_24(blockwidth)/sizeof(u8))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u8))
{
- SwizzleBlock24(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_24(pitch));
+ SwizzleBlock24(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_24(blockwidth)/sizeof(u8))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u8))
{
- SwizzleBlock24u(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_24(pitch));
+ SwizzleBlock24u(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -642,20 +631,20 @@ int TransferHostLocal24Z(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_24(16, u8, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_24(alignedX-gs.trxpos.dx)/sizeof(u8);
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/sizeof(u8);
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_24(pitch)/sizeof(u8);
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/sizeof(u8);
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_24(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0)
{
TRANSMIT_HOSTLOCAL_Y_24(24, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_24(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -671,7 +660,7 @@ int TransferHostLocal24Z(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_24(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(16, u16, 4, 16, 16, 8, _, SwizzleBlock16);
@@ -682,6 +671,7 @@ int TransferHostLocal16(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 16;
const u32 blockheight = 8;
const u32 TSize = sizeof(u16);
+ const u32 transfersize = 16;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -690,8 +680,8 @@ int TransferHostLocal16(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u16* pbuf = (const u16*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/ TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -743,23 +733,23 @@ int TransferHostLocal16(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/sizeof(u16))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u16))
{
- SwizzleBlock16(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/sizeof(u16))
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/sizeof(u16))
{
- SwizzleBlock16u(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16u(pstart + getPixelAddress_0(16,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -767,20 +757,20 @@ int TransferHostLocal16(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_(16, T, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_((alignedX-gs.trxpos.dx))/sizeof(u16);
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/sizeof(u16);
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/sizeof(u16);
+ pbuf += (blockheight-1)* TransPitch(pitch, transfersize)/sizeof(u16);
}
j = gs.trxpos.dx;
}
-	if (TransmitPitch_(nSize)/4 > 0 )
+	if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_(16, u16, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -796,7 +786,7 @@ int TransferHostLocal16(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
-	return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+	return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(16S, u16, 4, 16, 16, 8, _, SwizzleBlock16);
@@ -807,6 +797,7 @@ int TransferHostLocal16S(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 16;
const u32 blockheight = 8;
const u32 TSize = sizeof(u16);
+ const u32 transfersize = 16;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -815,8 +806,8 @@ int TransferHostLocal16S(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u16* pbuf = (const u16*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/ TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -868,23 +859,23 @@ int TransferHostLocal16S(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock16(pstart + getPixelAddress_0(16S,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16(pstart + getPixelAddress_0(16S,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock16u(pstart + getPixelAddress_0(16S,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16u(pstart + getPixelAddress_0(16S,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -892,20 +883,20 @@ int TransferHostLocal16S(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_(16S, u16, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_((alignedX-gs.trxpos.dx))/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/TSize;
+ pbuf += (blockheight-1) * TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_(16S, u16, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -921,7 +912,7 @@ int TransferHostLocal16S(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(16Z, u16, 4, 16, 16, 8, _, SwizzleBlock16);
@@ -932,6 +923,7 @@ int TransferHostLocal16Z(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 16;
const u32 blockheight = 8;
const u32 TSize = sizeof(u16);
+ const u32 transfersize = 16;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -940,8 +932,8 @@ int TransferHostLocal16Z(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u16* pbuf = (const u16*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/ TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -993,23 +985,23 @@ int TransferHostLocal16Z(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock16(pstart + getPixelAddress_0(16Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16(pstart + getPixelAddress_0(16Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock16u(pstart + getPixelAddress_0(16Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16u(pstart + getPixelAddress_0(16Z,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1017,20 +1009,20 @@ int TransferHostLocal16Z(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_(16Z, T, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_(16Z, u16, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1046,7 +1038,7 @@ int TransferHostLocal16Z(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(16SZ, u16, 4, 16, 16, 8, _, SwizzleBlock16);
@@ -1057,6 +1049,7 @@ int TransferHostLocal16SZ(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 16;
const u32 blockheight = 8;
const u32 TSize = sizeof(u16);
+ const u32 transfersize = 16;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -1065,8 +1058,8 @@ int TransferHostLocal16SZ(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u16* pbuf = (const u16*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -1118,23 +1111,23 @@ int TransferHostLocal16SZ(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock16(pstart + getPixelAddress_0(16SZ,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16(pstart + getPixelAddress_0(16SZ,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock16u(pstart + getPixelAddress_0(16SZ,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock16u(pstart + getPixelAddress_0(16SZ,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1142,20 +1135,20 @@ int TransferHostLocal16SZ(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_(16SZ, u16, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_(16SZ, u16, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1171,7 +1164,7 @@ int TransferHostLocal16SZ(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(8, u8, 4, 8, 16, 16, _, SwizzleBlock8);
@@ -1182,6 +1175,7 @@ int TransferHostLocal8(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 16;
const u32 blockheight = 16;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 8;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -1190,8 +1184,8 @@ int TransferHostLocal8(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -1243,23 +1237,23 @@ int TransferHostLocal8(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock8(pstart + getPixelAddress_0(8,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock8(pstart + getPixelAddress_0(8,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock8u(pstart + getPixelAddress_0(8,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock8u(pstart + getPixelAddress_0(8,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1267,20 +1261,20 @@ int TransferHostLocal8(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_(8, u8, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch(alignedX-gs.trxpos.dx, transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TRANSMIT_PITCH_(nSize, u8)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_(8, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1296,7 +1290,7 @@ int TransferHostLocal8(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(4, u8, 8, 4, 32, 16, _4, SwizzleBlock4);
@@ -1307,6 +1301,7 @@ int TransferHostLocal4(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 32;
const u32 blockheight = 16;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 4;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -1315,8 +1310,8 @@ int TransferHostLocal4(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_4(2));
- int nSize = nQWordSize*4*2/TransmitPitch_4(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -1357,7 +1352,7 @@ int TransferHostLocal4(const void* pbyMem, u32 nQWordSize)
TRANSMIT_HOSTLOCAL_Y_4(4, u8, widthlimit, endY);
}
- if( nSize == 0 || i == gs.imageEndY ) goto End;
+ if( nSize == 0 || i == gs.imageEndY ) goto End;
}
assert( MOD_POW2(i, blockheight) == 0 && j == gs.trxpos.dx);
@@ -1368,23 +1363,23 @@ int TransferHostLocal4(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_4(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_4(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock4(pstart + getPixelAddress_0(4,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_4(pitch));
+ SwizzleBlock4(pstart + getPixelAddress_0(4,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_4(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock4u(pstart + getPixelAddress_0(4,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_4(pitch));
+ SwizzleBlock4u(pstart + getPixelAddress_0(4,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1392,20 +1387,20 @@ int TransferHostLocal4(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_4(4, u8, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_4(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_4(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_4(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_4(4, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_4(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1421,7 +1416,7 @@ int TransferHostLocal4(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_4(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(8H, u8, 4, 32, 8, 8, _, SwizzleBlock8H);
@@ -1432,6 +1427,7 @@ int TransferHostLocal8H(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 8;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -1440,8 +1436,8 @@ int TransferHostLocal8H(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_(2));
- int nSize = nQWordSize*4*2/TransmitPitch_(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -1493,23 +1489,23 @@ int TransferHostLocal8H(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock8H(pstart + getPixelAddress_0(8H,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock8H(pstart + getPixelAddress_0(8H,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock8Hu(pstart + getPixelAddress_0(8H,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_(pitch));
+ SwizzleBlock8Hu(pstart + getPixelAddress_0(8H,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1517,11 +1513,11 @@ int TransferHostLocal8H(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_(8H, u8, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
@@ -1530,7 +1526,7 @@ int TransferHostLocal8H(const void* pbyMem, u32 nQWordSize)
{
TRANSMIT_HOSTLOCAL_Y_(8H, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1546,7 +1542,7 @@ int TransferHostLocal8H(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(4HL, u8, 8, 32, 8, 8, _4, SwizzleBlock4HL);
@@ -1557,6 +1553,7 @@ int TransferHostLocal4HL(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 4;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -1565,8 +1562,8 @@ int TransferHostLocal4HL(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_4(2));
- int nSize = nQWordSize*4*2/TransmitPitch_4(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -1618,23 +1615,23 @@ int TransferHostLocal4HL(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_4(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_4(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock4HL(pstart + getPixelAddress_0(4HL,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_4(pitch));
+ SwizzleBlock4HL(pstart + getPixelAddress_0(4HL,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_4(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock4HLu(pstart + getPixelAddress_0(4HL,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_4(pitch));
+ SwizzleBlock4HLu(pstart + getPixelAddress_0(4HL,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1642,20 +1639,20 @@ int TransferHostLocal4HL(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_4(4HL, u8, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_4(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_4(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_4(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_4(4HL, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_4(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1671,7 +1668,7 @@ int TransferHostLocal4HL(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_4(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
//DEFINE_TRANSFERLOCAL(4HH, u8, 8, 32, 8, 8, _4, SwizzleBlock4HH);
@@ -1682,6 +1679,7 @@ int TransferHostLocal4HH(const void* pbyMem, u32 nQWordSize)
const u32 blockwidth = 8;
const u32 blockheight = 8;
const u32 TSize = sizeof(u8);
+ const u32 transfersize = 4;
assert( gs.imageTransfer == 0 );
u8* pstart = g_pbyGSMemory + gs.dstbuf.bp*256;
@@ -1690,8 +1688,8 @@ int TransferHostLocal4HH(const void* pbyMem, u32 nQWordSize)
int i = gs.imageY, j = gs.imageX;
const u8* pbuf = (const u8*)pbyMem;
- int nLeftOver = (nQWordSize*4*2)%(TransmitPitch_4(2));
- int nSize = nQWordSize*4*2/TransmitPitch_4(2);
+ int nLeftOver = (nQWordSize*4*2)%(TransPitch(2, transfersize));
+ int nSize = nQWordSize*4*2/TransPitch(2, transfersize);
nSize = min(nSize, gs.imageWnew * gs.imageHnew);
int pitch, area, fracX;
@@ -1743,23 +1741,23 @@ int TransferHostLocal4HH(const void* pbyMem, u32 nQWordSize)
fracX = gs.imageEndX-alignedX;
/* on top of checking whether pbuf is aligned, make sure that the width is at least aligned to its limits (due to bugs in pcsx2) */
- bAligned = !((uptr)pbuf & 0xf) && (TransmitPitch_4(pitch)&0xf) == 0;
+ bAligned = !((uptr)pbuf & 0xf) && (TransPitch(pitch, transfersize)&0xf) == 0;
/* transfer aligning to blocks */
for(; i < alignedY && nSize >= area; i += blockheight, nSize -= area)
{
if( bAligned || ((DSTPSM==PSMCT24) || (DSTPSM==PSMT8H) || (DSTPSM==PSMT4HH) || (DSTPSM==PSMT4HL)) )
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_4(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock4HH(pstart + getPixelAddress_0(4HH,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_4(pitch));
+ SwizzleBlock4HH(pstart + getPixelAddress_0(4HH,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
else
{
- for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransmitPitch_4(blockwidth)/TSize)
+ for(int tempj = gs.trxpos.dx; tempj < alignedX; tempj += blockwidth, pbuf += TransPitch(blockwidth, transfersize)/TSize)
{
- SwizzleBlock4HHu(pstart + getPixelAddress_0(4HH,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransmitPitch_4(pitch));
+ SwizzleBlock4HHu(pstart + getPixelAddress_0(4HH,tempj, i, gs.dstbuf.bw)*blockbits/8, (u8*)pbuf, TransPitch(pitch, transfersize));
}
}
@@ -1767,20 +1765,20 @@ int TransferHostLocal4HH(const void* pbyMem, u32 nQWordSize)
if ( alignedX < gs.imageEndX )
{
TRANSMIT_HOSTLOCAL_X_4(4HH,u8, widthlimit, blockheight, alignedX);
- pbuf -= TransmitPitch_4(alignedX-gs.trxpos.dx)/TSize;
+ pbuf -= TransPitch((alignedX-gs.trxpos.dx), transfersize)/TSize;
}
else
{
- pbuf += (blockheight-1)*TransmitPitch_4(pitch)/TSize;
+ pbuf += (blockheight-1)*TransPitch(pitch, transfersize)/TSize;
}
j = gs.trxpos.dx;
}
- if (TransmitPitch_4(nSize)/4 > 0 )
+ if (TransPitch(nSize, transfersize)/4 > 0 )
{
TRANSMIT_HOSTLOCAL_Y_4(4HH, u8, widthlimit, gs.imageEndY);
/* sometimes wrong sizes are sent (tekken tag) */
- assert( gs.imageTransfer == -1 || TransmitPitch_4(nSize)/4 <= 2 );
+ assert( gs.imageTransfer == -1 || TransPitch(nSize, transfersize)/4 <= 2 );
}
End:
@@ -1796,23 +1794,23 @@ int TransferHostLocal4HH(const void* pbyMem, u32 nQWordSize)
gs.imageY = i;
gs.imageX = j;
}
- return (nSize * TransmitPitch_4(2) + nLeftOver)/2;
+ return (nSize * TransPitch(2, transfersize) + nLeftOver)/2;
}
#else
-DEFINE_TRANSFERLOCAL(32, u32, 2, 32, 8, 8, _, SwizzleBlock32);
-DEFINE_TRANSFERLOCAL(32Z, u32, 2, 32, 8, 8, _, SwizzleBlock32);
-DEFINE_TRANSFERLOCAL(24, u8, 8, 32, 8, 8, _24, SwizzleBlock24);
-DEFINE_TRANSFERLOCAL(24Z, u8, 8, 32, 8, 8, _24, SwizzleBlock24);
-DEFINE_TRANSFERLOCAL(16, u16, 4, 16, 16, 8, _, SwizzleBlock16);
-DEFINE_TRANSFERLOCAL(16S, u16, 4, 16, 16, 8, _, SwizzleBlock16);
-DEFINE_TRANSFERLOCAL(16Z, u16, 4, 16, 16, 8, _, SwizzleBlock16);
-DEFINE_TRANSFERLOCAL(16SZ, u16, 4, 16, 16, 8, _, SwizzleBlock16);
-DEFINE_TRANSFERLOCAL(8, u8, 4, 8, 16, 16, _, SwizzleBlock8);
-DEFINE_TRANSFERLOCAL(4, u8, 8, 4, 32, 16, _4, SwizzleBlock4);
-DEFINE_TRANSFERLOCAL(8H, u8, 4, 32, 8, 8, _, SwizzleBlock8H);
-DEFINE_TRANSFERLOCAL(4HL, u8, 8, 32, 8, 8, _4, SwizzleBlock4HL);
-DEFINE_TRANSFERLOCAL(4HH, u8, 8, 32, 8, 8, _4, SwizzleBlock4HH);
+DEFINE_TRANSFERLOCAL(32, u32, 2, 32, 8, 8, _, SwizzleBlock32);
+DEFINE_TRANSFERLOCAL(32Z, u32, 2, 32, 8, 8, _, SwizzleBlock32);
+DEFINE_TRANSFERLOCAL(24, u8, 8, 32, 8, 8, _24, SwizzleBlock24);
+DEFINE_TRANSFERLOCAL(24Z, u8, 8, 32, 8, 8, _24, SwizzleBlock24);
+DEFINE_TRANSFERLOCAL(16, u16, 4, 16, 16, 8, _, SwizzleBlock16);
+DEFINE_TRANSFERLOCAL(16S, u16, 4, 16, 16, 8, _, SwizzleBlock16);
+DEFINE_TRANSFERLOCAL(16Z, u16, 4, 16, 16, 8, _, SwizzleBlock16);
+DEFINE_TRANSFERLOCAL(16SZ, u16, 4, 16, 16, 8, _, SwizzleBlock16);
+DEFINE_TRANSFERLOCAL(8, u8, 4, 8, 16, 16, _, SwizzleBlock8);
+DEFINE_TRANSFERLOCAL(4, u8, 8, 4, 32, 16, _4, SwizzleBlock4);
+DEFINE_TRANSFERLOCAL(8H, u8, 4, 32, 8, 8, _, SwizzleBlock8H);
+DEFINE_TRANSFERLOCAL(4HL, u8, 8, 32, 8, 8, _4, SwizzleBlock4HL);
+DEFINE_TRANSFERLOCAL(4HH, u8, 8, 32, 8, 8, _4, SwizzleBlock4HH);
#endif