Fixed bugs in the BTS instruction emitters; still working on bugs in the BTS-based memory protection.

git-svn-id: http://pcsx2.googlecode.com/svn/trunk@1089 96395faa-99c1-11dd-bbfe-3dabce05a288
Jake.Stine 2009-04-29 20:55:13 +00:00
parent d8617c1ee8
commit a2fd88d952
4 changed files with 40 additions and 59 deletions

View File

@@ -751,6 +751,13 @@ void recClear(u32 addr, u32 size)
 		return;
 	addr = HWADDR(addr);
+	addr -= addr % 16; // round down.
+	size += 3; // round up!
+	size -= size % 4;
+	for (u32 a = addr / 16; a < addr / 16 + size / 4; a++)
+		vtlb_private::vtlbdata.alloc_bits[a / 8] &= ~(1 << (a & 7));
 	int blockidx = recBlocks.LastIndex(addr + size * 4 - 4);
 	if (blockidx == -1)
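Note: the hunk above clears one tracking bit per 16-byte block of the cleared range, where size counts 32-bit words. A minimal standalone model of that arithmetic, evaluated directly (the bitmap name, its size, and the helper function are invented for illustration; the real bitmap is vtlb_private::vtlbdata.alloc_bits):

    #include <cstdint>

    // Illustrative model only: one bit tracks one 16-byte block,
    // eight blocks per bitmap byte, mirroring the hunk above.
    static uint8_t alloc_bits[0x1000];

    static void clear_alloc_bits(uint32_t addr, uint32_t size_in_words)
    {
        addr -= addr % 16;                   // round start down to a block boundary
        size_in_words += 3;                  // round the word count up...
        size_in_words -= size_in_words % 4;  // ...to whole 16-byte blocks
        for (uint32_t a = addr / 16; a < addr / 16 + size_in_words / 4; a++)
            alloc_bits[a / 8] &= ~(1u << (a & 7)); // clear this block's bit
    }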
@@ -1544,66 +1551,32 @@ StartRecomp:
 	}
 	else
 	{
 		// import the vtlbdata (alloc_bits and alloc_base and stuff):
 		using namespace vtlb_private;
 		MOV32ItoR(ECX, inpage_ptr);
-		MOV32ItoR(EDX, pgsz);
+		MOV32ItoR(EDX, pgsz / 4);
-		u32 mask=0;
-		u32 writen=0;
-		u32 writen_start=0;
+		u32 index = (psM - vtlbdata.alloc_base + inpage_ptr) / 16 / 32; // 16 bytes per bit, 32 bits per dword.
+		u32 mask = 0;
-		u32 lpc=inpage_ptr;
-		u32 stg=pgsz;
-		while(stg>0)
-		{
-			u32 bit = (lpc>>4) & 7;
-			if (mask==0)
+		u32 start = inpage_ptr & ~15;
+		u32 end = inpage_ptr + pgsz;
+		for (u32 pos = start, bit = (start / 16) % 32; pos < end; pos += 16, bit++) {
+			if( bit == 32 )
 			{
-				//writen=bit;
-				writen_start=(((u8*)PSM(lpc)-vtlbdata.alloc_base)>>4)/8;
+				xTEST(ptr32[&vtlbdata.alloc_bits[index]], mask);
+				xJNZ(dyna_block_discard);
+				bit = 0;
+				mask = 0;
+				index++;
 			}
 			mask |= 1 << bit;
-			if (bit==31)
-			{
-				vtlbdata.alloc_bits[writen_start]&=~mask;
-				xTEST( ptr32[&vtlbdata.alloc_bits[writen_start]], mask ); // auto-optimizes to imm8 when applicable.
-				xJNZ( dyna_block_discard );
-				//SysPrintf("%08X %d %d\n",mask,pgsz,pgsz>>4);
-				mask = 0;
-			}
-			//writen++;
-			if (stg<=16)
-			{
-				lpc += stg;
-				stg = 0;
-			}
-			else
-			{
-				lpc += 16;
-				stg -= 16;
-			}
 		}
+		xTEST(ptr32[&vtlbdata.alloc_bits[index]], mask);
+		xJNZ(dyna_block_discard);
-		if (mask)
+		if (manual_counter[inpage_ptr >> 12] <= 4)
 		{
-			vtlbdata.alloc_bits[writen_start] &= ~mask;
-			xTEST( ptr32[&vtlbdata.alloc_bits[writen_start]], mask ); // auto-optimizes to imm8 when applicable.
-			xJNZ( dyna_block_discard );
-			//SysPrintf("%08X %d %d\n",mask,pgsz,pgsz>>4);
-			mask = 0;
-		}
-		if( startpc != 0x81fc0 && manual_counter[inpage_ptr >> 12] <= 4 )
-		{
 			// Commented out until we replace it with a smarter algo that only
 			// recompiles blocks a limited number of times.
 			xADD(ptr16[&manual_page[inpage_ptr >> 12]], 1);
 			xJC( dyna_page_reset );
 		}
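For reference, the rewritten loop above emits one xTEST/xJNZ pair per dword of the bitmap: index walks alloc_bits 32 tracked blocks at a time while mask accumulates one bit per 16-byte block of the page span, and a final pair covers the leftover mask. A plain C++ sketch of the equivalent check, evaluated directly rather than emitted (the function name and the dword-typed bitmap parameter are assumptions for illustration):

    #include <cstdint>

    // Direct-evaluation model of the check the emitted code performs:
    // returns true if any 16-byte block in [inpage_ptr, inpage_ptr+pgsz)
    // has its "written" bit set. 16 bytes per bit, 32 bits per dword.
    static bool page_span_dirty(const uint32_t* alloc_bits32,
                                uint32_t base_offset, uint32_t inpage_ptr, uint32_t pgsz)
    {
        uint32_t index = (base_offset + inpage_ptr) / 16 / 32;
        uint32_t mask  = 0;
        uint32_t start = inpage_ptr & ~15u;
        uint32_t end   = inpage_ptr + pgsz;

        for (uint32_t pos = start, bit = (start / 16) % 32; pos < end; pos += 16, bit++)
        {
            if (bit == 32)  // mask full: test this dword, move to the next
            {
                if (alloc_bits32[index] & mask) return true; // ~ xTEST + xJNZ
                bit = 0; mask = 0; index++;
            }
            mask |= 1u << bit;
        }
        return (alloc_bits32[index] & mask) != 0; // trailing partial dword
    }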

View File

@@ -430,7 +430,7 @@ static void _vtlb_DynGen_DirectWrite( u32 bits )
 	u8* bits_base = vtlbdata.alloc_bits;
 	bits_base -= (alloc_base>>4)/8; // in bytes
-	xBTS( ecx, bits_base );
+	xBTS( ptr32[bits_base], ecx );
 }
 
 // ------------------------------------------------------------------------
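The one-line fix above swaps the call into the memory form of BTS: the old argument order apparently resolved to a register-destination overload, so the operands came out reversed, while BTS [bits_base], ecx sets bit ecx of the bitmap, and a register bit offset may index far beyond the addressed dword. A sketch of the memory form's semantics per the x86 manuals (the helper name is invented):

    #include <cstdint>

    // Semantics of "BTS dword ptr [base], ecx" in plain C++: the bit offset
    // is a signed index from base, not limited to one dword, so the CPU
    // addresses base + (offset >> 5) dwords and sets bit (offset & 31) there.
    // The bit's previous value goes to the carry flag (returned here).
    static bool bts_mem(uint32_t* base, int32_t bitoffset)
    {
        uint32_t* dword = base + (bitoffset >> 5);
        uint32_t  bit   = 1u << (bitoffset & 31);
        bool old = (*dword & bit) != 0; // -> CF
        *dword |= bit;
        return old;
    }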

View File

@@ -96,15 +96,23 @@ public:
 // Bit Test Instructions - Valid on 16/32 bit instructions only.
 //
 template< G8Type InstType >
-class xImpl_Group8 : public xImpl_BitScan<0xa3 | (InstType << 2)>
+class xImpl_Group8
 {
+	static const uint RegFormOp = 0xa3 | (InstType << 3);
 public:
-	using xImpl_BitScan<0xa3 | (InstType << 2)>::operator();
+	__forceinline void operator()( const xRegister32& bitbase, const xRegister32& bitoffset ) const { xOpWrite0F( RegFormOp, bitbase, bitoffset ); }
+	__forceinline void operator()( const xRegister16& bitbase, const xRegister16& bitoffset ) const { xOpWrite0F( 0x66, RegFormOp, bitbase, bitoffset ); }
+	__forceinline void operator()( const ModSibBase& bitbase, const xRegister32& bitoffset ) const { xOpWrite0F( RegFormOp, bitoffset, bitbase ); }
+	__forceinline void operator()( const ModSibBase& bitbase, const xRegister16& bitoffset ) const { xOpWrite0F( 0x66, RegFormOp, bitoffset, bitbase ); }
+	__forceinline void operator()( const void* bitbase, const xRegister32& bitoffset ) const { xOpWrite0F( RegFormOp, bitoffset, bitbase ); }
+	__forceinline void operator()( const void* bitbase, const xRegister16& bitoffset ) const { xOpWrite0F( 0x66, RegFormOp, bitoffset, bitbase ); }
-	__forceinline void operator()( const ModSibStrict<u32>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
-	__forceinline void operator()( const ModSibStrict<u16>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0x66, 0xba, InstType, bitbase, bitoffset ); }
-	void operator()( const xRegister<u32>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
-	void operator()( const xRegister<u16>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0x66, 0xba, InstType, bitbase, bitoffset ); }
+	__forceinline void operator()( const ModSibStrict<u32>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
+	__forceinline void operator()( const ModSibStrict<u16>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0x66, 0xba, InstType, bitbase, bitoffset ); }
+	__forceinline void operator()( const u32* bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
+	__forceinline void operator()( const u16* bitbase, u8 bitoffset ) const { xOpWrite0F( 0x66, 0xba, InstType, bitbase, bitoffset ); }
+	__forceinline void operator()( const xRegister<u32>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0xba, InstType, bitbase, bitoffset ); }
+	__forceinline void operator()( const xRegister<u16>& bitbase, u8 bitoffset ) const { xOpWrite0F( 0x66, 0xba, InstType, bitbase, bitoffset ); }
 
 	xImpl_Group8() {}
 };
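The key change in this file is RegFormOp: InstType is now shifted by 3 rather than 2. Assuming G8Type holds the 0F BA /4../7 extension values (BT=4, BTS=5, BTR=6, BTC=7, which is what passing InstType straight into xOpWrite0F( 0xba, InstType, ... ) implies), 0xa3 | (InstType << 3) lands exactly on the register-form opcodes 0F A3/AB/B3/BB, while the old shift produced wrong opcodes. A compile-time check of that arithmetic (enum values assumed as stated):

    #include <cstdint>

    // Assumed G8Type values, matching the 0F BA /4../7 encodings used above.
    enum G8Type { G8Type_BT = 4, G8Type_BTS, G8Type_BTR, G8Type_BTC };

    constexpr uint32_t RegFormOp(G8Type t) { return 0xa3 | (t << 3); }

    // Register/memory-form opcodes per the Intel manuals: 0F A3/AB/B3/BB.
    static_assert(RegFormOp(G8Type_BT)  == 0xa3, "BT  is 0F A3");
    static_assert(RegFormOp(G8Type_BTS) == 0xab, "BTS is 0F AB");
    static_assert(RegFormOp(G8Type_BTR) == 0xb3, "BTR is 0F B3");
    static_assert(RegFormOp(G8Type_BTC) == 0xbb, "BTC is 0F BB");

    // The old shift was wrong: BT came out as 0F B3 (BTR's opcode) and
    // BTS as 0F B7 (which is MOVZX, not a bit operation at all).
    static_assert((0xa3 | (G8Type_BT  << 2)) == 0xb3, "old formula mis-encoded BT");
    static_assert((0xa3 | (G8Type_BTS << 2)) == 0xb7, "old formula mis-encoded BTS");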

View File

@@ -34,12 +34,12 @@ namespace x86Emitter
 {
 	void xStoreReg( const xRegisterSSE& src )
 	{
-		xMOVDQA( &g_globalXMMData[src.Id], src );
+		xMOVDQA( &g_globalXMMData[src.Id*2], src );
 	}
 
 	void xRestoreReg( const xRegisterSSE& dest )
 	{
-		xMOVDQA( dest, &g_globalXMMData[dest.Id] );
+		xMOVDQA( dest, &g_globalXMMData[dest.Id*2] );
 	}
 }
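The Id*2 scaling in this last hunk suggests g_globalXMMData is an array of 64-bit elements, so each 128-bit XMM register needs two consecutive slots; with the old indexing, adjacent registers' saves overlapped by 8 bytes. A sketch of that assumed layout (element type, register count, and names are assumptions; the 16-byte alignment itself is a hard MOVDQA requirement):

    #include <cstddef>
    #include <cstdint>

    // Assumed backing store: 8 XMM registers, two u64 slots each.
    // MOVDQA faults on unaligned access, so the array must be 16-byte aligned.
    alignas(16) static uint64_t g_globalXMMData[8 * 2];
    static_assert(sizeof(g_globalXMMData) == 8 * 16, "16 bytes of storage per register");

    // Byte offset of register Id's save slot, after and before the fix:
    constexpr size_t fixed_offset(int id) { return sizeof(uint64_t) * (id * 2); }
    constexpr size_t buggy_offset(int id) { return sizeof(uint64_t) * id; }

    static_assert(fixed_offset(1) - fixed_offset(0) == 16, "slots no longer overlap");
    static_assert(buggy_offset(1) - buggy_offset(0) == 8,  "old saves overlapped by 8 bytes");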