Arm64Emitter: Check end of allocated space when emitting code

JitArm64 port of 5b52b3e.
JosJuice 2021-08-22 14:21:54 +02:00
parent 867cd99de1
commit 44beaeaff5
7 changed files with 98 additions and 53 deletions
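
The new interface is meant to be used by pointing the emitter at both ends of its region and checking for failure once a block has been emitted. Below is a minimal sketch of that call pattern, not code from the commit; EmitSomething, region and region_size are invented names, while SetCodePtr(), BRK() and HasWriteFailed() are the calls added or kept by this change.

#include <cstddef>
#include "Common/Arm64Emitter.h"

// Sketch only: emit a few placeholder instructions into a caller-provided
// region and report whether everything fit.
bool EmitSomething(u8* region, size_t region_size)
{
  Arm64Gen::ARM64XEmitter emit;
  emit.SetCodePtr(region, region + region_size);

  // Every Write32() now refuses to touch memory at or past the end pointer
  // and latches the failure flag instead.
  for (int i = 0; i < 4; i++)
    emit.BRK(0);

  // The generated code must not be executed if any write was refused.
  return !emit.HasWriteFailed();
}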

View File

@@ -71,14 +71,16 @@ std::optional<u8> FPImm8FromFloat(float value)
}
} // Anonymous namespace
void ARM64XEmitter::SetCodePtrUnsafe(u8* ptr)
void ARM64XEmitter::SetCodePtrUnsafe(u8* ptr, u8* end, bool write_failed)
{
m_code = ptr;
m_code_end = end;
m_write_failed = write_failed;
}
void ARM64XEmitter::SetCodePtr(u8* ptr, u8* end, bool write_failed)
{
SetCodePtrUnsafe(ptr);
SetCodePtrUnsafe(ptr, end, write_failed);
m_lastCacheFlushEnd = ptr;
}
@@ -92,6 +94,16 @@ u8* ARM64XEmitter::GetWritableCodePtr()
return m_code;
}
const u8* ARM64XEmitter::GetCodeEnd() const
{
return m_code_end;
}
u8* ARM64XEmitter::GetWritableCodeEnd()
{
return m_code_end;
}
void ARM64XEmitter::ReserveCodeSpace(u32 bytes)
{
for (u32 i = 0; i < bytes / 4; i++)
@@ -116,6 +128,13 @@ u8* ARM64XEmitter::AlignCodePage()
void ARM64XEmitter::Write32(u32 value)
{
if (m_code + sizeof(u32) > m_code_end)
{
m_code = m_code_end;
m_write_failed = true;
return;
}
std::memcpy(m_code, &value, sizeof(u32));
m_code += sizeof(u32);
}
@@ -659,6 +678,9 @@ static constexpr u32 MaskImm26(s64 distance)
// FixupBranch branching
void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch)
{
if (!branch.ptr)
return;
bool Not = false;
u32 inst = 0;
s64 distance = (s64)(m_code - branch.ptr);
@@ -709,67 +731,68 @@ void ARM64XEmitter::SetJumpTarget(FixupBranch const& branch)
std::memcpy(branch.ptr, &inst, sizeof(inst));
}
FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt)
FixupBranch ARM64XEmitter::WriteFixupBranch()
{
FixupBranch branch{};
branch.ptr = m_code;
BRK(0);
// If we couldn't write the full jump instruction, indicate that in the returned FixupBranch by
// setting the branch's address to null. This will prevent a later SetJumpTarget() from writing to
// invalid memory.
if (HasWriteFailed())
branch.ptr = nullptr;
return branch;
}
FixupBranch ARM64XEmitter::CBZ(ARM64Reg Rt)
{
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::CBZ;
branch.reg = Rt;
NOP();
return branch;
}
FixupBranch ARM64XEmitter::CBNZ(ARM64Reg Rt)
{
FixupBranch branch{};
branch.ptr = m_code;
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::CBNZ;
branch.reg = Rt;
NOP();
return branch;
}
FixupBranch ARM64XEmitter::B(CCFlags cond)
{
FixupBranch branch{};
branch.ptr = m_code;
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::BConditional;
branch.cond = cond;
NOP();
return branch;
}
FixupBranch ARM64XEmitter::TBZ(ARM64Reg Rt, u8 bit)
{
FixupBranch branch{};
branch.ptr = m_code;
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::TBZ;
branch.reg = Rt;
branch.bit = bit;
NOP();
return branch;
}
FixupBranch ARM64XEmitter::TBNZ(ARM64Reg Rt, u8 bit)
{
FixupBranch branch{};
branch.ptr = m_code;
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::TBNZ;
branch.reg = Rt;
branch.bit = bit;
NOP();
return branch;
}
FixupBranch ARM64XEmitter::B()
{
FixupBranch branch{};
branch.ptr = m_code;
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::B;
NOP();
return branch;
}
FixupBranch ARM64XEmitter::BL()
{
FixupBranch branch{};
branch.ptr = m_code;
FixupBranch branch = WriteFixupBranch();
branch.type = FixupBranch::Type::BL;
NOP();
return branch;
}
@@ -1945,12 +1968,12 @@ bool ARM64XEmitter::MOVI2R2(ARM64Reg Rd, u64 imm1, u64 imm2)
MOVI2R(Rd, imm1);
int size1 = GetCodePtr() - start_pointer;
SetCodePtrUnsafe(start_pointer);
m_code = start_pointer;
MOVI2R(Rd, imm2);
int size2 = GetCodePtr() - start_pointer;
SetCodePtrUnsafe(start_pointer);
m_code = start_pointer;
bool element = size1 > size2;
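
One consequence of the WriteFixupBranch() helper above: a forward branch now degrades gracefully when the buffer runs out, because the returned FixupBranch carries a null ptr and the later SetJumpTarget() simply returns. A rough sketch of the usual forward-branch pattern follows; EmitSkipIfZero and the use of X0 are made up for illustration, while CBZ(), BRK() and SetJumpTarget() are the emitter calls shown in this diff.

#include "Common/Arm64Emitter.h"

// Sketch only: skip over a trap instruction when X0 is zero.
void EmitSkipIfZero(Arm64Gen::ARM64XEmitter& emit)
{
  using namespace Arm64Gen;

  FixupBranch skip = emit.CBZ(ARM64Reg::X0);  // placeholder written, or refused
  emit.BRK(0x42);                             // body that should be skipped
  emit.SetJumpTarget(skip);                   // patches the placeholder; a no-op
                                              // if skip.ptr is null because the
                                              // placeholder never fit
}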

View File

@@ -725,8 +725,18 @@ class ARM64XEmitter
friend class ARM64FloatEmitter;
private:
u8* m_code;
u8* m_lastCacheFlushEnd;
// Pointer to memory where code will be emitted to.
u8* m_code = nullptr;
// Pointer past the end of the memory region we're allowed to emit to.
// Writes that would reach this memory are refused and will set the m_write_failed flag instead.
u8* m_code_end = nullptr;
u8* m_lastCacheFlushEnd = nullptr;
// Set to true when a write request happens that would write past m_code_end.
// Must be cleared with SetCodePtr() afterwards.
bool m_write_failed = false;
void AddImmediate(ARM64Reg Rd, ARM64Reg Rn, u64 imm, bool shift, bool negative, bool flags);
void EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr);
@@ -760,6 +770,8 @@ private:
void EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm);
void EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
FixupBranch WriteFixupBranch();
template <typename T>
void MOVI2RImpl(ARM64Reg Rd, T imm);
@@ -767,27 +779,30 @@ protected:
void Write32(u32 value);
public:
ARM64XEmitter() : m_code(nullptr), m_lastCacheFlushEnd(nullptr) {}
ARM64XEmitter(u8* code_ptr)
ARM64XEmitter() = default;
ARM64XEmitter(u8* code, u8* code_end)
: m_code(code), m_code_end(code_end), m_lastCacheFlushEnd(code)
{
m_code = code_ptr;
m_lastCacheFlushEnd = code_ptr;
}
virtual ~ARM64XEmitter() {}
// 'end' and 'write_failed' are unused in the ARM code emitter at the moment.
// They're just here for interface compatibility with the x64 code emitter.
void SetCodePtr(u8* ptr, u8* end, bool write_failed = false);
void SetCodePtrUnsafe(u8* ptr);
void SetCodePtrUnsafe(u8* ptr, u8* end, bool write_failed = false);
const u8* GetCodePtr() const;
u8* GetWritableCodePtr();
const u8* GetCodeEnd() const;
u8* GetWritableCodeEnd();
void ReserveCodeSpace(u32 bytes);
u8* AlignCode16();
u8* AlignCodePage();
const u8* GetCodePtr() const;
void FlushIcache();
void FlushIcacheSection(u8* start, u8* end);
u8* GetWritableCodePtr();
// Should be checked after a block of code has been generated to see if the code has been
// successfully written to memory. Do not call the generated code when this returns true!
bool HasWriteFailed() const { return m_write_failed; }
// FixupBranch branching
void SetJumpTarget(FixupBranch const& branch);
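
Worth noting from the fields above: m_code_end is exclusive, so a Write32() whose four bytes end exactly at m_code_end still succeeds; only the next word trips m_write_failed. A small sketch of that boundary, with BoundaryCheck and the 8-byte region chosen arbitrarily:

#include <cassert>
#include "Common/Arm64Emitter.h"

// 'buffer' must point at at least 8 writable bytes.
void BoundaryCheck(u8* buffer)
{
  Arm64Gen::ARM64XEmitter emit(buffer, buffer + 8);

  emit.BRK(0);                    // bytes 0..3: fits
  emit.BRK(0);                    // bytes 4..7: ends exactly at the limit, still fits
  assert(!emit.HasWriteFailed());

  emit.BRK(0);                    // would need bytes 8..11: refused
  assert(emit.HasWriteFailed());
}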

View File

@@ -45,7 +45,7 @@ void JitArm64::Init()
{
const size_t child_code_size = SConfig::GetInstance().bMMU ? FARCODE_SIZE_MMU : FARCODE_SIZE;
AllocCodeSpace(CODE_SIZE + child_code_size);
AddChildCodeSpace(&farcode, child_code_size);
AddChildCodeSpace(&m_far_code, child_code_size);
jo.fastmem_arena = SConfig::GetInstance().bFastmem && Memory::InitFastmemArena();
jo.enableBlocklink = true;
@@ -127,7 +127,7 @@ void JitArm64::ClearCache()
blocks.Clear();
const Common::ScopedJITPageWriteAndNoExecute enable_jit_page_writes;
ClearCodeSpace();
farcode.ClearCodeSpace();
m_far_code.ClearCodeSpace();
UpdateMemoryAndExceptionOptions();
GenerateAsm();
@@ -588,7 +588,7 @@ void JitArm64::Jit(u32)
#endif
}
if (IsAlmostFull() || farcode.IsAlmostFull() || SConfig::GetInstance().bJITNoBlockCache)
if (IsAlmostFull() || m_far_code.IsAlmostFull() || SConfig::GetInstance().bJITNoBlockCache)
{
ClearCache();
}
@@ -874,5 +874,5 @@ void JitArm64::DoJit(u32 em_address, JitBlock* b, u32 nextPC)
b->originalSize = code_block.m_num_instructions;
FlushIcache();
farcode.FlushIcache();
m_far_code.FlushIcache();
}

View File

@@ -191,20 +191,23 @@ protected:
// Simple functions to switch between near and far code emitting
void SwitchToFarCode()
{
nearcode = GetWritableCodePtr();
SetCodePtrUnsafe(farcode.GetWritableCodePtr());
m_near_code = GetWritableCodePtr();
m_near_code_end = GetWritableCodeEnd();
m_near_code_write_failed = HasWriteFailed();
SetCodePtrUnsafe(m_far_code.GetWritableCodePtr(), m_far_code.GetWritableCodeEnd(),
m_far_code.HasWriteFailed());
AlignCode16();
m_in_farcode = true;
m_in_far_code = true;
}
void SwitchToNearCode()
{
farcode.SetCodePtrUnsafe(GetWritableCodePtr());
SetCodePtrUnsafe(nearcode);
m_in_farcode = false;
m_far_code.SetCodePtrUnsafe(GetWritableCodePtr(), GetWritableCodeEnd(), HasWriteFailed());
SetCodePtrUnsafe(m_near_code, m_near_code_end, m_near_code_write_failed);
m_in_far_code = false;
}
bool IsInFarCode() const { return m_in_farcode; }
bool IsInFarCode() const { return m_in_far_code; }
// Dump a memory range of code
void DumpCode(const u8* start, const u8* end);
@@ -288,9 +291,13 @@ protected:
Arm64Gen::ARM64FloatEmitter m_float_emit;
Arm64Gen::ARM64CodeBlock farcode;
u8* nearcode; // Backed up when we switch to far code.
bool m_in_farcode = false;
Arm64Gen::ARM64CodeBlock m_far_code;
bool m_in_far_code = false;
// Backed up when we switch to far code.
u8* m_near_code;
u8* m_near_code_end;
bool m_near_code_write_failed;
bool m_enable_blr_optimization;
bool m_cleanup_after_stackfault = false;
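
SwitchToFarCode()/SwitchToNearCode() now save and restore the end pointer and the failure flag along with the write cursor, so a refused write in either region survives the switch and is still visible to HasWriteFailed() afterwards. The usual shape of a caller is sketched below with a hypothetical member function and an invented W0 condition; the emitter and switching calls are the ones declared above.

// Illustration only; not a function from this commit.
void JitArm64::SomeHypotheticalOp()
{
  using namespace Arm64Gen;

  FixupBranch slow = CBZ(ARM64Reg::W0);  // rare case jumps into far code

  SwitchToFarCode();     // saves near m_code / m_code_end / m_write_failed
  SetJumpTarget(slow);   // patch the near-code placeholder to point here
  BRK(0x100);            // ... out-of-line slow path ...
                         // (a real slow path would branch back to near code)
  SwitchToNearCode();    // restores the saved near-code state

  // ... fast path continues here in near code ...
}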

View File

@@ -60,7 +60,7 @@ void JitArm64BlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const
{
const Common::ScopedJITPageWriteAndNoExecute enable_jit_page_writes;
u8* location = source.exitPtrs;
ARM64XEmitter emit(location);
ARM64XEmitter emit(location, location + 12);
WriteLinkBlock(emit, source, dest);
emit.FlushIcache();
@@ -69,7 +69,7 @@ void JitArm64BlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const
void JitArm64BlockCache::WriteDestroyBlock(const JitBlock& block)
{
// Only clear the entry points as we might still be within this block.
ARM64XEmitter emit(block.checkedEntry);
ARM64XEmitter emit(block.checkedEntry, block.normalEntry + 4);
const Common::ScopedJITPageWriteAndNoExecute enable_jit_page_writes;
while (emit.GetWritableCodePtr() <= block.normalEntry)
emit.BRK(0x123);
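
The end pointers handed to these short-lived emitters are picked so the last intended word still fits under the new Write32() check: the check refuses any write that would reach m_code_end, so WriteDestroyBlock() must pass block.normalEntry + 4 to be allowed to overwrite the instruction at normalEntry itself, and WriteLinkBlock() gets location + 12 for its stub of at most three instructions. A sketch of the same loop with the arithmetic spelled out; DestroyEntryPoints is an invented stand-in for the member function above.

#include <cassert>
#include "Common/Arm64Emitter.h"

// Overwrite every word from checked_entry up to and including normal_entry
// with BRK, mirroring WriteDestroyBlock() above.
void DestroyEntryPoints(u8* checked_entry, u8* normal_entry)
{
  // Exclusive end: +4 is exactly what lets the word at normal_entry be written.
  Arm64Gen::ARM64XEmitter emit(checked_entry, normal_entry + 4);

  while (emit.GetWritableCodePtr() <= normal_entry)
    emit.BRK(0x123);

  assert(!emit.HasWriteFailed());  // every word, normal_entry included, fit
}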

View File

@@ -269,7 +269,7 @@ bool JitArm64::HandleFastmemFault(uintptr_t access_address, SContext* ctx)
return false;
const Common::ScopedJITPageWriteAndNoExecute enable_jit_page_writes;
ARM64XEmitter emitter(const_cast<u8*>(fastmem_area_start));
ARM64XEmitter emitter(const_cast<u8*>(fastmem_area_start), const_cast<u8*>(fastmem_area_end));
emitter.BL(slow_handler_iter->second.slowmem_code);

View File

@@ -35,7 +35,7 @@ public:
const Common::ScopedJITPageWriteAndNoExecute enable_jit_page_writes;
AllocCodeSpace(4096);
AddChildCodeSpace(&farcode, 2048);
AddChildCodeSpace(&m_far_code, 2048);
gpr.Init(this);
fpr.Init(this);