Merge pull request #6631 from lioncash/const
x64Emitter: Make the Align* functions return a non-const data pointer
commit 0e4bc6e915
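
The point of the change: the Align* helpers advance the emitter's write
cursor through memory the emitter itself owns and writes to, so returning
const u8* only forced every writable call site to cast the constness straight
back off. A minimal standalone sketch of the call-site cleanup (the table
name mirrors the diff below; the stub bodies are illustrative, not Dolphin's):

    #include <cstddef>
    #include <cstdint>

    using u8 = std::uint8_t;

    static u8 buffer[4096];

    // Stand-ins for the two signatures; placeholder bodies, not Dolphin's.
    const u8* AlignCodeToOld(std::size_t) { return buffer; }  // pre-commit
    u8* AlignCodeTo(std::size_t) { return buffer; }           // post-commit

    const u8** pairedStoreQuantized;

    int main()
    {
      // Before: the const return type forced a const_cast at writable call sites.
      pairedStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeToOld(256)));

      // After: the writable pointer flows straight into the reinterpret_cast.
      pairedStoreQuantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
    }
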
@@ -146,7 +146,7 @@ void XEmitter::ReserveCodeSpace(int bytes)
     *code++ = 0xCC;
 }

-const u8* XEmitter::AlignCodeTo(size_t alignment)
+u8* XEmitter::AlignCodeTo(size_t alignment)
 {
   ASSERT_MSG(DYNA_REC, alignment != 0 && (alignment & (alignment - 1)) == 0,
              "Alignment must be power of two");
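
The assertion in AlignCodeTo relies on the standard bit trick: a nonzero
value is a power of two exactly when it shares no set bits with value - 1.
The same property lets the code pointer be rounded up with a mask instead of
a division; a sketch of the arithmetic (the usual idiom, not code copied from
Dolphin):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // True for 1, 2, 4, 8, ...: subtracting 1 flips the lone set bit and
    // every bit below it, so ANDing with the original clears all bits.
    bool IsPow2(std::size_t alignment)
    {
      return alignment != 0 && (alignment & (alignment - 1)) == 0;
    }

    // Round an address up to the next multiple of a power-of-two alignment.
    std::uintptr_t AlignUp(std::uintptr_t addr, std::size_t alignment)
    {
      assert(IsPow2(alignment));
      const auto mask = static_cast<std::uintptr_t>(alignment) - 1;
      return (addr + mask) & ~mask;
    }
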
@@ -156,17 +156,17 @@ const u8* XEmitter::AlignCodeTo(size_t alignment)
   return code;
 }

-const u8* XEmitter::AlignCode4()
+u8* XEmitter::AlignCode4()
 {
   return AlignCodeTo(4);
 }

-const u8* XEmitter::AlignCode16()
+u8* XEmitter::AlignCode16()
 {
   return AlignCodeTo(16);
 }

-const u8* XEmitter::AlignCodePage()
+u8* XEmitter::AlignCodePage()
 {
   return AlignCodeTo(4096);
 }
@@ -381,10 +381,10 @@ public:
   virtual ~XEmitter() = default;
   void SetCodePtr(u8* ptr);
   void ReserveCodeSpace(int bytes);
-  const u8* AlignCodeTo(size_t alignment);
-  const u8* AlignCode4();
-  const u8* AlignCode16();
-  const u8* AlignCodePage();
+  u8* AlignCodeTo(size_t alignment);
+  u8* AlignCode4();
+  u8* AlignCode16();
+  u8* AlignCodePage();
   const u8* GetCodePtr() const;
   u8* GetWritableCodePtr();

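Note how the header keeps a clean const split after the change: everything
that mutates or emits (SetCodePtr, ReserveCodeSpace, the Align* family,
GetWritableCodePtr) now deals in u8*, while read-only observation stays
behind GetCodePtr() const. A sketch of that convention (names match the
diff; the class body is illustrative):

    #include <cstddef>
    #include <cstdint>

    using u8 = std::uint8_t;

    class Emitter
    {
      u8* code = nullptr;

    public:
      // Mutating entry points hand out writable pointers...
      u8* AlignCodeTo(std::size_t alignment);
      u8* GetWritableCodePtr() { return code; }

      // ...while read-only observation keeps the const view.
      const u8* GetCodePtr() const { return code; }
    };
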
@@ -231,7 +231,7 @@ constexpr std::array<u8, 8> sizes{{32, 0, 0, 0, 8, 16, 8, 16}};
 void CommonAsmRoutines::GenQuantizedStores()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
-  pairedStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  pairedStoreQuantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));

   for (int type = 0; type < 8; type++)
@@ -242,7 +242,7 @@ void CommonAsmRoutines::GenQuantizedStores()
 void CommonAsmRoutines::GenQuantizedSingleStores()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
-  singleStoreQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  singleStoreQuantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));

   for (int type = 0; type < 8; type++)
@@ -263,7 +263,7 @@ const u8* CommonAsmRoutines::GenQuantizedStoreRuntime(bool single, EQuantizeType
 void CommonAsmRoutines::GenQuantizedLoads()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_lXX).
-  pairedLoadQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  pairedLoadQuantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));

   for (int type = 0; type < 8; type++)
@@ -273,7 +273,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 void CommonAsmRoutines::GenQuantizedSingleLoads()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_lXX).
-  singleLoadQuantized = reinterpret_cast<const u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  singleLoadQuantized = reinterpret_cast<const u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));

   for (int type = 0; type < 8; type++)
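
All four routines pin their pointer tables to a 256-byte boundary for the
reason the in-diff comment cites: with the low byte of the table address
known to be zero, the code generated in Jit64::psq_stXX / psq_lXX can fold
the table index into the low byte of the address (an OR instead of a full
add). A sketch of why that works, under that assumption (this is not the
JIT's actual emitted code):

    #include <cassert>
    #include <cstdint>

    using u8 = std::uint8_t;

    int main()
    {
      // A 256-byte-aligned table of 8 code pointers spans only
      // 8 * sizeof(const u8*) = 64 bytes, so each entry's address differs
      // from the base only in the low byte - and the base's low byte is zero.
      alignas(256) static const u8* table[8] = {};

      const auto base = reinterpret_cast<std::uintptr_t>(table);
      assert((base & 0xFF) == 0);  // guaranteed by the 256-byte alignment

      for (unsigned type = 0; type < 8; type++)
      {
        // OR-ing the scaled index into the base is equivalent to adding it,
        // because the index never overflows out of the (zero) low byte.
        const auto entry = base | (type * sizeof(const u8*));
        assert(entry == base + type * sizeof(const u8*));
      }
      return 0;
    }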