From e28d06353947f65fa8218871d5a488f583ec69ab Mon Sep 17 00:00:00 2001
From: Lioncash <mathew1800@gmail.com>
Date: Wed, 11 Apr 2018 17:51:38 -0400
Subject: [PATCH] x64Emitter: Make the Align* functions return a non-const data
 pointer

There's no real requirement to make this const, and this should also be
decided by the calling code, considering we had places that would simply
cast away the const and carry on.
---
 Source/Core/Common/x64Emitter.cpp                       | 8 ++++----
 Source/Core/Common/x64Emitter.h                         | 8 ++++----
 Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp | 8 ++++----
 3 files changed, 12 insertions(+), 12 deletions(-)

diff --git a/Source/Core/Common/x64Emitter.cpp b/Source/Core/Common/x64Emitter.cpp
index 70111ab710..6f7cb68342 100644
--- a/Source/Core/Common/x64Emitter.cpp
+++ b/Source/Core/Common/x64Emitter.cpp
@@ -146,7 +146,7 @@ void XEmitter::ReserveCodeSpace(int bytes)
     *code++ = 0xCC;
 }
 
-const u8* XEmitter::AlignCodeTo(size_t alignment)
+u8* XEmitter::AlignCodeTo(size_t alignment)
 {
   ASSERT_MSG(DYNA_REC, alignment != 0 && (alignment & (alignment - 1)) == 0,
              "Alignment must be power of two");
@@ -156,17 +156,17 @@ const u8* XEmitter::AlignCodeTo(size_t alignment)
   return code;
 }
 
-const u8* XEmitter::AlignCode4()
+u8* XEmitter::AlignCode4()
 {
   return AlignCodeTo(4);
 }
 
-const u8* XEmitter::AlignCode16()
+u8* XEmitter::AlignCode16()
 {
   return AlignCodeTo(16);
 }
 
-const u8* XEmitter::AlignCodePage()
+u8* XEmitter::AlignCodePage()
 {
   return AlignCodeTo(4096);
 }
diff --git a/Source/Core/Common/x64Emitter.h b/Source/Core/Common/x64Emitter.h
index 3357648716..62811094b5 100644
--- a/Source/Core/Common/x64Emitter.h
+++ b/Source/Core/Common/x64Emitter.h
@@ -381,10 +381,10 @@ public:
   virtual ~XEmitter() = default;
   void SetCodePtr(u8* ptr);
   void ReserveCodeSpace(int bytes);
-  const u8* AlignCodeTo(size_t alignment);
-  const u8* AlignCode4();
-  const u8* AlignCode16();
-  const u8* AlignCodePage();
+  u8* AlignCodeTo(size_t alignment);
+  u8* AlignCode4();
+  u8* AlignCode16();
+  u8* AlignCodePage();
 
   const u8* GetCodePtr() const;
   u8* GetWritableCodePtr();
diff --git a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
index afdd2b1a83..86f29611e7 100644
--- a/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
+++ b/Source/Core/Core/PowerPC/Jit64Common/Jit64AsmCommon.cpp
@@ -231,7 +231,7 @@ constexpr std::array<u8, 8> sizes{{32, 0, 0, 0, 8, 16, 8, 16}};
 void CommonAsmRoutines::GenQuantizedStores()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
-  pairedStoreQuantized = reinterpret_cast<u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  pairedStoreQuantized = reinterpret_cast<u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));
 
   for (int type = 0; type < 8; type++)
@@ -242,7 +242,7 @@ void CommonAsmRoutines::GenQuantizedStores()
 void CommonAsmRoutines::GenQuantizedSingleStores()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_stXX).
-  singleStoreQuantized = reinterpret_cast<u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  singleStoreQuantized = reinterpret_cast<u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));
 
   for (int type = 0; type < 8; type++)
@@ -263,7 +263,7 @@ const u8* CommonAsmRoutines::GenQuantizedStoreRuntime(bool single, EQuantizeType
 void CommonAsmRoutines::GenQuantizedLoads()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_lXX).
-  pairedLoadQuantized = reinterpret_cast<u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  pairedLoadQuantized = reinterpret_cast<u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));
 
   for (int type = 0; type < 8; type++)
@@ -273,7 +273,7 @@ void CommonAsmRoutines::GenQuantizedLoads()
 void CommonAsmRoutines::GenQuantizedSingleLoads()
 {
   // Aligned to 256 bytes as least significant byte needs to be zero (See: Jit64::psq_lXX).
-  singleLoadQuantized = reinterpret_cast<u8**>(const_cast<u8*>(AlignCodeTo(256)));
+  singleLoadQuantized = reinterpret_cast<u8**>(AlignCodeTo(256));
   ReserveCodeSpace(8 * sizeof(u8*));
 
   for (int type = 0; type < 8; type++)