Format all code with new clang-format

DrChat 2017-12-14 20:35:44 -06:00
parent f7e91ab475
commit aaf281351d
66 changed files with 915 additions and 718 deletions


@ -28,11 +28,11 @@
namespace xe {
namespace app {
using xe::ui::FileDropEvent;
using xe::ui::KeyEvent;
using xe::ui::MenuItem;
using xe::ui::MouseEvent;
using xe::ui::UIEvent;
using xe::ui::FileDropEvent;
const std::wstring kBaseTitle = L"xenia";


@ -149,21 +149,23 @@ int xenia_main(const std::vector<std::wstring>& args) {
// This will respond to debugging requests so we can open the debug UI.
std::unique_ptr<xe::debug::ui::DebugWindow> debug_window;
if (FLAGS_debug) {
emulator->processor()->set_debug_listener_request_handler([&](
xe::cpu::Processor* processor) {
if (debug_window) {
return debug_window.get();
}
emulator_window->loop()->PostSynchronous([&]() {
debug_window = xe::debug::ui::DebugWindow::Create(
emulator.get(), emulator_window->loop());
debug_window->window()->on_closed.AddListener([&](xe::ui::UIEvent* e) {
emulator->processor()->set_debug_listener(nullptr);
emulator_window->loop()->Post([&]() { debug_window.reset(); });
emulator->processor()->set_debug_listener_request_handler(
[&](xe::cpu::Processor* processor) {
if (debug_window) {
return debug_window.get();
}
emulator_window->loop()->PostSynchronous([&]() {
debug_window = xe::debug::ui::DebugWindow::Create(
emulator.get(), emulator_window->loop());
debug_window->window()->on_closed.AddListener(
[&](xe::ui::UIEvent* e) {
emulator->processor()->set_debug_listener(nullptr);
emulator_window->loop()->Post(
[&]() { debug_window.reset(); });
});
});
return debug_window.get();
});
});
return debug_window.get();
});
}
auto evt = xe::threading::Event::CreateAutoResetEvent(false);


@ -53,9 +53,14 @@ XAudio2AudioDriver::XAudio2AudioDriver(Memory* memory,
XAudio2AudioDriver::~XAudio2AudioDriver() = default;
const DWORD ChannelMasks[] = {
0, 0, SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY, 0,
0, 0, SPEAKER_FRONT_LEFT | SPEAKER_FRONT_CENTER | SPEAKER_FRONT_RIGHT |
SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
0,
0,
SPEAKER_FRONT_LEFT | SPEAKER_FRONT_RIGHT | SPEAKER_LOW_FREQUENCY,
0,
0,
0,
SPEAKER_FRONT_LEFT | SPEAKER_FRONT_CENTER | SPEAKER_FRONT_RIGHT |
SPEAKER_LOW_FREQUENCY | SPEAKER_BACK_LEFT | SPEAKER_BACK_RIGHT,
0,
};


@ -259,29 +259,26 @@ class Win32SocketServer : public SocketServer {
return false;
}
accept_thread_ = xe::threading::Thread::Create(
{},
[this, port]() {
xe::threading::set_name(std::string("xe::SocketServer localhost:") +
std::to_string(port));
while (socket_ != INVALID_SOCKET) {
sockaddr_in6 client_addr;
int client_count = sizeof(client_addr);
SOCKET client_socket =
accept(socket_, reinterpret_cast<sockaddr*>(&client_addr),
&client_count);
if (client_socket == INVALID_SOCKET) {
continue;
}
accept_thread_ = xe::threading::Thread::Create({}, [this, port]() {
xe::threading::set_name(std::string("xe::SocketServer localhost:") +
std::to_string(port));
while (socket_ != INVALID_SOCKET) {
sockaddr_in6 client_addr;
int client_count = sizeof(client_addr);
SOCKET client_socket = accept(
socket_, reinterpret_cast<sockaddr*>(&client_addr), &client_count);
if (client_socket == INVALID_SOCKET) {
continue;
}
auto client = std::make_unique<Win32Socket>();
if (!client->Accept(client_socket)) {
XELOGE("Unable to accept socket; ignoring");
continue;
}
accept_callback_(std::move(client));
}
});
auto client = std::make_unique<Win32Socket>();
if (!client->Accept(client_socket)) {
XELOGE("Unable to accept socket; ignoring");
continue;
}
accept_callback_(std::move(client));
}
});
return true;
}


@ -202,11 +202,11 @@ typedef struct _UNWIND_INFO {
uint8_t FrameOffset : 4;
UNWIND_CODE UnwindCode[1];
/* UNWIND_CODE MoreUnwindCode[((CountOfCodes + 1) & ~1) - 1];
* union {
* OPTIONAL ULONG ExceptionHandler;
* OPTIONAL ULONG FunctionEntry;
* };
* OPTIONAL ULONG ExceptionData[]; */
* union {
* OPTIONAL ULONG ExceptionHandler;
* OPTIONAL ULONG FunctionEntry;
* };
* OPTIONAL ULONG ExceptionData[]; */
} UNWIND_INFO, *PUNWIND_INFO;
void Win32X64CodeCache::InitializeUnwindEntry(uint8_t* unwind_entry_address,


@ -600,78 +600,78 @@ static const vec128_t xmm_consts[] = {
/* XMMZero */ vec128f(0.0f),
/* XMMOne */ vec128f(1.0f),
/* XMMNegativeOne */ vec128f(-1.0f, -1.0f, -1.0f, -1.0f),
/* XMMFFFF */ vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu,
0xFFFFFFFFu),
/* XMMMaskX16Y16 */ vec128i(0x0000FFFFu, 0xFFFF0000u, 0x00000000u,
0x00000000u),
/* XMMFlipX16Y16 */ vec128i(0x00008000u, 0x00000000u, 0x00000000u,
0x00000000u),
/* XMMFFFF */
vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu),
/* XMMMaskX16Y16 */
vec128i(0x0000FFFFu, 0xFFFF0000u, 0x00000000u, 0x00000000u),
/* XMMFlipX16Y16 */
vec128i(0x00008000u, 0x00000000u, 0x00000000u, 0x00000000u),
/* XMMFixX16Y16 */ vec128f(-32768.0f, 0.0f, 0.0f, 0.0f),
/* XMMNormalizeX16Y16 */ vec128f(
1.0f / 32767.0f, 1.0f / (32767.0f * 65536.0f), 0.0f, 0.0f),
/* XMMNormalizeX16Y16 */
vec128f(1.0f / 32767.0f, 1.0f / (32767.0f * 65536.0f), 0.0f, 0.0f),
/* XMM0001 */ vec128f(0.0f, 0.0f, 0.0f, 1.0f),
/* XMM3301 */ vec128f(3.0f, 3.0f, 0.0f, 1.0f),
/* XMM3333 */ vec128f(3.0f, 3.0f, 3.0f, 3.0f),
/* XMMSignMaskPS */ vec128i(0x80000000u, 0x80000000u, 0x80000000u,
0x80000000u),
/* XMMSignMaskPD */ vec128i(0x00000000u, 0x80000000u, 0x00000000u,
0x80000000u),
/* XMMAbsMaskPS */ vec128i(0x7FFFFFFFu, 0x7FFFFFFFu, 0x7FFFFFFFu,
0x7FFFFFFFu),
/* XMMAbsMaskPD */ vec128i(0xFFFFFFFFu, 0x7FFFFFFFu, 0xFFFFFFFFu,
0x7FFFFFFFu),
/* XMMByteSwapMask */ vec128i(0x00010203u, 0x04050607u, 0x08090A0Bu,
0x0C0D0E0Fu),
/* XMMByteOrderMask */ vec128i(0x01000302u, 0x05040706u, 0x09080B0Au,
0x0D0C0F0Eu),
/* XMMSignMaskPS */
vec128i(0x80000000u, 0x80000000u, 0x80000000u, 0x80000000u),
/* XMMSignMaskPD */
vec128i(0x00000000u, 0x80000000u, 0x00000000u, 0x80000000u),
/* XMMAbsMaskPS */
vec128i(0x7FFFFFFFu, 0x7FFFFFFFu, 0x7FFFFFFFu, 0x7FFFFFFFu),
/* XMMAbsMaskPD */
vec128i(0xFFFFFFFFu, 0x7FFFFFFFu, 0xFFFFFFFFu, 0x7FFFFFFFu),
/* XMMByteSwapMask */
vec128i(0x00010203u, 0x04050607u, 0x08090A0Bu, 0x0C0D0E0Fu),
/* XMMByteOrderMask */
vec128i(0x01000302u, 0x05040706u, 0x09080B0Au, 0x0D0C0F0Eu),
/* XMMPermuteControl15 */ vec128b(15),
/* XMMPermuteByteMask */ vec128b(0x1F),
/* XMMPackD3DCOLORSat */ vec128i(0x404000FFu),
/* XMMPackD3DCOLOR */ vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu,
0x0C000408u),
/* XMMUnpackD3DCOLOR */ vec128i(0xFFFFFF0Eu, 0xFFFFFF0Du, 0xFFFFFF0Cu,
0xFFFFFF0Fu),
/* XMMPackFLOAT16_2 */ vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu,
0x01000302u),
/* XMMUnpackFLOAT16_2 */ vec128i(0x0D0C0F0Eu, 0xFFFFFFFFu, 0xFFFFFFFFu,
0xFFFFFFFFu),
/* XMMPackFLOAT16_4 */ vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0x05040706u,
0x01000302u),
/* XMMUnpackFLOAT16_4 */ vec128i(0x09080B0Au, 0x0D0C0F0Eu, 0xFFFFFFFFu,
0xFFFFFFFFu),
/* XMMPackD3DCOLOR */
vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0x0C000408u),
/* XMMUnpackD3DCOLOR */
vec128i(0xFFFFFF0Eu, 0xFFFFFF0Du, 0xFFFFFF0Cu, 0xFFFFFF0Fu),
/* XMMPackFLOAT16_2 */
vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0x01000302u),
/* XMMUnpackFLOAT16_2 */
vec128i(0x0D0C0F0Eu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu),
/* XMMPackFLOAT16_4 */
vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0x05040706u, 0x01000302u),
/* XMMUnpackFLOAT16_4 */
vec128i(0x09080B0Au, 0x0D0C0F0Eu, 0xFFFFFFFFu, 0xFFFFFFFFu),
/* XMMPackSHORT_Min */ vec128i(0x403F8001u),
/* XMMPackSHORT_Max */ vec128i(0x40407FFFu),
/* XMMPackSHORT_2 */ vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu,
0x01000504u),
/* XMMPackSHORT_4 */ vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0x01000504u,
0x09080D0Cu),
/* XMMUnpackSHORT_2 */ vec128i(0xFFFF0F0Eu, 0xFFFF0D0Cu, 0xFFFFFFFFu,
0xFFFFFFFFu),
/* XMMUnpackSHORT_4 */ vec128i(0xFFFF0B0Au, 0xFFFF0908u, 0xFFFF0F0Eu,
0xFFFF0D0Cu),
/* XMMPackSHORT_2 */
vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0xFFFFFFFFu, 0x01000504u),
/* XMMPackSHORT_4 */
vec128i(0xFFFFFFFFu, 0xFFFFFFFFu, 0x01000504u, 0x09080D0Cu),
/* XMMUnpackSHORT_2 */
vec128i(0xFFFF0F0Eu, 0xFFFF0D0Cu, 0xFFFFFFFFu, 0xFFFFFFFFu),
/* XMMUnpackSHORT_4 */
vec128i(0xFFFF0B0Au, 0xFFFF0908u, 0xFFFF0F0Eu, 0xFFFF0D0Cu),
/* XMMOneOver255 */ vec128f(1.0f / 255.0f),
/* XMMMaskEvenPI16 */ vec128i(0x0000FFFFu, 0x0000FFFFu, 0x0000FFFFu,
0x0000FFFFu),
/* XMMShiftMaskEvenPI16 */ vec128i(0x0000000Fu, 0x0000000Fu, 0x0000000Fu,
0x0000000Fu),
/* XMMShiftMaskPS */ vec128i(0x0000001Fu, 0x0000001Fu, 0x0000001Fu,
0x0000001Fu),
/* XMMShiftByteMask */ vec128i(0x000000FFu, 0x000000FFu, 0x000000FFu,
0x000000FFu),
/* XMMSwapWordMask */ vec128i(0x03030303u, 0x03030303u, 0x03030303u,
0x03030303u),
/* XMMUnsignedDwordMax */ vec128i(0xFFFFFFFFu, 0x00000000u, 0xFFFFFFFFu,
0x00000000u),
/* XMMMaskEvenPI16 */
vec128i(0x0000FFFFu, 0x0000FFFFu, 0x0000FFFFu, 0x0000FFFFu),
/* XMMShiftMaskEvenPI16 */
vec128i(0x0000000Fu, 0x0000000Fu, 0x0000000Fu, 0x0000000Fu),
/* XMMShiftMaskPS */
vec128i(0x0000001Fu, 0x0000001Fu, 0x0000001Fu, 0x0000001Fu),
/* XMMShiftByteMask */
vec128i(0x000000FFu, 0x000000FFu, 0x000000FFu, 0x000000FFu),
/* XMMSwapWordMask */
vec128i(0x03030303u, 0x03030303u, 0x03030303u, 0x03030303u),
/* XMMUnsignedDwordMax */
vec128i(0xFFFFFFFFu, 0x00000000u, 0xFFFFFFFFu, 0x00000000u),
/* XMM255 */ vec128f(255.0f),
/* XMMPI32 */ vec128i(32),
/* XMMSignMaskI8 */ vec128i(0x80808080u, 0x80808080u, 0x80808080u,
0x80808080u),
/* XMMSignMaskI16 */ vec128i(0x80008000u, 0x80008000u, 0x80008000u,
0x80008000u),
/* XMMSignMaskI32 */ vec128i(0x80000000u, 0x80000000u, 0x80000000u,
0x80000000u),
/* XMMSignMaskF32 */ vec128i(0x80000000u, 0x80000000u, 0x80000000u,
0x80000000u),
/* XMMSignMaskI8 */
vec128i(0x80808080u, 0x80808080u, 0x80808080u, 0x80808080u),
/* XMMSignMaskI16 */
vec128i(0x80008000u, 0x80008000u, 0x80008000u, 0x80008000u),
/* XMMSignMaskI32 */
vec128i(0x80000000u, 0x80000000u, 0x80000000u, 0x80000000u),
/* XMMSignMaskF32 */
vec128i(0x80000000u, 0x80000000u, 0x80000000u, 0x80000000u),
/* XMMShortMinPS */ vec128f(SHRT_MIN),
/* XMMShortMaxPS */ vec128f(SHRT_MAX),
/* XMMIntMin */ vec128i(INT_MIN),


@ -2988,8 +2988,9 @@ EMITTER_OPCODE_TABLE(OPCODE_IS_NAN, IS_NAN_F32, IS_NAN_F64);
struct COMPARE_EQ_I8
: Sequence<COMPARE_EQ_I8, I<OPCODE_COMPARE_EQ, I8Op, I8Op, I8Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg8& src1,
const Reg8& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg8& src1,
const Reg8& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg8& src1,
int32_t constant) { e.cmp(src1, constant); });
e.sete(i.dest);
@ -2998,8 +2999,9 @@ struct COMPARE_EQ_I8
struct COMPARE_EQ_I16
: Sequence<COMPARE_EQ_I16, I<OPCODE_COMPARE_EQ, I8Op, I16Op, I16Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg16& src1,
const Reg16& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg16& src1,
const Reg16& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg16& src1,
int32_t constant) { e.cmp(src1, constant); });
e.sete(i.dest);
@ -3008,8 +3010,9 @@ struct COMPARE_EQ_I16
struct COMPARE_EQ_I32
: Sequence<COMPARE_EQ_I32, I<OPCODE_COMPARE_EQ, I8Op, I32Op, I32Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg32& src1,
const Reg32& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg32& src1,
const Reg32& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg32& src1,
int32_t constant) { e.cmp(src1, constant); });
e.sete(i.dest);
@ -3018,8 +3021,9 @@ struct COMPARE_EQ_I32
struct COMPARE_EQ_I64
: Sequence<COMPARE_EQ_I64, I<OPCODE_COMPARE_EQ, I8Op, I64Op, I64Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg64& src1,
const Reg64& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg64& src1,
const Reg64& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg64& src1,
int32_t constant) { e.cmp(src1, constant); });
e.sete(i.dest);
@ -3055,8 +3059,9 @@ EMITTER_OPCODE_TABLE(OPCODE_COMPARE_EQ, COMPARE_EQ_I8, COMPARE_EQ_I16,
struct COMPARE_NE_I8
: Sequence<COMPARE_NE_I8, I<OPCODE_COMPARE_NE, I8Op, I8Op, I8Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg8& src1,
const Reg8& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg8& src1,
const Reg8& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg8& src1,
int32_t constant) { e.cmp(src1, constant); });
e.setne(i.dest);
@ -3065,8 +3070,9 @@ struct COMPARE_NE_I8
struct COMPARE_NE_I16
: Sequence<COMPARE_NE_I16, I<OPCODE_COMPARE_NE, I8Op, I16Op, I16Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg16& src1,
const Reg16& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg16& src1,
const Reg16& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg16& src1,
int32_t constant) { e.cmp(src1, constant); });
e.setne(i.dest);
@ -3075,8 +3081,9 @@ struct COMPARE_NE_I16
struct COMPARE_NE_I32
: Sequence<COMPARE_NE_I32, I<OPCODE_COMPARE_NE, I8Op, I32Op, I32Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg32& src1,
const Reg32& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg32& src1,
const Reg32& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg32& src1,
int32_t constant) { e.cmp(src1, constant); });
e.setne(i.dest);
@ -3085,8 +3092,9 @@ struct COMPARE_NE_I32
struct COMPARE_NE_I64
: Sequence<COMPARE_NE_I64, I<OPCODE_COMPARE_NE, I8Op, I64Op, I64Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeCompareOp(e, i, [](X64Emitter& e, const Reg64& src1,
const Reg64& src2) { e.cmp(src1, src2); },
EmitCommutativeCompareOp(e, i,
[](X64Emitter& e, const Reg64& src1,
const Reg64& src2) { e.cmp(src1, src2); },
[](X64Emitter& e, const Reg64& src1,
int32_t constant) { e.cmp(src1, constant); });
e.setne(i.dest);
@ -3421,8 +3429,10 @@ EMITTER_OPCODE_TABLE(OPCODE_VECTOR_COMPARE_UGE, VECTOR_COMPARE_UGE_V128);
template <typename SEQ, typename REG, typename ARGS>
void EmitAddXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitCommutativeBinaryOp(
e, i, [](X64Emitter& e, const REG& dest_src,
const REG& src) { e.add(dest_src, src); },
e, i,
[](X64Emitter& e, const REG& dest_src, const REG& src) {
e.add(dest_src, src);
},
[](X64Emitter& e, const REG& dest_src, int32_t constant) {
e.add(dest_src, constant);
});
@ -3491,8 +3501,10 @@ void EmitAddCarryXX(X64Emitter& e, const ARGS& i) {
e.sahf();
}
SEQ::EmitCommutativeBinaryOp(
e, i, [](X64Emitter& e, const REG& dest_src,
const REG& src) { e.adc(dest_src, src); },
e, i,
[](X64Emitter& e, const REG& dest_src, const REG& src) {
e.adc(dest_src, src);
},
[](X64Emitter& e, const REG& dest_src, int32_t constant) {
e.adc(dest_src, constant);
});
@ -3530,105 +3542,110 @@ EMITTER_OPCODE_TABLE(OPCODE_ADD_CARRY, ADD_CARRY_I8, ADD_CARRY_I16,
struct VECTOR_ADD
: Sequence<VECTOR_ADD, I<OPCODE_VECTOR_ADD, V128Op, V128Op, V128Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
Xmm src1, Xmm src2) {
const TypeName part_type = static_cast<TypeName>(i.instr->flags & 0xFF);
const uint32_t arithmetic_flags = i.instr->flags >> 8;
bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED);
bool saturate = !!(arithmetic_flags & ARITHMETIC_SATURATE);
switch (part_type) {
case INT8_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpaddusb(dest, src1, src2);
} else {
e.vpaddsb(dest, src1, src2);
}
} else {
e.vpaddb(dest, src1, src2);
}
break;
case INT16_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpaddusw(dest, src1, src2);
} else {
e.vpaddsw(dest, src1, src2);
}
} else {
e.vpaddw(dest, src1, src2);
}
break;
case INT32_TYPE:
if (saturate) {
if (is_unsigned) {
// xmm0 is the only temp register that can be used by src1/src2.
e.vpaddd(e.xmm1, src1, src2);
// If result is smaller than either of the inputs, we've
// overflowed (only need to check one input)
// if (src1 > res) then overflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm2, src1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpxor(e.xmm0, e.xmm1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpcmpgtd(e.xmm0, e.xmm2, e.xmm0);
e.vpor(dest, e.xmm1, e.xmm0);
} else {
// Preserve the sources.
if (dest == src1) {
e.vmovdqa(e.xmm2, src1);
src1 = e.xmm2;
EmitCommutativeBinaryXmmOp(
e, i, [&i](X64Emitter& e, const Xmm& dest, Xmm src1, Xmm src2) {
const TypeName part_type =
static_cast<TypeName>(i.instr->flags & 0xFF);
const uint32_t arithmetic_flags = i.instr->flags >> 8;
bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED);
bool saturate = !!(arithmetic_flags & ARITHMETIC_SATURATE);
switch (part_type) {
case INT8_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpaddusb(dest, src1, src2);
} else {
e.vpaddsb(dest, src1, src2);
}
} else {
e.vpaddb(dest, src1, src2);
}
if (dest == src2) {
e.vmovdqa(e.xmm1, src2);
src2 = e.xmm1;
break;
case INT16_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpaddusw(dest, src1, src2);
} else {
e.vpaddsw(dest, src1, src2);
}
} else {
e.vpaddw(dest, src1, src2);
}
break;
case INT32_TYPE:
if (saturate) {
if (is_unsigned) {
// xmm0 is the only temp register that can be used by
// src1/src2.
e.vpaddd(e.xmm1, src1, src2);
// xmm0 is the only temp register that can be used by src1/src2.
e.vpaddd(dest, src1, src2);
// If result is smaller than either of the inputs, we've
// overflowed (only need to check one input)
// if (src1 > res) then overflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm2, src1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpxor(e.xmm0, e.xmm1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpcmpgtd(e.xmm0, e.xmm2, e.xmm0);
e.vpor(dest, e.xmm1, e.xmm0);
} else {
// Preserve the sources.
if (dest == src1) {
e.vmovdqa(e.xmm2, src1);
src1 = e.xmm2;
}
if (dest == src2) {
e.vmovdqa(e.xmm1, src2);
src2 = e.xmm1;
}
// Overflow results if two inputs are the same sign and the result
// isn't the same sign.
// if ((s32b)(~(src1 ^ src2) & (src1 ^ res)) < 0) then overflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm1, src1, src2);
// xmm0 is the only temp register that can be used by
// src1/src2.
e.vpaddd(dest, src1, src2);
// Move src1 to xmm0 in-case it was the same register as the dest.
// This kills src2 if it's a constant.
if (src1 != e.xmm0) {
e.vmovdqa(e.xmm0, src1);
src1 = e.xmm0;
// Overflow results if two inputs are the same sign and the
// result isn't the same sign. if ((s32b)(~(src1 ^ src2) &
// (src1 ^ res)) < 0) then overflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm1, src1, src2);
// Move src1 to xmm0 in-case it was the same register as the
// dest. This kills src2 if it's a constant.
if (src1 != e.xmm0) {
e.vmovdqa(e.xmm0, src1);
src1 = e.xmm0;
}
e.vpxor(e.xmm2, src1, dest);
e.vpandn(e.xmm1, e.xmm1, e.xmm2);
// High bit of xmm1 is now set if overflowed.
// Set any negative overflowed elements of src1 to INT_MIN
e.vpand(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMSignMaskI32),
e.xmm2);
// Set any positive overflowed elements of src1 to INT_MAX
e.vpandn(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMAbsMaskPS),
e.xmm2);
}
} else {
e.vpaddd(dest, src1, src2);
}
e.vpxor(e.xmm2, src1, dest);
e.vpandn(e.xmm1, e.xmm1, e.xmm2);
// High bit of xmm1 is now set if overflowed.
// Set any negative overflowed elements of src1 to INT_MIN
e.vpand(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMSignMaskI32), e.xmm2);
// Set any positive overflowed elements of src1 to INT_MAX
e.vpandn(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMAbsMaskPS), e.xmm2);
}
} else {
e.vpaddd(dest, src1, src2);
break;
case FLOAT32_TYPE:
assert_false(is_unsigned);
assert_false(saturate);
e.vaddps(dest, src1, src2);
break;
default:
assert_unhandled_case(part_type);
break;
}
break;
case FLOAT32_TYPE:
assert_false(is_unsigned);
assert_false(saturate);
e.vaddps(dest, src1, src2);
break;
default:
assert_unhandled_case(part_type);
break;
}
});
});
}
};
EMITTER_OPCODE_TABLE(OPCODE_VECTOR_ADD, VECTOR_ADD);
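The reflowed VECTOR_ADD sequence keeps the saturation logic from the locklessinc.com article intact; it is easier to follow per lane in scalar form. A minimal sketch of the 32-bit checks, with hypothetical helper names that are not part of the emitter:

#include <cstdint>

// Unsigned saturating add: a wrapped sum is smaller than either input, so
// checking against one input is enough ("if (src1 > res) then overflowed").
uint32_t sat_add_u32(uint32_t a, uint32_t b) {
  uint32_t res = a + b;
  return res < a ? UINT32_MAX : res;
}

// Signed saturating add: overflow only when both inputs share a sign and the
// result's sign differs ("(s32b)(~(src1 ^ src2) & (src1 ^ res)) < 0").
int32_t sat_add_s32(int32_t a, int32_t b) {
  uint32_t ua = static_cast<uint32_t>(a);
  uint32_t ub = static_cast<uint32_t>(b);
  uint32_t res = ua + ub;
  if (static_cast<int32_t>(~(ua ^ ub) & (ua ^ res)) < 0) {
    return a < 0 ? INT32_MIN : INT32_MAX;
  }
  return static_cast<int32_t>(res);
}

The vector path does the same thing with masks: a sign-mask blend writes INT_MIN into negatively overflowed lanes and an abs-mask blend writes INT_MAX into positively overflowed ones.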
@ -3640,8 +3657,10 @@ EMITTER_OPCODE_TABLE(OPCODE_VECTOR_ADD, VECTOR_ADD);
template <typename SEQ, typename REG, typename ARGS>
void EmitSubXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitAssociativeBinaryOp(
e, i, [](X64Emitter& e, const REG& dest_src,
const REG& src) { e.sub(dest_src, src); },
e, i,
[](X64Emitter& e, const REG& dest_src, const REG& src) {
e.sub(dest_src, src);
},
[](X64Emitter& e, const REG& dest_src, int32_t constant) {
e.sub(dest_src, constant);
});
@ -3693,104 +3712,109 @@ EMITTER_OPCODE_TABLE(OPCODE_SUB, SUB_I8, SUB_I16, SUB_I32, SUB_I64, SUB_F32,
struct VECTOR_SUB
: Sequence<VECTOR_SUB, I<OPCODE_VECTOR_SUB, V128Op, V128Op, V128Op>> {
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
Xmm src1, Xmm src2) {
const TypeName part_type = static_cast<TypeName>(i.instr->flags & 0xFF);
const uint32_t arithmetic_flags = i.instr->flags >> 8;
bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED);
bool saturate = !!(arithmetic_flags & ARITHMETIC_SATURATE);
switch (part_type) {
case INT8_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpsubusb(dest, src1, src2);
} else {
e.vpsubsb(dest, src1, src2);
}
} else {
e.vpsubb(dest, src1, src2);
}
break;
case INT16_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpsubusw(dest, src1, src2);
} else {
e.vpsubsw(dest, src1, src2);
}
} else {
e.vpsubw(dest, src1, src2);
}
break;
case INT32_TYPE:
if (saturate) {
if (is_unsigned) {
// xmm0 is the only temp register that can be used by src1/src2.
e.vpsubd(e.xmm1, src1, src2);
// If result is greater than either of the inputs, we've
// underflowed (only need to check one input)
// if (res > src1) then underflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm2, src1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpxor(e.xmm0, e.xmm1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpcmpgtd(e.xmm0, e.xmm0, e.xmm2);
e.vpandn(dest, e.xmm0, e.xmm1);
} else {
// Preserve the sources.
if (dest == src1) {
e.vmovdqa(e.xmm2, src1);
src1 = e.xmm2;
EmitCommutativeBinaryXmmOp(
e, i, [&i](X64Emitter& e, const Xmm& dest, Xmm src1, Xmm src2) {
const TypeName part_type =
static_cast<TypeName>(i.instr->flags & 0xFF);
const uint32_t arithmetic_flags = i.instr->flags >> 8;
bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED);
bool saturate = !!(arithmetic_flags & ARITHMETIC_SATURATE);
switch (part_type) {
case INT8_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpsubusb(dest, src1, src2);
} else {
e.vpsubsb(dest, src1, src2);
}
} else {
e.vpsubb(dest, src1, src2);
}
if (dest == src2) {
e.vmovdqa(e.xmm1, src2);
src2 = e.xmm1;
break;
case INT16_TYPE:
if (saturate) {
// TODO(benvanik): trace DID_SATURATE
if (is_unsigned) {
e.vpsubusw(dest, src1, src2);
} else {
e.vpsubsw(dest, src1, src2);
}
} else {
e.vpsubw(dest, src1, src2);
}
break;
case INT32_TYPE:
if (saturate) {
if (is_unsigned) {
// xmm0 is the only temp register that can be used by
// src1/src2.
e.vpsubd(e.xmm1, src1, src2);
// xmm0 is the only temp register that can be used by src1/src2.
e.vpsubd(dest, src1, src2);
// If result is greater than either of the inputs, we've
// underflowed (only need to check one input)
// if (res > src1) then underflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm2, src1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpxor(e.xmm0, e.xmm1, e.GetXmmConstPtr(XMMSignMaskI32));
e.vpcmpgtd(e.xmm0, e.xmm0, e.xmm2);
e.vpandn(dest, e.xmm0, e.xmm1);
} else {
// Preserve the sources.
if (dest == src1) {
e.vmovdqa(e.xmm2, src1);
src1 = e.xmm2;
}
if (dest == src2) {
e.vmovdqa(e.xmm1, src2);
src2 = e.xmm1;
}
// We can only overflow if the signs of the operands are opposite.
// If signs are opposite and result sign isn't the same as src1's
// sign, we've overflowed.
// if ((s32b)((src1 ^ src2) & (src1 ^ res)) < 0) then overflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm1, src1, src2);
// xmm0 is the only temp register that can be used by
// src1/src2.
e.vpsubd(dest, src1, src2);
// Move src1 to xmm0 in-case it's the same register as the dest.
// This kills src2 if it's a constant.
if (src1 != e.xmm0) {
e.vmovdqa(e.xmm0, src1);
src1 = e.xmm0;
// We can only overflow if the signs of the operands are
// opposite. If signs are opposite and result sign isn't the
// same as src1's sign, we've overflowed. if ((s32b)((src1 ^
// src2) & (src1 ^ res)) < 0) then overflowed
// http://locklessinc.com/articles/sat_arithmetic/
e.vpxor(e.xmm1, src1, src2);
// Move src1 to xmm0 in-case it's the same register as the
// dest. This kills src2 if it's a constant.
if (src1 != e.xmm0) {
e.vmovdqa(e.xmm0, src1);
src1 = e.xmm0;
}
e.vpxor(e.xmm2, src1, dest);
e.vpand(e.xmm1, e.xmm1, e.xmm2);
// High bit of xmm1 is now set if overflowed.
// Set any negative overflowed elements of src1 to INT_MIN
e.vpand(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMSignMaskI32),
e.xmm2);
// Set any positive overflowed elements of src1 to INT_MAX
e.vpandn(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMAbsMaskPS),
e.xmm2);
}
} else {
e.vpsubd(dest, src1, src2);
}
e.vpxor(e.xmm2, src1, dest);
e.vpand(e.xmm1, e.xmm1, e.xmm2);
// High bit of xmm1 is now set if overflowed.
// Set any negative overflowed elements of src1 to INT_MIN
e.vpand(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMSignMaskI32), e.xmm2);
// Set any positive overflowed elements of src1 to INT_MAX
e.vpandn(e.xmm2, src1, e.xmm1);
e.vblendvps(dest, dest, e.GetXmmConstPtr(XMMAbsMaskPS), e.xmm2);
}
} else {
e.vpsubd(dest, src1, src2);
break;
case FLOAT32_TYPE:
e.vsubps(dest, src1, src2);
break;
default:
assert_unhandled_case(part_type);
break;
}
break;
case FLOAT32_TYPE:
e.vsubps(dest, src1, src2);
break;
default:
assert_unhandled_case(part_type);
break;
}
});
});
}
};
EMITTER_OPCODE_TABLE(OPCODE_VECTOR_SUB, VECTOR_SUB);
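VECTOR_SUB mirrors the same trick with the conditions flipped for subtraction; as a scalar sketch under the same assumptions as the add example above (helper names are again hypothetical):

#include <cstdint>

// Unsigned saturating subtract: a wrapped difference is larger than the
// minuend ("if (res > src1) then underflowed"), so clamp those lanes to zero.
uint32_t sat_sub_u32(uint32_t a, uint32_t b) {
  uint32_t res = a - b;
  return res > a ? 0u : res;
}

// Signed saturating subtract: overflow requires operands of opposite sign and
// a result whose sign differs from src1 ("((src1 ^ src2) & (src1 ^ res)) < 0").
int32_t sat_sub_s32(int32_t a, int32_t b) {
  uint32_t ua = static_cast<uint32_t>(a);
  uint32_t ub = static_cast<uint32_t>(b);
  uint32_t res = ua - ub;
  if (static_cast<int32_t>((ua ^ ub) & (ua ^ res)) < 0) {
    return a < 0 ? INT32_MIN : INT32_MAX;
  }
  return static_cast<int32_t>(res);
}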
@ -4469,24 +4493,26 @@ struct MUL_ADD_F32
// FMA extension
if (e.IsFeatureEnabled(kX64EmitFMA)) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmadd213ss(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmadd213ss(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmadd231ss(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovss(i.dest, src1);
e.vfmadd213ss(i.dest, src2, src3);
}
});
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1,
const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmadd213ss(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmadd213ss(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmadd231ss(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovss(i.dest, src1);
e.vfmadd213ss(i.dest, src2, src3);
}
});
} else {
Xmm src3;
if (i.src3.is_constant) {
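The rewrapped MUL_ADD/MUL_SUB sequences all share one dispatch: pick the vfmadd213/vfmadd231 (or vfmsub) form whose in-place destination already aliases one of the sources, otherwise copy src1 into the destination first. The arithmetic itself is a single-rounding fused multiply-add; a standard-library-only sketch (the function name is made up for illustration):

#include <cmath>

// dest = src1 * src2 + src3 with one rounding step, which is what the
// vfmadd213ss/vfmadd231ss forms above compute; the operand-form choice only
// decides which source register gets overwritten.
float mul_add_f32(float src1, float src2, float src3) {
  return std::fma(src1, src2, src3);
}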
@ -4526,24 +4552,26 @@ struct MUL_ADD_F64
// FMA extension
if (e.IsFeatureEnabled(kX64EmitFMA)) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmadd213sd(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmadd213sd(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmadd231sd(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovsd(i.dest, src1);
e.vfmadd213sd(i.dest, src2, src3);
}
});
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1,
const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmadd213sd(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmadd213sd(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmadd231sd(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovsd(i.dest, src1);
e.vfmadd213sd(i.dest, src2, src3);
}
});
} else {
Xmm src3;
if (i.src3.is_constant) {
@ -4589,24 +4617,26 @@ struct MUL_ADD_V128
// than vmul+vadd and it'd be nice to know why. Until we know, it's
// disabled so tests pass.
if (false && e.IsFeatureEnabled(kX64EmitFMA)) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmadd213ps(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmadd213ps(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmadd231ps(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovdqa(i.dest, src1);
e.vfmadd213ps(i.dest, src2, src3);
}
});
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1,
const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmadd213ps(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmadd213ps(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmadd231ps(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovdqa(i.dest, src1);
e.vfmadd213ps(i.dest, src2, src3);
}
});
} else {
Xmm src3;
if (i.src3.is_constant) {
@ -4660,24 +4690,26 @@ struct MUL_SUB_F32
// FMA extension
if (e.IsFeatureEnabled(kX64EmitFMA)) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmsub213ss(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmsub213ss(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmsub231ss(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovss(i.dest, src1);
e.vfmsub213ss(i.dest, src2, src3);
}
});
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1,
const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmsub213ss(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmsub213ss(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmsub231ss(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovss(i.dest, src1);
e.vfmsub213ss(i.dest, src2, src3);
}
});
} else {
Xmm src3;
if (i.src3.is_constant) {
@ -4717,24 +4749,26 @@ struct MUL_SUB_F64
// FMA extension
if (e.IsFeatureEnabled(kX64EmitFMA)) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmsub213sd(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmsub213sd(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmsub231sd(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovsd(i.dest, src1);
e.vfmsub213sd(i.dest, src2, src3);
}
});
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1,
const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmsub213sd(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmsub213sd(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmsub231sd(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovsd(i.dest, src1);
e.vfmsub213sd(i.dest, src2, src3);
}
});
} else {
Xmm src3;
if (i.src3.is_constant) {
@ -4778,24 +4812,26 @@ struct MUL_SUB_V128
// FMA extension
if (e.IsFeatureEnabled(kX64EmitFMA)) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmsub213ps(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmsub213ps(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmsub231ps(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovdqa(i.dest, src1);
e.vfmsub213ps(i.dest, src2, src3);
}
});
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1,
const Xmm& src2) {
Xmm src3 = i.src3.is_constant ? e.xmm1 : i.src3;
if (i.src3.is_constant) {
e.LoadConstantXmm(e.xmm1, i.src3.constant());
}
if (i.dest == src1) {
e.vfmsub213ps(i.dest, src2, src3);
} else if (i.dest == src2) {
e.vfmsub213ps(i.dest, src1, src3);
} else if (i.dest == i.src3) {
e.vfmsub231ps(i.dest, src1, src2);
} else {
// Dest not equal to anything
e.vmovdqa(i.dest, src1);
e.vfmsub213ps(i.dest, src2, src3);
}
});
} else {
Xmm src3;
if (i.src3.is_constant) {
@ -5098,8 +5134,10 @@ EMITTER_OPCODE_TABLE(OPCODE_DOT_PRODUCT_4, DOT_PRODUCT_4_V128);
template <typename SEQ, typename REG, typename ARGS>
void EmitAndXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitCommutativeBinaryOp(
e, i, [](X64Emitter& e, const REG& dest_src,
const REG& src) { e.and_(dest_src, src); },
e, i,
[](X64Emitter& e, const REG& dest_src, const REG& src) {
e.and_(dest_src, src);
},
[](X64Emitter& e, const REG& dest_src, int32_t constant) {
e.and_(dest_src, constant);
});
@ -5141,8 +5179,10 @@ EMITTER_OPCODE_TABLE(OPCODE_AND, AND_I8, AND_I16, AND_I32, AND_I64, AND_V128);
template <typename SEQ, typename REG, typename ARGS>
void EmitOrXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitCommutativeBinaryOp(
e, i, [](X64Emitter& e, const REG& dest_src,
const REG& src) { e.or_(dest_src, src); },
e, i,
[](X64Emitter& e, const REG& dest_src, const REG& src) {
e.or_(dest_src, src);
},
[](X64Emitter& e, const REG& dest_src, int32_t constant) {
e.or_(dest_src, constant);
});
@ -5184,8 +5224,10 @@ EMITTER_OPCODE_TABLE(OPCODE_OR, OR_I8, OR_I16, OR_I32, OR_I64, OR_V128);
template <typename SEQ, typename REG, typename ARGS>
void EmitXorXX(X64Emitter& e, const ARGS& i) {
SEQ::EmitCommutativeBinaryOp(
e, i, [](X64Emitter& e, const REG& dest_src,
const REG& src) { e.xor_(dest_src, src); },
e, i,
[](X64Emitter& e, const REG& dest_src, const REG& src) {
e.xor_(dest_src, src);
},
[](X64Emitter& e, const REG& dest_src, int32_t constant) {
e.xor_(dest_src, constant);
});
@ -6209,57 +6251,59 @@ struct VECTOR_AVERAGE
return _mm_load_si128(reinterpret_cast<__m128i*>(value));
}
static void Emit(X64Emitter& e, const EmitArgType& i) {
EmitCommutativeBinaryXmmOp(e, i, [&i](X64Emitter& e, const Xmm& dest,
const Xmm& src1, const Xmm& src2) {
const TypeName part_type = static_cast<TypeName>(i.instr->flags & 0xFF);
const uint32_t arithmetic_flags = i.instr->flags >> 8;
bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED);
switch (part_type) {
case INT8_TYPE:
if (is_unsigned) {
e.vpavgb(dest, src1, src2);
} else {
assert_always();
EmitCommutativeBinaryXmmOp(
e, i,
[&i](X64Emitter& e, const Xmm& dest, const Xmm& src1, const Xmm& src2) {
const TypeName part_type =
static_cast<TypeName>(i.instr->flags & 0xFF);
const uint32_t arithmetic_flags = i.instr->flags >> 8;
bool is_unsigned = !!(arithmetic_flags & ARITHMETIC_UNSIGNED);
switch (part_type) {
case INT8_TYPE:
if (is_unsigned) {
e.vpavgb(dest, src1, src2);
} else {
assert_always();
}
break;
case INT16_TYPE:
if (is_unsigned) {
e.vpavgw(dest, src1, src2);
} else {
assert_always();
}
break;
case INT32_TYPE:
// No 32bit averages in AVX.
if (is_unsigned) {
if (i.src2.is_constant) {
e.LoadConstantXmm(e.xmm0, i.src2.constant());
e.lea(e.r9, e.StashXmm(1, e.xmm0));
} else {
e.lea(e.r9, e.StashXmm(1, i.src2));
}
e.lea(e.r8, e.StashXmm(0, i.src1));
e.CallNativeSafe(
reinterpret_cast<void*>(EmulateVectorAverageUnsignedI32));
e.vmovaps(i.dest, e.xmm0);
} else {
if (i.src2.is_constant) {
e.LoadConstantXmm(e.xmm0, i.src2.constant());
e.lea(e.r9, e.StashXmm(1, e.xmm0));
} else {
e.lea(e.r9, e.StashXmm(1, i.src2));
}
e.lea(e.r8, e.StashXmm(0, i.src1));
e.CallNativeSafe(
reinterpret_cast<void*>(EmulateVectorAverageSignedI32));
e.vmovaps(i.dest, e.xmm0);
}
break;
default:
assert_unhandled_case(part_type);
break;
}
break;
case INT16_TYPE:
if (is_unsigned) {
e.vpavgw(dest, src1, src2);
} else {
assert_always();
}
break;
case INT32_TYPE:
// No 32bit averages in AVX.
if (is_unsigned) {
if (i.src2.is_constant) {
e.LoadConstantXmm(e.xmm0, i.src2.constant());
e.lea(e.r9, e.StashXmm(1, e.xmm0));
} else {
e.lea(e.r9, e.StashXmm(1, i.src2));
}
e.lea(e.r8, e.StashXmm(0, i.src1));
e.CallNativeSafe(
reinterpret_cast<void*>(EmulateVectorAverageUnsignedI32));
e.vmovaps(i.dest, e.xmm0);
} else {
if (i.src2.is_constant) {
e.LoadConstantXmm(e.xmm0, i.src2.constant());
e.lea(e.r9, e.StashXmm(1, e.xmm0));
} else {
e.lea(e.r9, e.StashXmm(1, i.src2));
}
e.lea(e.r8, e.StashXmm(0, i.src1));
e.CallNativeSafe(
reinterpret_cast<void*>(EmulateVectorAverageSignedI32));
e.vmovaps(i.dest, e.xmm0);
}
break;
default:
assert_unhandled_case(part_type);
break;
}
});
});
}
};
EMITTER_OPCODE_TABLE(OPCODE_VECTOR_AVERAGE, VECTOR_AVERAGE);
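For 32-bit lanes VECTOR_AVERAGE has no AVX instruction to lean on, so the sequence stashes the operands and calls an emulation helper through CallNativeSafe. Per lane, that helper only needs a rounding average computed without intermediate overflow; a scalar sketch, assuming round-half-up semantics like vpavgb/vpavgw (an assumption about the helpers, not something this diff states):

#include <cstdint>

// Unsigned rounding average: (a + b + 1) / 2 evaluated in 64-bit so the sum
// cannot wrap.
uint32_t average_u32(uint32_t a, uint32_t b) {
  return static_cast<uint32_t>(
      (static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + 1) >> 1);
}

// Signed variant under the same rounding assumption.
int32_t average_s32(int32_t a, int32_t b) {
  return static_cast<int32_t>(
      (static_cast<int64_t>(a) + static_cast<int64_t>(b) + 1) >> 1);
}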


@ -420,7 +420,7 @@ bool RegisterAllocationPass::SpillOneRegister(HIRBuilder* builder, Block* block,
auto new_value = builder->LoadLocal(spill_value->local_slot);
auto spill_load = builder->last_instr();
spill_load->MoveBefore(next_use->instr);
// Note: implicit first use added.
// Note: implicit first use added.
#if ASSERT_NO_CYCLES
builder->AssertNoCycles();


@ -1963,7 +1963,10 @@ Value* HIRBuilder::CountLeadingZeros(Value* value) {
if (value->IsConstantZero()) {
static const uint8_t zeros[] = {
8, 16, 32, 64,
8,
16,
32,
64,
};
assert_true(value->type <= INT64_TYPE);
return LoadConstantUint8(zeros[value->type]);


@ -15,7 +15,10 @@ namespace hir {
#define DEFINE_OPCODE(num, name, sig, flags) \
const OpcodeInfo num##_info = { \
flags, sig, name, num, \
flags, \
sig, \
name, \
num, \
};
#include "xenia/cpu/hir/opcodes.inl"
#undef DEFINE_OPCODE


@ -87,8 +87,7 @@ enum PackType : uint16_t {
PACK_TYPE_16_IN_32 = 7,
PACK_TYPE_MODE = 0x000F, // just to get the mode
// Unpack to low or high parts.
// Unpack to low or high parts.
PACK_TYPE_TO_LO = 0 << 12,
PACK_TYPE_TO_HI = 1 << 12,


@ -52,7 +52,12 @@ bool MMIOHandler::RegisterRange(uint32_t virtual_address, uint32_t mask,
MMIOReadCallback read_callback,
MMIOWriteCallback write_callback) {
mapped_ranges_.push_back({
virtual_address, mask, size, context, read_callback, write_callback,
virtual_address,
mask,
size,
context,
read_callback,
write_callback,
});
return true;
}


@ -2073,7 +2073,7 @@ int InstrEmit_vpkd3d128(PPCHIRBuilder& f, const InstrData& i) {
uint32_t control = kIdentityPermuteMask; // original
switch (pack) {
case 1: // VPACK_32
// VPACK_32 & shift = 3 puts lower 32 bits in x (leftmost slot).
// VPACK_32 & shift = 3 puts lower 32 bits in x (leftmost slot).
switch (shift) {
case 0:
control = MakePermuteMask(0, 0, 0, 1, 0, 2, 1, 3);


@ -143,7 +143,7 @@ int InstrEmit_branch(PPCHIRBuilder& f, const char* src, uint64_t cia,
}
return 0;
}
} // namespace ppc
int InstrEmit_bx(PPCHIRBuilder& f, const InstrData& i) {
// if AA then
@ -799,6 +799,6 @@ void RegisterEmitCategoryControl() {
XEREGISTERINSTR(mtmsrd);
}
} // namespace ppc
} // namespace cpu
} // namespace xe
} // namespace xe


@ -341,14 +341,16 @@ std::vector<BlockInfo> PPCScanner::FindBlocks(GuestFunction* function) {
if (ends_block) {
in_block = false;
block_map[block_start] = {
block_start, address,
block_start,
address,
};
}
}
if (in_block) {
block_map[block_start] = {
block_start, end_address,
block_start,
end_address,
};
}


@ -18,9 +18,10 @@ using xe::cpu::ppc::PPCContext;
TEST_CASE("ADD_I8", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Add(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Add(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -90,9 +91,10 @@ TEST_CASE("ADD_I8", "[instr]") {
TEST_CASE("ADD_I16", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Add(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT16_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Add(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT16_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -162,9 +164,10 @@ TEST_CASE("ADD_I16", "[instr]") {
TEST_CASE("ADD_I32", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Add(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT32_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Add(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT32_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -304,9 +307,10 @@ TEST_CASE("ADD_I64", "[instr]") {
TEST_CASE("ADD_F32", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreFPR(b, 3, b.Convert(b.Add(b.Convert(LoadFPR(b, 4), FLOAT32_TYPE),
b.Convert(LoadFPR(b, 5), FLOAT32_TYPE)),
FLOAT64_TYPE));
StoreFPR(b, 3,
b.Convert(b.Add(b.Convert(LoadFPR(b, 4), FLOAT32_TYPE),
b.Convert(LoadFPR(b, 5), FLOAT32_TYPE)),
FLOAT64_TYPE));
b.Return();
});
test.Run(


@ -19,10 +19,11 @@ using xe::cpu::ppc::PPCContext;
TEST_CASE("EXTRACT_INT8", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4),
b.Truncate(LoadGPR(b, 4), INT8_TYPE),
INT8_TYPE),
INT64_TYPE));
StoreGPR(
b, 3,
b.ZeroExtend(b.Extract(LoadVR(b, 4),
b.Truncate(LoadGPR(b, 4), INT8_TYPE), INT8_TYPE),
INT64_TYPE));
b.Return();
});
for (int i = 0; i < 16; ++i) {
@ -42,9 +43,10 @@ TEST_CASE("EXTRACT_INT8", "[instr]") {
TEST_CASE("EXTRACT_INT8_CONSTANT", "[instr]") {
for (int i = 0; i < 16; ++i) {
TestFunction([i](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i),
INT8_TYPE),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(
b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i), INT8_TYPE),
INT64_TYPE));
b.Return();
})
.Run(
@ -62,10 +64,11 @@ TEST_CASE("EXTRACT_INT8_CONSTANT", "[instr]") {
TEST_CASE("EXTRACT_INT16", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4),
b.Truncate(LoadGPR(b, 4), INT8_TYPE),
INT16_TYPE),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(
b.Extract(LoadVR(b, 4), b.Truncate(LoadGPR(b, 4), INT8_TYPE),
INT16_TYPE),
INT64_TYPE));
b.Return();
});
for (int i = 0; i < 8; ++i) {
@ -85,9 +88,10 @@ TEST_CASE("EXTRACT_INT16", "[instr]") {
TEST_CASE("EXTRACT_INT16_CONSTANT", "[instr]") {
for (int i = 0; i < 8; ++i) {
TestFunction([i](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i),
INT16_TYPE),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(
b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i), INT16_TYPE),
INT64_TYPE));
b.Return();
})
.Run(
@ -104,10 +108,11 @@ TEST_CASE("EXTRACT_INT16_CONSTANT", "[instr]") {
TEST_CASE("EXTRACT_INT32", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4),
b.Truncate(LoadGPR(b, 4), INT8_TYPE),
INT32_TYPE),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(
b.Extract(LoadVR(b, 4), b.Truncate(LoadGPR(b, 4), INT8_TYPE),
INT32_TYPE),
INT64_TYPE));
b.Return();
});
for (int i = 0; i < 4; ++i) {
@ -126,9 +131,10 @@ TEST_CASE("EXTRACT_INT32", "[instr]") {
TEST_CASE("EXTRACT_INT32_CONSTANT", "[instr]") {
for (int i = 0; i < 4; ++i) {
TestFunction([i](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i),
INT32_TYPE),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(
b.Extract(LoadVR(b, 4), b.LoadConstantInt8(i), INT32_TYPE),
INT64_TYPE));
b.Return();
})
.Run(


@ -20,8 +20,9 @@ using xe::cpu::ppc::PPCContext;
TEST_CASE("INSERT_INT8", "[instr]") {
for (int i = 0; i < 16; ++i) {
TestFunction test([i](HIRBuilder& b) {
StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
StoreVR(b, 3,
b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
b.Return();
});
test.Run(
@ -44,8 +45,9 @@ TEST_CASE("INSERT_INT8", "[instr]") {
TEST_CASE("INSERT_INT16", "[instr]") {
for (int i = 0; i < 8; ++i) {
TestFunction test([i](HIRBuilder& b) {
StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
b.Truncate(LoadGPR(b, 5), INT16_TYPE)));
StoreVR(b, 3,
b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
b.Truncate(LoadGPR(b, 5), INT16_TYPE)));
b.Return();
});
test.Run(
@ -66,8 +68,9 @@ TEST_CASE("INSERT_INT16", "[instr]") {
TEST_CASE("INSERT_INT32", "[instr]") {
for (int i = 0; i < 4; ++i) {
TestFunction test([i](HIRBuilder& b) {
StoreVR(b, 3, b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
b.Truncate(LoadGPR(b, 5), INT32_TYPE)));
StoreVR(b, 3,
b.Insert(LoadVR(b, 4), b.LoadConstantInt32(i),
b.Truncate(LoadGPR(b, 5), INT32_TYPE)));
b.Return();
});
test.Run(


@ -19,8 +19,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
{
uint32_t mask = MakePermuteMask(0, 0, 0, 1, 0, 2, 0, 3);
TestFunction([mask](HIRBuilder& b) {
StoreVR(b, 3, b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
LoadVR(b, 5), INT32_TYPE));
StoreVR(b, 3,
b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4), LoadVR(b, 5),
INT32_TYPE));
b.Return();
})
.Run(
@ -36,8 +37,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
{
uint32_t mask = MakePermuteMask(1, 0, 1, 1, 1, 2, 1, 3);
TestFunction([mask](HIRBuilder& b) {
StoreVR(b, 3, b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
LoadVR(b, 5), INT32_TYPE));
StoreVR(b, 3,
b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4), LoadVR(b, 5),
INT32_TYPE));
b.Return();
})
.Run(
@ -53,8 +55,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
{
uint32_t mask = MakePermuteMask(0, 3, 0, 2, 0, 1, 0, 0);
TestFunction([mask](HIRBuilder& b) {
StoreVR(b, 3, b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
LoadVR(b, 5), INT32_TYPE));
StoreVR(b, 3,
b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4), LoadVR(b, 5),
INT32_TYPE));
b.Return();
})
.Run(
@ -70,8 +73,9 @@ TEST_CASE("PERMUTE_V128_BY_INT32_CONSTANT", "[instr]") {
{
uint32_t mask = MakePermuteMask(1, 3, 1, 2, 1, 1, 1, 0);
TestFunction([mask](HIRBuilder& b) {
StoreVR(b, 3, b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4),
LoadVR(b, 5), INT32_TYPE));
StoreVR(b, 3,
b.Permute(b.LoadConstantUint32(mask), LoadVR(b, 4), LoadVR(b, 5),
INT32_TYPE));
b.Return();
})
.Run(


@ -20,8 +20,8 @@ namespace xe {
namespace cpu {
namespace sandbox {
using xe::cpu::ppc::PPCContext;
using xe::cpu::Runtime;
using xe::cpu::ppc::PPCContext;
// TODO(benvanik): simple memory? move more into core?


@ -16,9 +16,10 @@ using xe::cpu::ppc::PPCContext;
TEST_CASE("SHA_I8", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Sha(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Sha(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -70,9 +71,10 @@ TEST_CASE("SHA_I8", "[instr]") {
TEST_CASE("SHA_I16", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Sha(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Sha(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -124,9 +126,10 @@ TEST_CASE("SHA_I16", "[instr]") {
TEST_CASE("SHA_I32", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Sha(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Sha(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -178,8 +181,9 @@ TEST_CASE("SHA_I32", "[instr]") {
TEST_CASE("SHA_I64", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.Sha(b.Truncate(LoadGPR(b, 4), INT64_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
StoreGPR(b, 3,
b.Sha(b.Truncate(LoadGPR(b, 4), INT64_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
b.Return();
});
test.Run(


@ -16,9 +16,10 @@ using xe::cpu::ppc::PPCContext;
TEST_CASE("SHL_I8", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Shl(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Shl(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -70,9 +71,10 @@ TEST_CASE("SHL_I8", "[instr]") {
TEST_CASE("SHL_I16", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Shl(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Shl(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -124,9 +126,10 @@ TEST_CASE("SHL_I16", "[instr]") {
TEST_CASE("SHL_I32", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Shl(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Shl(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -178,8 +181,9 @@ TEST_CASE("SHL_I32", "[instr]") {
TEST_CASE("SHL_I64", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.Shl(b.Truncate(LoadGPR(b, 4), INT64_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
StoreGPR(b, 3,
b.Shl(b.Truncate(LoadGPR(b, 4), INT64_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
b.Return();
});
test.Run(


@ -17,9 +17,10 @@ using xe::cpu::ppc::PPCContext;
TEST_CASE("SHR_I8", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Shr(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Shr(b.Truncate(LoadGPR(b, 4), INT8_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -71,9 +72,10 @@ TEST_CASE("SHR_I8", "[instr]") {
TEST_CASE("SHR_I16", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Shr(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Shr(b.Truncate(LoadGPR(b, 4), INT16_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -125,9 +127,10 @@ TEST_CASE("SHR_I16", "[instr]") {
TEST_CASE("SHR_I32", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.ZeroExtend(b.Shr(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
StoreGPR(b, 3,
b.ZeroExtend(b.Shr(b.Truncate(LoadGPR(b, 4), INT32_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)),
INT64_TYPE));
b.Return();
});
test.Run(
@ -179,8 +182,9 @@ TEST_CASE("SHR_I32", "[instr]") {
TEST_CASE("SHR_I64", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreGPR(b, 3, b.Shr(b.Truncate(LoadGPR(b, 4), INT64_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
StoreGPR(b, 3,
b.Shr(b.Truncate(LoadGPR(b, 4), INT64_TYPE),
b.Truncate(LoadGPR(b, 5), INT8_TYPE)));
b.Return();
});
test.Run(


@ -47,8 +47,9 @@ TEST_CASE("VECTOR_ADD_I8", "[instr]") {
TEST_CASE("VECTOR_ADD_I8_SAT_SIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_SATURATE));
StoreVR(b, 3,
b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_SATURATE));
b.Return();
});
test.Run(
@ -73,8 +74,9 @@ TEST_CASE("VECTOR_ADD_I8_SAT_SIGNED", "[instr]") {
TEST_CASE("VECTOR_ADD_I8_SAT_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_SATURATE | ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_SATURATE | ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(
@ -124,8 +126,9 @@ TEST_CASE("VECTOR_ADD_I16", "[instr]") {
TEST_CASE("VECTOR_ADD_I16_SAT_SIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_SATURATE));
StoreVR(b, 3,
b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_SATURATE));
b.Return();
});
test.Run(
@ -150,8 +153,9 @@ TEST_CASE("VECTOR_ADD_I16_SAT_SIGNED", "[instr]") {
TEST_CASE("VECTOR_ADD_I16_SAT_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_SATURATE | ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_SATURATE | ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(
@ -201,8 +205,9 @@ TEST_CASE("VECTOR_ADD_I32", "[instr]") {
TEST_CASE("VECTOR_ADD_I32_SAT_SIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_SATURATE));
StoreVR(b, 3,
b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_SATURATE));
b.Return();
});
test.Run(
@ -236,8 +241,9 @@ TEST_CASE("VECTOR_ADD_I32_SAT_SIGNED", "[instr]") {
TEST_CASE("VECTOR_ADD_I32_SAT_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_SATURATE | ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorAdd(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_SATURATE | ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(


@ -39,8 +39,9 @@ TEST_CASE("VECTOR_MAX_I8_SIGNED", "[instr]") {
TEST_CASE("VECTOR_MAX_I8_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorMax(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorMax(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(
@ -75,8 +76,9 @@ TEST_CASE("VECTOR_MAX_I16_SIGNED", "[instr]") {
TEST_CASE("VECTOR_MAX_I16_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorMax(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorMax(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(
@ -108,8 +110,9 @@ TEST_CASE("VECTOR_MAX_I32_SIGNED", "[instr]") {
TEST_CASE("VECTOR_MAX_I32_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorMax(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorMax(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(


@ -39,8 +39,9 @@ TEST_CASE("VECTOR_MIN_I8_SIGNED", "[instr]") {
TEST_CASE("VECTOR_MIN_I8_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorMin(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorMin(LoadVR(b, 4), LoadVR(b, 5), INT8_TYPE,
ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(
@ -75,8 +76,9 @@ TEST_CASE("VECTOR_MIN_I16_SIGNED", "[instr]") {
TEST_CASE("VECTOR_MIN_I16_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorMin(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorMin(LoadVR(b, 4), LoadVR(b, 5), INT16_TYPE,
ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(
@ -108,8 +110,9 @@ TEST_CASE("VECTOR_MIN_I32_SIGNED", "[instr]") {
TEST_CASE("VECTOR_MIN_I32_UNSIGNED", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorMin(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_UNSIGNED));
StoreVR(b, 3,
b.VectorMin(LoadVR(b, 4), LoadVR(b, 5), INT32_TYPE,
ARITHMETIC_UNSIGNED));
b.Return();
});
test.Run(


@ -37,10 +37,12 @@ TEST_CASE("VECTOR_SHA_I8", "[instr]") {
TEST_CASE("VECTOR_SHA_I8_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorSha(LoadVR(b, 4), b.LoadConstantVec128(vec128b(
0, 1, 2, 8, 4, 4, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15)),
INT8_TYPE));
StoreVR(
b, 3,
b.VectorSha(LoadVR(b, 4),
b.LoadConstantVec128(vec128b(0, 1, 2, 8, 4, 4, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15)),
INT8_TYPE));
b.Return();
});
test.Run(
@ -76,9 +78,11 @@ TEST_CASE("VECTOR_SHA_I16", "[instr]") {
TEST_CASE("VECTOR_SHA_I16_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorSha(LoadVR(b, 4), b.LoadConstantVec128(vec128s(
0, 1, 8, 15, 15, 8, 1, 16)),
INT16_TYPE));
StoreVR(
b, 3,
b.VectorSha(LoadVR(b, 4),
b.LoadConstantVec128(vec128s(0, 1, 8, 15, 15, 8, 1, 16)),
INT16_TYPE));
b.Return();
});
test.Run(
@ -122,12 +126,14 @@ TEST_CASE("VECTOR_SHA_I32", "[instr]") {
TEST_CASE("VECTOR_SHA_I32_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorSha(LoadVR(b, 4),
b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
INT32_TYPE));
StoreVR(b, 4, b.VectorSha(LoadVR(b, 5),
b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
INT32_TYPE));
StoreVR(
b, 3,
b.VectorSha(LoadVR(b, 4), b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
INT32_TYPE));
StoreVR(
b, 4,
b.VectorSha(LoadVR(b, 5), b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
INT32_TYPE));
b.Return();
});
test.Run(


@ -37,10 +37,12 @@ TEST_CASE("VECTOR_SHL_I8", "[instr]") {
TEST_CASE("VECTOR_SHL_I8_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorShl(LoadVR(b, 4), b.LoadConstantVec128(vec128b(
0, 1, 2, 8, 4, 4, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15)),
INT8_TYPE));
StoreVR(
b, 3,
b.VectorShl(LoadVR(b, 4),
b.LoadConstantVec128(vec128b(0, 1, 2, 8, 4, 4, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15)),
INT8_TYPE));
b.Return();
});
test.Run(
@ -76,9 +78,11 @@ TEST_CASE("VECTOR_SHL_I16", "[instr]") {
TEST_CASE("VECTOR_SHL_I16_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorShl(LoadVR(b, 4), b.LoadConstantVec128(vec128s(
0, 1, 8, 15, 15, 8, 1, 16)),
INT16_TYPE));
StoreVR(
b, 3,
b.VectorShl(LoadVR(b, 4),
b.LoadConstantVec128(vec128s(0, 1, 8, 15, 15, 8, 1, 16)),
INT16_TYPE));
b.Return();
});
test.Run(
@ -122,12 +126,14 @@ TEST_CASE("VECTOR_SHL_I32", "[instr]") {
TEST_CASE("VECTOR_SHL_I32_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorShl(LoadVR(b, 4),
b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
INT32_TYPE));
StoreVR(b, 4, b.VectorShl(LoadVR(b, 5),
b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
INT32_TYPE));
StoreVR(
b, 3,
b.VectorShl(LoadVR(b, 4), b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
INT32_TYPE));
StoreVR(
b, 4,
b.VectorShl(LoadVR(b, 5), b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
INT32_TYPE));
b.Return();
});
test.Run(


@ -37,10 +37,12 @@ TEST_CASE("VECTOR_SHR_I8", "[instr]") {
TEST_CASE("VECTOR_SHR_I8_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorShr(LoadVR(b, 4), b.LoadConstantVec128(vec128b(
0, 1, 2, 8, 4, 4, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15)),
INT8_TYPE));
StoreVR(
b, 3,
b.VectorShr(LoadVR(b, 4),
b.LoadConstantVec128(vec128b(0, 1, 2, 8, 4, 4, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15)),
INT8_TYPE));
b.Return();
});
test.Run(
@ -76,9 +78,11 @@ TEST_CASE("VECTOR_SHR_I16", "[instr]") {
TEST_CASE("VECTOR_SHR_I16_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorShr(LoadVR(b, 4), b.LoadConstantVec128(vec128s(
0, 1, 8, 15, 15, 8, 1, 16)),
INT16_TYPE));
StoreVR(
b, 3,
b.VectorShr(LoadVR(b, 4),
b.LoadConstantVec128(vec128s(0, 1, 8, 15, 15, 8, 1, 16)),
INT16_TYPE));
b.Return();
});
test.Run(
@ -122,12 +126,14 @@ TEST_CASE("VECTOR_SHR_I32", "[instr]") {
TEST_CASE("VECTOR_SHR_I32_CONSTANT", "[instr]") {
TestFunction test([](HIRBuilder& b) {
StoreVR(b, 3, b.VectorShr(LoadVR(b, 4),
b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
INT32_TYPE));
StoreVR(b, 4, b.VectorShr(LoadVR(b, 5),
b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
INT32_TYPE));
StoreVR(
b, 3,
b.VectorShr(LoadVR(b, 4), b.LoadConstantVec128(vec128i(0, 1, 16, 31)),
INT32_TYPE));
StoreVR(
b, 4,
b.VectorShr(LoadVR(b, 5), b.LoadConstantVec128(vec128i(31, 16, 1, 32)),
INT32_TYPE));
b.Return();
});
test.Run(

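Every hunk in these instruction-test files is the same mechanical change: once a nested HIRBuilder call overflows the 80-column Google-style limit, the newer clang-format breaks before the inner call instead of wrapping its trailing arguments under the opening parenthesis. A minimal sketch of the two layouts, using hypothetical Store/Add/Load stand-ins rather than the real StoreVR/VectorAdd helpers:

// Hypothetical stand-ins for the HIRBuilder helpers; none of these names are
// from the xenia codebase.
struct Value {};
static Value Load(int reg) { return Value{}; }
static Value Add(Value a, Value b, unsigned flags) { return a; }
static void Store(int reg, Value v) {}

static void FormatExample() {
  const unsigned kSaturateUnsignedFlags = 0x3;  // placeholder flag bits
  // Old layout: the outer call stayed on one line and the inner call's
  // overflow was aligned under its opening parenthesis:
  //   Store(3, Add(Load(4), Load(5),
  //                kSaturateUnsignedFlags));
  // New layout: break before the nested call so it starts on its own line.
  Store(3,
        Add(Load(4), Load(5),
            kSaturateUnsignedFlags));
}

int main() {
  FormatExample();
  return 0;
}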

@ -395,7 +395,10 @@ void DebugWindow::DrawSourcePane() {
ImGui::SameLine();
if (function->is_guest()) {
const char* kSourceDisplayModes[] = {
"PPC", "PPC+HIR+x64", "PPC+HIR (opt)+x64", "PPC+x64",
"PPC",
"PPC+HIR+x64",
"PPC+HIR (opt)+x64",
"PPC+x64",
};
ImGui::PushItemWidth(90);
ImGui::Combo("##display_mode", &state_.source_display_mode,
@ -1338,8 +1341,9 @@ void DebugWindow::DrawBreakpointsPane() {
function->MapGuestAddressToMachineCode(
breakpoint->guest_address()));
} else {
NavigateToFunction(function, function->MapMachineCodeToGuestAddress(
breakpoint->host_address()),
NavigateToFunction(function,
function->MapMachineCodeToGuestAddress(
breakpoint->host_address()),
breakpoint->host_address());
}
}


@ -782,7 +782,9 @@ GL4CommandProcessor::UpdateStatus GL4CommandProcessor::UpdateRenderTargets() {
GLuint color_targets[4] = {kAnyTarget, kAnyTarget, kAnyTarget, kAnyTarget};
if (enable_mode == ModeControl::kColorDepth) {
uint32_t color_info[4] = {
regs.rb_color_info, regs.rb_color1_info, regs.rb_color2_info,
regs.rb_color_info,
regs.rb_color1_info,
regs.rb_color2_info,
regs.rb_color3_info,
};
// A2XX_RB_COLOR_MASK_WRITE_* == D3DRS_COLORWRITEENABLE
@ -1099,7 +1101,9 @@ GL4CommandProcessor::UpdateStatus GL4CommandProcessor::UpdateRasterizerState(
}
static const GLenum kFillModes[3] = {
GL_POINT, GL_LINE, GL_FILL,
GL_POINT,
GL_LINE,
GL_FILL,
};
bool poly_mode = ((regs.pa_su_sc_mode_cntl >> 3) & 0x3) != 0;
if (poly_mode) {
@ -1590,7 +1594,8 @@ bool GL4CommandProcessor::IssueCopy() {
if (copy_src_select <= 3 || color_clear_enabled) {
// Source from a color target.
uint32_t color_info[4] = {
regs[XE_GPU_REG_RB_COLOR_INFO].u32, regs[XE_GPU_REG_RB_COLOR1_INFO].u32,
regs[XE_GPU_REG_RB_COLOR_INFO].u32,
regs[XE_GPU_REG_RB_COLOR1_INFO].u32,
regs[XE_GPU_REG_RB_COLOR2_INFO].u32,
regs[XE_GPU_REG_RB_COLOR3_INFO].u32,
};


@ -940,8 +940,9 @@ bool TextureCache::UploadTexture2D(GLuint texture,
texture_info.size_2d.logical_height);
y++, output_base_offset += host_info.size_2d.output_pitch) {
auto input_base_offset = TextureInfo::TiledOffset2DOuter(
offset_y + y, (texture_info.size_2d.input_width /
texture_info.format_info()->block_width),
offset_y + y,
(texture_info.size_2d.input_width /
texture_info.format_info()->block_width),
bpp);
for (uint32_t x = 0, output_offset = output_base_offset;
x < texture_info.size_2d.block_width;
@ -1048,8 +1049,9 @@ bool TextureCache::UploadTextureCube(GLuint texture,
y < texture_info.size_cube.block_height;
y++, output_base_offset += host_info.size_cube.output_pitch) {
auto input_base_offset = TextureInfo::TiledOffset2DOuter(
offset_y + y, (texture_info.size_cube.input_width /
texture_info.format_info()->block_width),
offset_y + y,
(texture_info.size_cube.input_width /
texture_info.format_info()->block_width),
bpp);
for (uint32_t x = 0, output_offset = output_base_offset;
x < texture_info.size_cube.block_width;


@ -159,8 +159,8 @@ uint32_t GraphicsSystem::ReadRegister(uint32_t addr) {
case 0x1951: // ? vblank pending?
return 1;
case 0x1961: // AVIVO_D1MODE_VIEWPORT_SIZE
// Screen res - 1280x720
// [width(0x0FFF), height(0x0FFF)]
// Screen res - 1280x720
// [width(0x0FFF), height(0x0FFF)]
return 0x050002D0;
default:
if (!register_file_.GetRegisterInfo(r)) {


@ -23,7 +23,8 @@ const RegisterInfo* RegisterFile::GetRegisterInfo(uint32_t index) {
#define XE_GPU_REGISTER(index, type, name) \
case index: { \
static const RegisterInfo reg_info = { \
RegisterInfo::Type::type, #name, \
RegisterInfo::Type::type, \
#name, \
}; \
return &reg_info; \
}


@ -356,7 +356,10 @@ void ParsedVertexFetchInstruction::Disassemble(StringBuffer* out) const {
void ParsedTextureFetchInstruction::Disassemble(StringBuffer* out) const {
static const char* kTextureFilterNames[] = {
"point", "linear", "BASEMAP", "keep",
"point",
"linear",
"BASEMAP",
"keep",
};
static const char* kAnisoFilterNames[] = {
"disabled", "max1to1", "max2to1", "max4to1",


@ -950,9 +950,8 @@ void TraceViewer::DrawVertexFetcher(Shader* shader,
}
}
ImGui::Columns(1);
ImGui::SetCursorPosY(ImGui::GetCursorPosY() +
(vertex_count - display_end) *
ImGui::GetTextLineHeight());
ImGui::SetCursorPosY(ImGui::GetCursorPosY() + (vertex_count - display_end) *
ImGui::GetTextLineHeight());
ImGui::PopStyleVar();
ImGui::EndChild();
}
@ -971,10 +970,14 @@ static const char* kStencilFuncNames[] = {
"Decrement and Clamp",
};
static const char* kIndexFormatNames[] = {
"uint16", "uint32",
"uint16",
"uint32",
};
static const char* kEndiannessNames[] = {
"unspecified endianness", "8-in-16", "8-in-32", "16-in-32",
"unspecified endianness",
"8-in-16",
"8-in-32",
"16-in-32",
};
static const char* kColorFormatNames[] = {
/* 0 */ "k_8_8_8_8",
@ -995,7 +998,8 @@ static const char* kColorFormatNames[] = {
/* 15 */ "k_32_32_FLOAT",
};
static const char* kDepthFormatNames[] = {
"kD24S8", "kD24FS8",
"kD24S8",
"kD24FS8",
};
void ProgressBar(float frac, float width, float height = 0,
@ -1185,7 +1189,9 @@ void TraceViewer::DrawStateUI() {
uint32_t surface_pitch = surface_info & 0x3FFF;
auto surface_msaa = (surface_info >> 16) & 0x3;
static const char* kMsaaNames[] = {
"1X", "2X", "4X",
"1X",
"2X",
"4X",
};
ImGui::BulletText("Surface Pitch: %d", surface_pitch);
ImGui::BulletText("Surface HI-Z Pitch: %d", surface_hiz);
@ -1270,7 +1276,9 @@ void TraceViewer::DrawStateUI() {
ImGui::BulletText("Front-face: counter-clockwise");
}
static const char* kFillModeNames[3] = {
"point", "line", "fill",
"point",
"line",
"fill",
};
bool poly_mode = ((pa_su_sc_mode_cntl >> 3) & 0x3) != 0;
if (poly_mode) {


@ -72,7 +72,9 @@ void TraceWriter::WritePrimaryBufferStart(uint32_t base_ptr, uint32_t count) {
return;
}
PrimaryBufferStartCommand cmd = {
TraceCommandType::kPrimaryBufferStart, base_ptr, 0,
TraceCommandType::kPrimaryBufferStart,
base_ptr,
0,
};
fwrite(&cmd, 1, sizeof(cmd), file_);
}
@ -92,7 +94,9 @@ void TraceWriter::WriteIndirectBufferStart(uint32_t base_ptr, uint32_t count) {
return;
}
IndirectBufferStartCommand cmd = {
TraceCommandType::kIndirectBufferStart, base_ptr, 0,
TraceCommandType::kIndirectBufferStart,
base_ptr,
0,
};
fwrite(&cmd, 1, sizeof(cmd), file_);
}
@ -112,7 +116,9 @@ void TraceWriter::WritePacketStart(uint32_t base_ptr, uint32_t count) {
return;
}
PacketStartCommand cmd = {
TraceCommandType::kPacketStart, base_ptr, count,
TraceCommandType::kPacketStart,
base_ptr,
count,
};
fwrite(&cmd, 1, sizeof(cmd), file_);
fwrite(membase_ + base_ptr, 4, count, file_);
@ -220,7 +226,8 @@ void TraceWriter::WriteEvent(EventCommand::Type event_type) {
return;
}
EventCommand cmd = {
TraceCommandType::kEvent, event_type,
TraceCommandType::kEvent,
event_type,
};
fwrite(&cmd, 1, sizeof(cmd), file_);
}


@ -83,7 +83,8 @@ BufferCache::BufferCache(RegisterFile* register_file, Memory* memory,
descriptor_set_layout_info.pNext = nullptr;
descriptor_set_layout_info.flags = 0;
VkDescriptorSetLayoutBinding uniform_bindings[] = {
vertex_uniform_binding, fragment_uniform_binding,
vertex_uniform_binding,
fragment_uniform_binding,
};
descriptor_set_layout_info.bindingCount =
static_cast<uint32_t>(xe::countof(uniform_bindings));


@ -1158,7 +1158,9 @@ PipelineCache::UpdateStatus PipelineCache::UpdateRasterizationState(
// Vulkan only supports both matching.
assert_true(front_poly_mode == back_poly_mode);
static const VkPolygonMode kFillModes[3] = {
VK_POLYGON_MODE_POINT, VK_POLYGON_MODE_LINE, VK_POLYGON_MODE_FILL,
VK_POLYGON_MODE_POINT,
VK_POLYGON_MODE_LINE,
VK_POLYGON_MODE_FILL,
};
state_info.polygonMode = kFillModes[front_poly_mode];
} else {


@ -223,7 +223,9 @@ CachedTileView::CachedTileView(ui::vulkan::VulkanDevice* device,
image_view_info.format = image_info.format;
// TODO(benvanik): manipulate? may not be able to when attached.
image_view_info.components = {
VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_R,
VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_A,
};
image_view_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
@ -746,7 +748,9 @@ bool RenderCache::ParseConfiguration(RenderConfiguration* config) {
// Color attachment configuration.
if (config->mode_control == ModeControl::kColorDepth) {
reg::RB_COLOR_INFO color_info[4] = {
regs.rb_color_info, regs.rb_color1_info, regs.rb_color2_info,
regs.rb_color_info,
regs.rb_color1_info,
regs.rb_color2_info,
regs.rb_color3_info,
};
for (int i = 0; i < 4; ++i) {


@ -1099,8 +1099,10 @@ void TextureCache::WritebackTexture(Texture* texture) {
auto command_buffer = wb_command_pool_->AcquireEntry();
VkCommandBufferBeginInfo begin_info = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr,
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
nullptr,
};
vkBeginCommandBuffer(command_buffer, &begin_info);


@ -91,7 +91,9 @@ bool VulkanCommandProcessor::SetupContext() {
render_cache_ = std::make_unique<RenderCache>(register_file_, device_);
VkEventCreateInfo info = {
VK_STRUCTURE_TYPE_EVENT_CREATE_INFO, nullptr, 0,
VK_STRUCTURE_TYPE_EVENT_CREATE_INFO,
nullptr,
0,
};
VkResult result =
@ -439,7 +441,8 @@ void VulkanCommandProcessor::PerformSwap(uint32_t frontbuffer_ptr,
nullptr, 1, &barrier);
VkRect2D src_rect = {
{0, 0}, {frontbuffer_width, frontbuffer_height},
{0, 0},
{frontbuffer_width, frontbuffer_height},
};
blitter_->BlitTexture2D(
copy_commands, current_batch_fence_,
@ -1002,7 +1005,8 @@ bool VulkanCommandProcessor::IssueCopy() {
if (is_color_source) {
// Source from a color target.
uint32_t color_info[4] = {
regs[XE_GPU_REG_RB_COLOR_INFO].u32, regs[XE_GPU_REG_RB_COLOR1_INFO].u32,
regs[XE_GPU_REG_RB_COLOR_INFO].u32,
regs[XE_GPU_REG_RB_COLOR1_INFO].u32,
regs[XE_GPU_REG_RB_COLOR2_INFO].u32,
regs[XE_GPU_REG_RB_COLOR3_INFO].u32,
};
@ -1112,11 +1116,11 @@ bool VulkanCommandProcessor::IssueCopy() {
VkFilter filter = is_color_source ? VK_FILTER_LINEAR : VK_FILTER_NEAREST;
switch (copy_command) {
case CopyCommand::kRaw:
/*
render_cache_->RawCopyToImage(command_buffer, edram_base,
texture->image, texture->image_layout, is_color_source, resolve_offset,
resolve_extent); break;
*/
/*
render_cache_->RawCopyToImage(command_buffer, edram_base,
texture->image, texture->image_layout, is_color_source, resolve_offset,
resolve_extent); break;
*/
case CopyCommand::kConvert: {
/*


@ -27,8 +27,8 @@ namespace xe {
namespace gpu {
namespace vulkan {
using xe::ui::vulkan::CheckResult;
using xe::ui::RawImage;
using xe::ui::vulkan::CheckResult;
VulkanGraphicsSystem::VulkanGraphicsSystem() {}
VulkanGraphicsSystem::~VulkanGraphicsSystem() = default;
@ -50,7 +50,8 @@ X_STATUS VulkanGraphicsSystem::Setup(cpu::Processor* processor,
// Create our own command pool we can use for captures.
VkCommandPoolCreateInfo create_info = {
VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr,
VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
nullptr,
VK_COMMAND_POOL_CREATE_TRANSIENT_BIT |
VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
device_->queue_family_index(),
@ -90,8 +91,10 @@ std::unique_ptr<RawImage> VulkanGraphicsSystem::Capture() {
CheckResult(status, "vkAllocateCommandBuffers");
VkCommandBufferBeginInfo begin_info = {
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT, nullptr,
VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
nullptr,
VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
nullptr,
};
vkBeginCommandBuffer(cmd, &begin_info);

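The reordered using-declarations at the top of this file look like clang-format's SortUsingDeclarations option (introduced in clang-format 5.0), which alphabetizes using-declarations within a block; under that ordering xe::ui::RawImage sorts ahead of xe::ui::vulkan::CheckResult. A small self-contained sketch of the rule, with made-up namespaces:

// Made-up namespaces purely for illustration; only the ordering rule matters.
namespace a {
inline int First() { return 1; }
}  // namespace a
namespace b {
inline int Second() { return 2; }
}  // namespace b

// With SortUsingDeclarations, writing b::Second above a::First would simply
// be swapped back on the next reformat.
using a::First;
using b::Second;

int main() { return First() + Second() == 3 ? 0 : 1; }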

@ -116,11 +116,11 @@ int hid_demo_main(const std::vector<std::wstring>& args) {
auto& io = window->imgui_drawer()->GetIO();
ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(-1, 0));
ImGui::Begin("main_window", nullptr, ImGuiWindowFlags_NoMove |
ImGuiWindowFlags_NoResize |
ImGuiWindowFlags_NoTitleBar |
ImGuiWindowFlags_NoScrollbar |
ImGuiWindowFlags_NoSavedSettings);
ImGui::Begin("main_window", nullptr,
ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoResize |
ImGuiWindowFlags_NoTitleBar |
ImGuiWindowFlags_NoScrollbar |
ImGuiWindowFlags_NoSavedSettings);
ImGui::SetWindowPos(ImVec2(0, 0));
ImGui::SetWindowSize(io.DisplaySize);
ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(4, 4));


@ -213,9 +213,8 @@ std::vector<object_ref<XObject>> ObjectTable::GetAllObjects() {
for (uint32_t slot = 0; slot < table_capacity_; slot++) {
auto& entry = table_[slot];
if (entry.object &&
std::find(results.begin(), results.end(), entry.object) ==
results.end()) {
if (entry.object && std::find(results.begin(), results.end(),
entry.object) == results.end()) {
entry.object->Retain();
results.push_back(object_ref<XObject>(entry.object));
}


@ -474,7 +474,9 @@ xe::cpu::Export* RegisterExport(R (*fn)(Ps&...), const char* name,
static void Trampoline(PPCContext* ppc_context) {
++export_entry->function_data.call_count;
Param::Init init = {
ppc_context, sizeof...(Ps), 0,
ppc_context,
sizeof...(Ps),
0,
};
auto params = std::make_tuple<Ps...>(Ps(init)...);
if (export_entry->tags & xe::cpu::ExportTag::kLog &&
@ -507,7 +509,8 @@ xe::cpu::Export* RegisterExport(void (*fn)(Ps&...), const char* name,
static void Trampoline(PPCContext* ppc_context) {
++export_entry->function_data.call_count;
Param::Init init = {
ppc_context, sizeof...(Ps),
ppc_context,
sizeof...(Ps),
};
auto params = std::make_tuple<Ps...>(Ps(init)...);
if (export_entry->tags & xe::cpu::ExportTag::kLog &&


@ -15,7 +15,9 @@
namespace xe {} // namespace xe
typedef struct { int reserved; } xe_xex2_options_t;
typedef struct {
int reserved;
} xe_xex2_options_t;
struct xe_xex2;
typedef struct xe_xex2* xe_xex2_ref;


@ -24,7 +24,7 @@ dword_result_t XamAvatarInitialize(
lpdword_t function_ptrs, // 20b, 5 pointers
lpunknown_t unk5, // ptr in data segment
dword_t unk6 // flags - 0x00300000, 0x30, etc
) {
) {
// Negative to fail. Game should immediately call XamAvatarShutdown.
return ~0u;
}


@ -47,6 +47,6 @@ DECLARE_XBDM_EXPORT(DmFindPdbSignature, ExportTag::kStub | ExportTag::kDebug);
void RegisterMiscExports(xe::cpu::ExportResolver* export_resolver,
KernelState* kernel_state) {}
} // namespace xboxkrnl
} // namespace xbdm
} // namespace kernel
} // namespace xe


@ -7,13 +7,13 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/apu/audio_system.h"
#include "xenia/base/logging.h"
#include "xenia/emulator.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -7,11 +7,11 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/xbox.h"
#include "third_party/crypto/TinySHA1.hpp"
#include "third_party/crypto/des/des.cpp"


@ -7,13 +7,13 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/debugging.h"
#include "xenia/base/logging.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/kernel/xthread.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -7,11 +7,11 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -7,7 +7,6 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/base/memory.h"
#include "xenia/cpu/processor.h"
@ -19,6 +18,7 @@
#include "xenia/kernel/xiocompletion.h"
#include "xenia/kernel/xthread.h"
#include "xenia/vfs/device.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -7,12 +7,12 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/kernel/xthread.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -7,7 +7,6 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/cpu/processor.h"
#include "xenia/kernel/kernel_state.h"
@ -15,6 +14,7 @@
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/util/xex2.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -7,7 +7,6 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
@ -15,6 +14,7 @@
#include "xenia/kernel/xobject.h"
#include "xenia/kernel/xsemaphore.h"
#include "xenia/kernel/xthread.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {

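The run of xboxkrnl hunks above is include sorting: "xenia/xbox.h" used to sit at the head of each file's include list and now lands in its alphabetical slot. That matches SortIncludes, which is on by default in the Google style this project's .clang-format presumably derives from. An illustrative, self-contained example using standard headers only, not the real xenia files:

// Illustrative only; standard headers stand in for the xenia ones.
#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Listing <vector> above <cstdio> would just be reordered on the next run.
  std::vector<std::string> headers = {"cstdio", "string", "vector"};
  std::printf("%zu headers, kept in alphabetical order\n", headers.size());
  return 0;
}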

@ -559,13 +559,12 @@ int32_t format_core(PPCContext* ppc_context, FormatData& data, ArgList& args,
if (!is_wide) {
length = 0;
for (auto s = (const uint8_t *)str; cap > 0 && *s; ++s, cap--) {
for (auto s = (const uint8_t*)str; cap > 0 && *s; ++s, cap--) {
length++;
}
} else {
length = 0;
for (auto s = (const uint16_t *)str; cap > 0 && *s;
++s, cap--) {
for (auto s = (const uint16_t*)str; cap > 0 && *s; ++s, cap--) {
length++;
}
}

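Besides reflowing the two counting loops onto single lines, this hunk normalizes the cast spelling from "(const uint8_t *)str" to "(const uint8_t*)str", consistent with Google-style pointer alignment where the '*' binds to the type. A hedged sketch of the same loop in isolation, wrapped in a hypothetical CountBytes helper that is not part of xenia:

// CountBytes is a hypothetical wrapper; the loop body mirrors the
// reformatted lines above.
#include <cstddef>
#include <cstdint>
#include <cstring>

static size_t CountBytes(const void* str, size_t cap) {
  size_t length = 0;
  // Google style keeps the '*' attached to the type in the cast below.
  for (auto s = (const uint8_t*)str; cap > 0 && *s; ++s, cap--) {
    length++;
  }
  return length;
}

int main() {
  const char* text = "xenia";
  return CountBytes(text, std::strlen(text)) == 5 ? 0 : 1;
}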

@ -7,11 +7,11 @@
******************************************************************************
*/
#include "xenia/xbox.h"
#include "xenia/base/logging.h"
#include "xenia/kernel/kernel_state.h"
#include "xenia/kernel/util/shim_utils.h"
#include "xenia/kernel/xboxkrnl/xboxkrnl_private.h"
#include "xenia/xbox.h"
namespace xe {
namespace kernel {


@ -257,7 +257,7 @@ dword_result_t VdInitializeScalerCommandBuffer(
lpvoid_t dest_ptr, // Points to the first 80000000h where the memcpy
// sources from.
dword_t dest_count // Count in words.
) {
) {
// We could fake the commands here, but I'm not sure the game checks for
// anything but success (non-zero ret).
// For now, we just fill it with NOPs.


@ -209,39 +209,57 @@ static const struct {
} map_info[] = {
// (1024mb) - virtual 4k pages
{
0x00000000, 0x3FFFFFFF, 0x0000000000000000ull,
0x00000000,
0x3FFFFFFF,
0x0000000000000000ull,
},
// (1024mb) - virtual 64k pages (cont)
{
0x40000000, 0x7EFFFFFF, 0x0000000040000000ull,
0x40000000,
0x7EFFFFFF,
0x0000000040000000ull,
},
// (16mb) - GPU writeback + 15mb of XPS?
{
0x7F000000, 0x7FFFFFFF, 0x0000000100000000ull,
0x7F000000,
0x7FFFFFFF,
0x0000000100000000ull,
},
// (256mb) - xex 64k pages
{
0x80000000, 0x8FFFFFFF, 0x0000000080000000ull,
0x80000000,
0x8FFFFFFF,
0x0000000080000000ull,
},
// (256mb) - xex 4k pages
{
0x90000000, 0x9FFFFFFF, 0x0000000080000000ull,
0x90000000,
0x9FFFFFFF,
0x0000000080000000ull,
},
// (512mb) - physical 64k pages
{
0xA0000000, 0xBFFFFFFF, 0x0000000100000000ull,
0xA0000000,
0xBFFFFFFF,
0x0000000100000000ull,
},
// - physical 16mb pages
{
0xC0000000, 0xDFFFFFFF, 0x0000000100000000ull,
0xC0000000,
0xDFFFFFFF,
0x0000000100000000ull,
},
// - physical 4k pages
{
0xE0000000, 0xFFFFFFFF, 0x0000000100000000ull,
0xE0000000,
0xFFFFFFFF,
0x0000000100000000ull,
},
// - physical raw
{
0x100000000, 0x11FFFFFFF, 0x0000000100000000ull,
0x100000000,
0x11FFFFFFF,
0x0000000100000000ull,
},
};
int Memory::MapViews(uint8_t* mapping_base) {

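The exploded map_info table is the clearest case of another recurring change: when a braced initializer list ends with a trailing comma, clang-format lays it out one element per line instead of packing several per line; drop the comma and packing is allowed again. A minimal sketch with a stand-in MapEntry struct, not the real table:

// Stand-in struct; the values echo the first map_info entry as sample data.
#include <cstdint>

struct MapEntry {
  uint64_t virtual_start;
  uint64_t virtual_end;
  uint64_t physical_base;
};

// Trailing comma after the last element: formatted one element per line.
static const MapEntry kOnePerLine = {
    0x00000000,
    0x3FFFFFFF,
    0x0000000000000000ull,
};

// No trailing comma: clang-format may pack several elements per line.
static const MapEntry kPacked = {0x00000000, 0x3FFFFFFF,
                                 0x0000000000000000ull};

int main() {
  return kOnePerLine.physical_base == kPacked.physical_base ? 0 : 1;
}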

@ -179,5 +179,5 @@ bool apiscanner_loader::ExtractImports(const void* addr, const size_t length,
return true;
}
} // tools
} // xe
} // namespace tools
} // namespace xe

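The "} // tools" to "} // namespace tools" rewrites here, like the earlier "} // namespace xboxkrnl" to "} // namespace xbdm" correction, match clang-format's FixNamespaceComments option (added in 5.0), which inserts or repairs the canonical end-of-namespace comment. A small sketch of the normalized form, with a made-up Answer function:

// Minimal namespaces only; the point is the canonical closing comment that
// FixNamespaceComments produces.
namespace xe {
namespace tools {

inline int Answer() { return 42; }

}  // namespace tools
}  // namespace xe

int main() { return xe::tools::Answer() == 42 ? 0 : 1; }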

@ -56,5 +56,5 @@ class apiscanner_loader {
bool ExtractImports(const void* addr, const size_t length, title& info);
};
} // tools
} // xe
} // namespace tools
} // namespace xe


@ -143,7 +143,8 @@ void ImGuiDrawer::SetupFont() {
font_config.OversampleH = font_config.OversampleV = 1;
font_config.PixelSnapH = true;
static const ImWchar font_glyph_ranges[] = {
0x0020, 0x00FF, // Basic Latin + Latin Supplement
0x0020,
0x00FF, // Basic Latin + Latin Supplement
0,
};
io.Fonts->AddFontFromMemoryCompressedBase85TTF(


@ -207,7 +207,10 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
vkCmdSetViewport(command_buffer, 0, 1, &viewport);
VkRect2D scissor = {
dst_offset.x, dst_offset.y, dst_extents.width, dst_extents.height,
dst_offset.x,
dst_offset.y,
dst_extents.width,
dst_extents.height,
};
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
@ -256,7 +259,10 @@ void Blitter::BlitTexture2D(VkCommandBuffer command_buffer, VkFence fence,
&vtx_constants);
PixPushConstants pix_constants = {
0, 0, 0, swap_channels ? 1 : 0,
0,
0,
0,
swap_channels ? 1 : 0,
};
vkCmdPushConstants(command_buffer, pipeline_layout_,
VK_SHADER_STAGE_FRAGMENT_BIT, sizeof(VtxPushConstants),
@ -491,7 +497,8 @@ VkPipeline Blitter::CreatePipeline(VkRenderPass render_pass,
dynamic_state_info.pNext = nullptr;
dynamic_state_info.flags = 0;
VkDynamicState dynamic_states[] = {
VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR,
};
dynamic_state_info.dynamicStateCount =
static_cast<uint32_t>(xe::countof(dynamic_states));


@ -224,7 +224,9 @@ class VulkanImmediateTexture : public ImmediateTexture {
view_info.viewType = VK_IMAGE_VIEW_TYPE_2D;
view_info.format = VK_FORMAT_R8G8B8A8_UNORM;
view_info.components = {
VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_G, VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_R,
VK_COMPONENT_SWIZZLE_G,
VK_COMPONENT_SWIZZLE_B,
VK_COMPONENT_SWIZZLE_A,
};
view_info.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1};
@ -597,7 +599,8 @@ VulkanImmediateDrawer::VulkanImmediateDrawer(VulkanContext* graphics_context)
dynamic_state_info.pNext = nullptr;
dynamic_state_info.flags = 0;
VkDynamicState dynamic_states[] = {
VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR,
VK_DYNAMIC_STATE_VIEWPORT,
VK_DYNAMIC_STATE_SCISSOR,
};
dynamic_state_info.dynamicStateCount =
static_cast<uint32_t>(xe::countof(dynamic_states));