
Merge pull request #34 from bunnei/gsp-command-synch

Gsp command synch
bunnei 2014-08-06 18:30:01 -04:00
commit d0c1794853
15 changed files with 231 additions and 107 deletions

View File

@@ -42,7 +42,6 @@ void EmuThread::run()
emit CPUStepped();
}
}
HW::Update();
}
Core::Stop();

View File

@@ -25,16 +25,16 @@ QVariant GPUCommandStreamItemModel::data(const QModelIndex& index, int role) con
return QVariant();
int command_index = index.row();
const GSP_GPU::GXCommand& command = GetDebugger()->ReadGXCommandHistory(command_index);
const GSP_GPU::Command& command = GetDebugger()->ReadGXCommandHistory(command_index);
if (role == Qt::DisplayRole)
{
std::map<GSP_GPU::GXCommandId, const char*> command_names = {
{ GSP_GPU::GXCommandId::REQUEST_DMA, "REQUEST_DMA" },
{ GSP_GPU::GXCommandId::SET_COMMAND_LIST_FIRST, "SET_COMMAND_LIST_FIRST" },
{ GSP_GPU::GXCommandId::SET_MEMORY_FILL, "SET_MEMORY_FILL" },
{ GSP_GPU::GXCommandId::SET_DISPLAY_TRANSFER, "SET_DISPLAY_TRANSFER" },
{ GSP_GPU::GXCommandId::SET_TEXTURE_COPY, "SET_TEXTURE_COPY" },
{ GSP_GPU::GXCommandId::SET_COMMAND_LIST_LAST, "SET_COMMAND_LIST_LAST" }
std::map<GSP_GPU::CommandId, const char*> command_names = {
{ GSP_GPU::CommandId::REQUEST_DMA, "REQUEST_DMA" },
{ GSP_GPU::CommandId::SET_COMMAND_LIST_FIRST, "SET_COMMAND_LIST_FIRST" },
{ GSP_GPU::CommandId::SET_MEMORY_FILL, "SET_MEMORY_FILL" },
{ GSP_GPU::CommandId::SET_DISPLAY_TRANSFER, "SET_DISPLAY_TRANSFER" },
{ GSP_GPU::CommandId::SET_TEXTURE_COPY, "SET_TEXTURE_COPY" },
{ GSP_GPU::CommandId::SET_COMMAND_LIST_LAST, "SET_COMMAND_LIST_LAST" }
};
const u32* command_data = reinterpret_cast<const u32*>(&command);
QString str = QString("%1 %2 %3 %4 %5 %6 %7 %8 %9").arg(command_names[command.id])

View File

@@ -26,21 +26,25 @@ ARM_Interface* g_sys_core = nullptr; ///< ARM11 system (OS) core
/// Run the core CPU loop
void RunLoop() {
for (;;){
g_app_core->Run(GPU::kFrameTicks);
// This function loops for 100 instructions in the CPU before trying to update hardware.
// This is a little bit faster than SingleStep, and should be pretty much equivalent. The
// number of instructions chosen is fairly arbitrary, however a large number will more
// drastically affect the frequency of GSP interrupts and likely break things. The point of
// this is to just loop in the CPU for more than 1 instruction to reduce overhead and make
// it a little bit faster...
g_app_core->Run(100);
HW::Update();
if (HLE::g_reschedule) {
Kernel::Reschedule();
}
}
}
/// Step the CPU one instruction
void SingleStep() {
g_app_core->Step();
// Update and reschedule after approx. 1 frame
u64 current_ticks = Core::g_app_core->GetTicks();
if ((current_ticks - g_last_ticks) >= GPU::kFrameTicks || HLE::g_reschedule) {
g_last_ticks = current_ticks;
HW::Update();
if (HLE::g_reschedule) {
Kernel::Reschedule();
}
}
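For context, a minimal standalone sketch of the batched run-loop pattern introduced above. The Cpu and Hw types are stand-ins rather than Citra's g_app_core and HW interfaces, and the loop is bounded here so the sketch terminates:

#include <cstdint>
#include <iostream>

struct Cpu { std::uint64_t instructions = 0; void Run(int n) { instructions += n; } };
struct Hw  { std::uint64_t updates = 0;      void Update() { ++updates; } };

int main() {
    // Larger batches reduce loop overhead but make Hw::Update() (and therefore
    // GSP interrupt delivery) less frequent; 100 matches the value in the diff.
    constexpr int kInstructionsPerBatch = 100;

    Cpu cpu;
    Hw hw;
    bool reschedule = false;

    for (int i = 0; i < 1000; ++i) { // bounded here; the emulator loops forever
        cpu.Run(kInstructionsPerBatch);
        hw.Update();
        if (reschedule) {
            // Kernel::Reschedule() would run here in the emulator.
            reschedule = false;
        }
    }
    std::cout << cpu.instructions << " instructions, " << hw.updates << " HW updates\n";
}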

View File

@@ -53,7 +53,7 @@ Result ArbitrateAddress(Handle handle, ArbitrationType type, u32 address, s32 va
for(int i = 0; i < value; i++)
ArbitrateHighestPriorityThread(handle, address);
}
HLE::Reschedule(__func__);
break;
// Wait current thread (acquire the arbiter)...
case ArbitrationType::WaitIfLessThan:
@@ -61,6 +61,7 @@ Result ArbitrateAddress(Handle handle, ArbitrationType type, u32 address, s32 va
Kernel::WaitCurrentThread(WAITTYPE_ARB, handle);
HLE::Reschedule(__func__);
}
break;
default:
ERROR_LOG(KERNEL, "unknown type=%d", type);

View File

@@ -42,7 +42,7 @@ public:
if (std::find(waiting_threads.begin(), waiting_threads.end(), thread) == waiting_threads.end()) {
waiting_threads.push_back(thread);
}
Kernel::WaitCurrentThread(WAITTYPE_EVENT);
Kernel::WaitCurrentThread(WAITTYPE_EVENT, GetHandle());
}
if (reset_type != RESETTYPE_STICKY && !permanent_locked) {
locked = true;

View File

@@ -48,7 +48,7 @@ public:
*wait = locked;
if (locked) {
Kernel::WaitCurrentThread(WAITTYPE_MUTEX);
Kernel::WaitCurrentThread(WAITTYPE_MUTEX, GetHandle());
}
return 0;

View File

@@ -274,7 +274,11 @@ Thread* NextThread() {
return Kernel::g_object_pool.GetFast<Thread>(next);
}
/// Puts the current thread in the wait state for the given type
/**
* Puts the current thread in the wait state for the given type
* @param wait_type Type of wait
* @param wait_handle Handle of Kernel object that we are waiting on, defaults to current thread
*/
void WaitCurrentThread(WaitType wait_type, Handle wait_handle) {
Thread* thread = GetCurrentThread();
thread->wait_type = wait_type;

View File

@@ -69,7 +69,11 @@ void ArbitrateAllThreads(u32 arbiter, u32 address);
/// Gets the current thread handle
Handle GetCurrentThreadHandle();
/// Puts the current thread in the wait state for the given type
/**
* Puts the current thread in the wait state for the given type
* @param wait_type Type of wait
* @param wait_handle Handle of Kernel object that we are waiting on, defaults to current thread
*/
void WaitCurrentThread(WaitType wait_type, Handle wait_handle=GetCurrentThreadHandle());
/// Put current thread in a wait state - on WaitSynchronization

View File

@@ -11,55 +11,35 @@
#include "core/hle/kernel/event.h"
#include "core/hle/kernel/shared_memory.h"
#include "core/hle/service/gsp.h"
#include "core/hw/gpu.h"
#include "video_core/gpu_debugger.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
// Main graphics debugger object - TODO: Here is probably not the best place for this
GraphicsDebugger g_debugger;
/// GSP shared memory GX command buffer header
union GX_CmdBufferHeader {
u32 hex;
// Current command index. This index is updated by GSP module after loading the command data,
// right before the command is processed. When this index is updated by GSP module, the total
// commands field is decreased by one as well.
BitField<0,8,u32> index;
// Total commands to process, must not be value 0 when GSP module handles commands. This must be
// <=15 when writing a command to shared memory. This is incremented by the application when
// writing a command to shared memory, after increasing this value TriggerCmdReqQueue is only
// used if this field is value 1.
BitField<8,8,u32> number_commands;
};
////////////////////////////////////////////////////////////////////////////////////////////////////
// Namespace GSP_GPU
namespace GSP_GPU {
Handle g_event = 0;
Handle g_shared_memory = 0;
Handle g_interrupt_event = 0; ///< Handle to event triggered when GSP interrupt has been signalled
Handle g_shared_memory = 0; ///< Handle to GSP shared memory
u32 g_thread_id = 1; ///< Thread index into interrupt relay queue, 1 is arbitrary
u32 g_thread_id = 0;
/// Gets a pointer to a thread command buffer in GSP shared memory
static inline u8* GetCommandBuffer(u32 thread_id) {
if (0 == g_shared_memory)
return nullptr;
/// Gets a pointer to the start (header) of a command buffer in GSP shared memory
static inline u8* GX_GetCmdBufferPointer(u32 thread_id, u32 offset=0) {
return Kernel::GetSharedMemoryPointer(g_shared_memory, 0x800 + (thread_id * 0x200) + offset);
return Kernel::GetSharedMemoryPointer(g_shared_memory,
0x800 + (thread_id * sizeof(CommandBuffer)));
}
/// Finishes execution of a GSP command
void GX_FinishCommand(u32 thread_id) {
GX_CmdBufferHeader* header = (GX_CmdBufferHeader*)GX_GetCmdBufferPointer(thread_id);
g_debugger.GXCommandProcessed(GX_GetCmdBufferPointer(thread_id, 0x20 + (header->index * 0x20)));
header->number_commands = header->number_commands - 1;
// TODO: Increment header->index?
/// Gets a pointer to the interrupt relay queue for a given thread index
static inline InterruptRelayQueue* GetInterruptRelayQueue(u32 thread_id) {
return (InterruptRelayQueue*)Kernel::GetSharedMemoryPointer(g_shared_memory,
sizeof(InterruptRelayQueue) * thread_id);
}
/// Write a GSP GPU hardware register
@@ -133,39 +113,55 @@ void ReadHWRegs(Service::Interface* self) {
void RegisterInterruptRelayQueue(Service::Interface* self) {
u32* cmd_buff = Service::GetCommandBuffer();
u32 flags = cmd_buff[1];
g_event = cmd_buff[3];
g_interrupt_event = cmd_buff[3];
g_shared_memory = Kernel::CreateSharedMemory("GSPSharedMem");
_assert_msg_(GSP, (g_event != 0), "handle is not valid!");
_assert_msg_(GSP, (g_interrupt_event != 0), "handle is not valid!");
Kernel::SetEventLocked(g_event, false);
// Hack - This function will permanently set the state of the GSP event such that GPU command
// synchronization barriers always passthrough. Correct solution would be to set this after the
// GPU as processed all queued up commands, but due to the emulator being single-threaded they
// will always be ready.
Kernel::SetPermanentLock(g_event, true);
cmd_buff[0] = 0; // Result - no error
cmd_buff[2] = g_thread_id; // ThreadID
cmd_buff[2] = g_thread_id++; // ThreadID
cmd_buff[4] = g_shared_memory; // GSP shared memory
Kernel::SignalEvent(g_interrupt_event); // TODO(bunnei): Is this correct?
}
/**
* Signals that the specified interrupt type has occurred to userland code
* @param interrupt_id ID of interrupt that is being signalled
*/
void SignalInterrupt(InterruptId interrupt_id) {
if (0 == g_interrupt_event) {
WARN_LOG(GSP, "cannot synchronize until GSP event has been created!");
return;
}
if (0 == g_shared_memory) {
WARN_LOG(GSP, "cannot synchronize until GSP shared memory has been created!");
return;
}
for (int thread_id = 0; thread_id < 0x4; ++thread_id) {
InterruptRelayQueue* interrupt_relay_queue = GetInterruptRelayQueue(thread_id);
interrupt_relay_queue->number_interrupts = interrupt_relay_queue->number_interrupts + 1;
/// This triggers handling of the GX command written to the command buffer in shared memory.
void TriggerCmdReqQueue(Service::Interface* self) {
u8 next = interrupt_relay_queue->index;
next += interrupt_relay_queue->number_interrupts;
next = next % 0x34; // 0x34 is the number of interrupt slots
interrupt_relay_queue->slot[next] = interrupt_id;
interrupt_relay_queue->error_code = 0x0; // No error
}
Kernel::SignalEvent(g_interrupt_event);
}
/// Executes the next GSP command
void ExecuteCommand(const Command& command) {
// Utility function to convert register ID to address
auto WriteGPURegister = [](u32 id, u32 data) {
GPU::Write<u32>(0x1EF00000 + 4 * id, data);
};
GX_CmdBufferHeader* header = (GX_CmdBufferHeader*)GX_GetCmdBufferPointer(g_thread_id);
auto& command = *(const GXCommand*)GX_GetCmdBufferPointer(g_thread_id, 0x20 + (header->index * 0x20));
switch (command.id) {
// GX request DMA - typically used for copying memory from GSP heap to VRAM
case GXCommandId::REQUEST_DMA:
case CommandId::REQUEST_DMA:
memcpy(Memory::GetPointer(command.dma_request.dest_address),
Memory::GetPointer(command.dma_request.source_address),
command.dma_request.size);
@@ -174,24 +170,27 @@ void TriggerCmdReqQueue(Service::Interface* self) {
// ctrulib homebrew sends all relevant command list data with this command,
// hence we do all "interesting" stuff here and do nothing in SET_COMMAND_LIST_FIRST.
// TODO: This will need some rework in the future.
case GXCommandId::SET_COMMAND_LIST_LAST:
case CommandId::SET_COMMAND_LIST_LAST:
{
auto& params = command.set_command_list_last;
WriteGPURegister(GPU::Regs::CommandProcessor + 2, params.address >> 3);
WriteGPURegister(GPU::Regs::CommandProcessor, params.size >> 3);
WriteGPURegister(GPU::Regs::CommandProcessor + 4, 1); // TODO: Not sure if we are supposed to always write this .. seems to trigger processing though
// TODO: Not sure if we are supposed to always write this .. seems to trigger processing though
WriteGPURegister(GPU::Regs::CommandProcessor + 4, 1);
// TODO: Move this to GPU
// TODO: Not sure what units the size is measured in
g_debugger.CommandListCalled(params.address,
(u32*)Memory::GetPointer(params.address),
params.size);
SignalInterrupt(InterruptId::P3D);
break;
}
// It's assumed that the two "blocks" behave equivalently.
// Presumably this is done simply to allow two memory fills to run in parallel.
case GXCommandId::SET_MEMORY_FILL:
case CommandId::SET_MEMORY_FILL:
{
auto& params = command.memory_fill;
WriteGPURegister(GPU::Regs::MemoryFill, params.start1 >> 3);
@@ -207,8 +206,18 @@ void TriggerCmdReqQueue(Service::Interface* self) {
}
// TODO: Check if texture copies are implemented correctly..
case GXCommandId::SET_DISPLAY_TRANSFER:
case GXCommandId::SET_TEXTURE_COPY:
case CommandId::SET_DISPLAY_TRANSFER:
// TODO(bunnei): Signalling all of these interrupts here is totally wrong, but it seems to
// work well enough for running demos. Need to figure out how these all work and trigger
// them correctly.
SignalInterrupt(InterruptId::PSC0);
SignalInterrupt(InterruptId::PSC1);
SignalInterrupt(InterruptId::PPF);
SignalInterrupt(InterruptId::P3D);
SignalInterrupt(InterruptId::DMA);
break;
case CommandId::SET_TEXTURE_COPY:
{
auto& params = command.image_copy;
WriteGPURegister(GPU::Regs::DisplayTransfer, params.in_buffer_address >> 3);
@@ -225,7 +234,7 @@ void TriggerCmdReqQueue(Service::Interface* self) {
// TODO: Figure out what exactly SET_COMMAND_LIST_FIRST and SET_COMMAND_LIST_LAST
// are supposed to do.
case GXCommandId::SET_COMMAND_LIST_FIRST:
case CommandId::SET_COMMAND_LIST_FIRST:
{
break;
}
@@ -233,8 +242,26 @@ void TriggerCmdReqQueue(Service::Interface* self) {
default:
ERROR_LOG(GSP, "unknown command 0x%08X", (int)command.id.Value());
}
}
GX_FinishCommand(g_thread_id);
/// This triggers handling of the GX command written to the command buffer in shared memory.
void TriggerCmdReqQueue(Service::Interface* self) {
// Iterate through each thread's command queue...
for (unsigned thread_id = 0; thread_id < 0x4; ++thread_id) {
CommandBuffer* command_buffer = (CommandBuffer*)GetCommandBuffer(thread_id);
// Iterate through each command...
for (unsigned i = 0; i < command_buffer->number_commands; ++i) {
g_debugger.GXCommandProcessed((u8*)&command_buffer->commands[i]);
// Decode and execute command
ExecuteCommand(command_buffer->commands[i]);
// Indicates that command has completed
command_buffer->number_commands = command_buffer->number_commands - 1;
}
}
}
const Interface::FunctionInfo FunctionTable[] = {
@@ -275,7 +302,10 @@ const Interface::FunctionInfo FunctionTable[] = {
Interface::Interface() {
Register(FunctionTable, ARRAY_SIZE(FunctionTable));
g_shared_memory = Kernel::CreateSharedMemory("GSPSharedMem");
g_interrupt_event = 0;
g_shared_memory = 0;
g_thread_id = 1;
}
Interface::~Interface() {
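The SignalInterrupt() implementation above pushes interrupt IDs into a per-thread ring of 0x34 slots. Below is a standalone sketch of that ring-buffer arithmetic, with plain integer fields standing in for the BitField union from gsp.h and a hypothetical Pop() consumer; only the producer side exists in this commit:

#include <cassert>
#include <cstdint>

enum class InterruptId : std::uint8_t { PSC0, PSC1, PDC0, PDC1, PPF, P3D, DMA };

struct RelayQueue {
    std::uint8_t index = 0;              // slot of the oldest pending interrupt
    std::uint8_t number_interrupts = 0;  // interrupts not yet consumed by userland
    std::uint8_t error_code = 0;
    InterruptId slot[0x34] = {};         // 0x34 slots, as in the diff

    void Push(InterruptId id) {          // mirrors the arithmetic in SignalInterrupt()
        number_interrupts = number_interrupts + 1;
        std::uint8_t next = (index + number_interrupts) % 0x34;
        slot[next] = id;
        error_code = 0;                  // no error
    }
    InterruptId Pop() {                  // hypothetical consumer, not part of this commit
        assert(number_interrupts > 0);
        index = (index + 1) % 0x34;      // advance to the next written slot
        --number_interrupts;
        return slot[index];
    }
};

int main() {
    RelayQueue q;
    q.Push(InterruptId::PDC0);
    q.Push(InterruptId::PDC1);
    assert(q.Pop() == InterruptId::PDC0); // drained in FIFO order
    assert(q.Pop() == InterruptId::PDC1);
}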

View File

@@ -12,7 +12,19 @@
namespace GSP_GPU {
enum class GXCommandId : u32 {
/// GSP interrupt ID
enum class InterruptId : u8 {
PSC0 = 0x00,
PSC1 = 0x01,
PDC0 = 0x02, // Seems called every vertical screen line
PDC1 = 0x03, // Seems called every frame
PPF = 0x04,
P3D = 0x05,
DMA = 0x06,
};
/// GSP command ID
enum class CommandId : u32 {
REQUEST_DMA = 0x00,
SET_COMMAND_LIST_LAST = 0x01,
@@ -29,8 +41,32 @@ enum class GXCommandId : u32 {
SET_COMMAND_LIST_FIRST = 0x05,
};
struct GXCommand {
BitField<0, 8, GXCommandId> id;
/// GSP thread interrupt relay queue
struct InterruptRelayQueue {
union {
u32 hex;
// Index of last interrupt in the queue
BitField<0,8,u32> index;
// Number of interrupts remaining to be processed by the userland code
BitField<8,8,u32> number_interrupts;
// Error code - zero on success, otherwise an error has occurred
BitField<16,8,u32> error_code;
};
u32 unk0;
u32 unk1;
InterruptId slot[0x34]; ///< Interrupt ID slots
};
static_assert(sizeof(InterruptRelayQueue) == 0x40,
"InterruptRelayQueue struct has incorrect size");
/// GSP command
struct Command {
BitField<0, 8, CommandId> id;
union {
struct {
@@ -64,7 +100,30 @@ struct GXCommand {
u8 raw_data[0x1C];
};
};
static_assert(sizeof(GXCommand) == 0x20, "GXCommand struct has incorrect size");
static_assert(sizeof(Command) == 0x20, "Command struct has incorrect size");
/// GSP shared memory GX command buffer header
struct CommandBuffer {
union {
u32 hex;
// Current command index. This index is updated by GSP module after loading the command
// data, right before the command is processed. When this index is updated by GSP module,
// the total commands field is decreased by one as well.
BitField<0,8,u32> index;
// Total commands to process, must not be value 0 when GSP module handles commands. This
// must be <=15 when writing a command to shared memory. This is incremented by the
// application when writing a command to shared memory, after increasing this value
// TriggerCmdReqQueue is only used if this field is value 1.
BitField<8,8,u32> number_commands;
};
u32 unk[7];
Command commands[0xF];
};
static_assert(sizeof(CommandBuffer) == 0x200, "CommandBuffer struct has incorrect size");
/// Interface to "srv:" service
class Interface : public Service::Interface {
@@ -84,4 +143,10 @@ public:
};
/**
* Signals that the specified interrupt type has occurred to userland code
* @param interrupt_id ID of interrupt that is being signalled
*/
void SignalInterrupt(InterruptId interrupt_id);
} // namespace
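The CommandBuffer comments above describe the application-side protocol: write a command into shared memory, increment number_commands, and issue TriggerCmdReqQueue only when the queue was previously empty. Below is a standalone sketch of a hypothetical client-side QueueCommand helper; it mirrors the 0x200-byte layout without the BitField helpers, and the slot arithmetic is an assumption since this commit only implements the service (consumer) side:

#include <cstdint>

struct Command {
    std::uint32_t id;                 // CommandId in the low 8 bits
    std::uint8_t parameters[0x1C];    // command-specific payload
};

struct CommandBuffer {
    std::uint8_t index;               // next command to be processed by the GSP module
    std::uint8_t number_commands;     // commands waiting to be processed (<= 15)
    std::uint8_t padding[0x1E];       // rest of the header word plus unk[7] in the real layout
    Command commands[0xF];
};
static_assert(sizeof(CommandBuffer) == 0x200, "CommandBuffer sketch has incorrect size");

// Returns true if the caller should now issue TriggerCmdReqQueue, i.e. if the
// queue was previously empty (per the header comment above).
bool QueueCommand(CommandBuffer& buffer, const Command& command) {
    std::uint8_t slot = (buffer.index + buffer.number_commands) % 0xF;
    buffer.commands[slot] = command;
    return ++buffer.number_commands == 1;
}

int main() {
    CommandBuffer buffer = {};
    Command request_dma = {};
    request_dma.id = 0x00;            // REQUEST_DMA in the CommandId enum above
    bool should_trigger = QueueCommand(buffer, request_dma);
    // should_trigger is true here: the queue was empty, so a client would now
    // call TriggerCmdReqQueue so the GSP module starts draining the buffer.
    return should_trigger ? 0 : 1;
}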

View File

@@ -5,28 +5,30 @@
#include "core/hle/hle.h"
#include "core/hle/service/srv.h"
#include "core/hle/service/service.h"
#include "core/hle/kernel/mutex.h"
#include "core/hle/kernel/event.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
// Namespace SRV
namespace SRV {
Handle g_mutex = 0;
Handle g_event_handle = 0;
void Initialize(Service::Interface* self) {
DEBUG_LOG(OSHLE, "called");
if (!g_mutex) {
g_mutex = Kernel::CreateMutex(true, "SRV:Lock");
}
}
void GetProcSemaphore(Service::Interface* self) {
DEBUG_LOG(OSHLE, "called");
// Get process semaphore?
u32* cmd_buff = Service::GetCommandBuffer();
// TODO(bunnei): Change to a semaphore once these have been implemented
g_event_handle = Kernel::CreateEvent(RESETTYPE_ONESHOT, "SRV:Event");
Kernel::SetEventLocked(g_event_handle, false);
cmd_buff[1] = 0; // No error
cmd_buff[3] = g_mutex; // Return something... 0 == nullptr, raises an exception
cmd_buff[3] = g_event_handle;
}
void GetServiceHandle(Service::Interface* self) {

View File

@@ -185,8 +185,6 @@ Result CreateAddressArbiter(u32* arbiter) {
/// Arbitrate address
Result ArbitrateAddress(Handle arbiter, u32 address, u32 type, u32 value, s64 nanoseconds) {
DEBUG_LOG(SVC, "called arbiter=0x%08X, address=0x%08X, type=0x%08X, value=0x%08X, "
"nanoseconds=%d", arbiter, address, type, value, nanoseconds);
return Kernel::ArbitrateAddress(arbiter, static_cast<Kernel::ArbitrationType>(type), address,
value);
}

View File

@@ -7,7 +7,11 @@
#include "core/core.h"
#include "core/mem_map.h"
#include "core/hle/hle.h"
#include "core/hle/kernel/thread.h"
#include "core/hle/service/gsp.h"
#include "core/hw/gpu.h"
#include "video_core/video_core.h"
@@ -17,7 +21,8 @@ namespace GPU {
RegisterSet<u32, Regs> g_regs;
u64 g_last_ticks = 0; ///< Last CPU ticks
u32 g_cur_line = 0; ///< Current vertical screen line
u64 g_last_line_ticks = 0; ///< CPU tick count from last vertical screen line
/**
* Sets whether the framebuffers are in the GSP heap (FCRAM) or VRAM
@@ -247,19 +252,31 @@ template void Write<u8>(u32 addr, const u8 data);
/// Update hardware
void Update() {
auto& framebuffer_top = g_regs.Get<Regs::FramebufferTop>();
u64 current_ticks = Core::g_app_core->GetTicks();
// Fake a vertical blank
if ((current_ticks - g_last_ticks) >= kFrameTicks) {
g_last_ticks = current_ticks;
// Synchronize line...
if ((current_ticks - g_last_line_ticks) >= GPU::kFrameTicks / framebuffer_top.height) {
GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PDC0);
g_cur_line++;
g_last_line_ticks = current_ticks;
}
// Synchronize frame...
if (g_cur_line >= framebuffer_top.height) {
g_cur_line = 0;
GSP_GPU::SignalInterrupt(GSP_GPU::InterruptId::PDC1);
VideoCore::g_renderer->SwapBuffers();
Kernel::WaitCurrentThread(WAITTYPE_VBLANK);
HLE::Reschedule(__func__);
}
}
/// Initialize hardware
void Init() {
g_last_ticks = Core::g_app_core->GetTicks();
g_cur_line = 0;
g_last_line_ticks = Core::g_app_core->GetTicks();
// SetFramebufferLocation(FRAMEBUFFER_LOCATION_FCRAM);
SetFramebufferLocation(FRAMEBUFFER_LOCATION_VRAM);
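A standalone sketch of the pacing the new Update() establishes: PDC0 is signalled once per scanline and PDC1 (plus a buffer swap) once per full frame of lines. The tick constant and framebuffer height below are illustrative stand-ins, not necessarily Citra's actual kFrameTicks or framebuffer_top.height:

#include <cstdint>
#include <iostream>

int main() {
    const std::uint64_t kFrameTicks = 268123480 / 60; // assumed: 268 MHz core, 60 Hz
    const std::uint32_t height = 400;                 // assumed top framebuffer height

    std::uint32_t cur_line = 0;
    std::uint64_t pdc0_signals = 0, pdc1_signals = 0;
    std::uint64_t last_line_ticks = 0;

    // Advance a fake tick counter as the emulated CPU would and apply the same
    // two checks as Update(): one per scanline, one per full frame of lines.
    for (std::uint64_t ticks = 0; ticks < 2 * kFrameTicks; ticks += 100) {
        if (ticks - last_line_ticks >= kFrameTicks / height) {
            ++pdc0_signals;   // GSP_GPU::SignalInterrupt(PDC0)
            ++cur_line;
            last_line_ticks = ticks;
        }
        if (cur_line >= height) {
            cur_line = 0;
            ++pdc1_signals;   // GSP_GPU::SignalInterrupt(PDC1) + SwapBuffers()
        }
    }
    std::cout << pdc0_signals << " line interrupts, " << pdc1_signals << " frame interrupts\n";
}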

View File

@@ -168,7 +168,7 @@ u8 *GetPointer(const u32 addr) {
return g_system_mem + (vaddr & SYSTEM_MEMORY_MASK);
// VRAM
} else if ((vaddr > VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) {
} else if ((vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END)) {
return g_vram + (vaddr & VRAM_MASK);
} else {
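A standalone illustration of the boundary fix above: with the old '>' comparison, an address equal to VRAM_VADDR incorrectly fell through to the error path. The constants here are illustrative, not necessarily the emulator's exact memory map:

#include <cassert>
#include <cstdint>

constexpr std::uint32_t VRAM_VADDR = 0x1F000000;     // assumed VRAM base
constexpr std::uint32_t VRAM_SIZE = 0x00600000;      // assumed 6 MB of VRAM
constexpr std::uint32_t VRAM_VADDR_END = VRAM_VADDR + VRAM_SIZE;

bool IsVram(std::uint32_t vaddr) {
    return (vaddr >= VRAM_VADDR) && (vaddr < VRAM_VADDR_END); // '>=' includes the first byte
}

int main() {
    assert(IsVram(VRAM_VADDR));      // first VRAM byte, missed by the old '>' check
    assert(!IsVram(VRAM_VADDR_END)); // one past the end is excluded
}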

View File

@@ -49,7 +49,7 @@ public:
*/
virtual void GXCommandProcessed(int total_command_count)
{
const GSP_GPU::GXCommand& cmd = observed->ReadGXCommandHistory(total_command_count-1);
const GSP_GPU::Command& cmd = observed->ReadGXCommandHistory(total_command_count-1);
ERROR_LOG(GSP, "Received command: id=%x", (int)cmd.id.Value());
}
@@ -81,10 +81,10 @@ public:
if (observers.empty())
return;
gx_command_history.push_back(GSP_GPU::GXCommand());
GSP_GPU::GXCommand& cmd = gx_command_history[gx_command_history.size()-1];
gx_command_history.push_back(GSP_GPU::Command());
GSP_GPU::Command& cmd = gx_command_history[gx_command_history.size()-1];
memcpy(&cmd, command_data, sizeof(GSP_GPU::GXCommand));
memcpy(&cmd, command_data, sizeof(GSP_GPU::Command));
ForEachObserver([this](DebuggerObserver* observer) {
observer->GXCommandProcessed(this->gx_command_history.size());
@@ -123,7 +123,7 @@ public:
} );
}
const GSP_GPU::GXCommand& ReadGXCommandHistory(int index) const
const GSP_GPU::Command& ReadGXCommandHistory(int index) const
{
// TODO: Is this thread-safe?
return gx_command_history[index];
@@ -155,7 +155,7 @@ private:
std::vector<DebuggerObserver*> observers;
std::vector<GSP_GPU::GXCommand> gx_command_history;
std::vector<GSP_GPU::Command> gx_command_history;
// vector of pairs of command lists and their storage address
std::vector<std::pair<u32,PicaCommandList>> command_lists;