RawSPU: Reinvoke pending interrupts if missed

Eladash 2022-05-24 11:20:30 +03:00 committed by Ivan
parent 9c824aa0b5
commit 961d41d0bd
5 changed files with 130 additions and 30 deletions


@@ -1355,6 +1355,8 @@ ppu_thread::~ppu_thread()
perf_log.notice("Perf stats for instructions: total %u", exec_bytes / 4);
}
void ppu_interrupt_thread_entry(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);
ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u32 prio, int detached)
: cpu_thread(idm::last_id())
, prio(prio)
@@ -1375,6 +1377,14 @@ ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u3
gpr[3] = param.arg0;
gpr[4] = param.arg1;
}
else
{
cmd_list
({
{ ppu_cmd::ptr_call, 0 },
std::bit_cast<u64>(&ppu_interrupt_thread_entry)
});
}
// Trigger the scheduler
state += cpu_flag::suspend;


@@ -985,7 +985,7 @@ void spu_int_ctrl_t::set(u64 ints)
if (auto handler = tag->handler; lv2_obj::check(handler))
{
rlock.unlock();
handler->exec();
thread_ctrl::notify(*handler->thread);
}
}
}
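
The hunk above stops running the handler directly from spu_int_ctrl_t::set() and only notifies the handler's PPU thread; the recheck loop added to sys_interrupt.cpp further down then picks the interrupt up. A minimal standalone model of that notify-then-recheck pattern, with every name invented for illustration and C++20 atomic wait/notify standing in for thread_ctrl, might look like this:

#include <atomic>
#include <cstdio>
#include <thread>

// Hypothetical names; not part of the emulator
constexpr unsigned quit_bit = 1u << 31;

std::atomic<unsigned> pending{0}; // pending interrupt bits

// Raiser side: publish the interrupt and wake the handler thread (no direct exec)
void raise_interrupt(unsigned bit)
{
	pending.fetch_or(bit, std::memory_order_release);
	pending.notify_one();
}

// Handler side: after every batch of work, re-check for bits that arrived meanwhile
void interrupt_thread()
{
	while (true)
	{
		if (const unsigned bits = pending.exchange(0, std::memory_order_acquire))
		{
			if (bits & quit_bit)
				return;

			std::printf("handling interrupts: 0x%x\n", bits);
			continue; // re-check immediately instead of going back to sleep
		}

		pending.wait(0); // sleep until raise_interrupt() notifies
	}
}

int main()
{
	std::thread worker(interrupt_thread);
	raise_interrupt(1u << 0);
	raise_interrupt(1u << 2);
	raise_interrupt(quit_bit);
	worker.join();
}

The detail this commit relies on is the re-check before sleeping: an interrupt raised while a handler was still running is picked up on the next pass instead of being lost.
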


@@ -171,6 +171,9 @@ struct spu_channel
// Low 32 bits contain value
atomic_t<u64> data;
// Pending value to be inserted at pop() when it becomes possible
atomic_t<u32> jostling_value;
public:
static constexpr u32 off_wait = 32;
static constexpr u32 off_count = 63;
@@ -258,12 +261,25 @@ public:
}
// Pop unconditionally (loading last value), may require notification
// If the SPU tries to insert a value, do it instead of the SPU
u32 pop()
{
// Value is not cleared and may be read again
const u64 old = data.fetch_and(~(bit_count | bit_wait));
constexpr u64 mask = bit_count | bit_wait;
if (old & bit_wait)
const u64 old = data.fetch_op([&](u64& data)
{
if ((data & mask) == mask)
{
// Insert the pending value, leave no time in which the channel has no data
data = bit_count | jostling_value;
return;
}
data &= ~mask;
});
if ((old & mask) == mask)
{
data.notify_one();
}
@@ -300,6 +316,11 @@ public:
if (spu.is_stopped())
{
if (u64 old2 = data.exchange(0); old2 & bit_count)
{
return static_cast<u32>(old2);
}
return -1;
}
@@ -309,14 +330,13 @@ public:
// Waiting for channel push state availability, actually pushing if specified
bool push_wait(cpu_thread& spu, u32 value, bool push = true)
{
while (true)
{
u64 state;
data.fetch_op([&](u64& data)
{
if (data & bit_count) [[unlikely]]
{
jostling_value.release(push ? value : static_cast<u32>(data));
data |= bit_wait;
}
else if (push)
@@ -333,17 +353,26 @@ public:
return true;
});
while (true)
{
if (!(state & bit_wait))
{
if (!push)
{
data &= ~bit_count;
}
return true;
}
if (spu.is_stopped())
{
data &= ~bit_wait;
return false;
}
thread_ctrl::wait_on(data, state);
state = data;
}
}
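
The jostling_value member and the rewritten pop()/push_wait() above implement a hand-off: a pusher that finds the channel full parks its value and raises bit_wait, and an unconditional pop() that observes both bits set splices the parked value in within the same atomic update, so the channel is never seen empty in between. A stripped-down sketch of the same idea (a toy type with assumed names, not the emulator's spu_channel; one producer and one consumer assumed):

#include <atomic>
#include <cstdint>

struct toy_channel
{
	static constexpr std::uint64_t bit_count = 1ull << 63; // slot holds a value
	static constexpr std::uint64_t bit_wait  = 1ull << 32; // a pusher parked a value

	std::atomic<std::uint64_t> data{0};
	std::atomic<std::uint32_t> jostling_value{0};

	// Push; if the slot is already full, park the value and tell the caller to
	// wait on data until pop() clears bit_wait
	bool try_push(std::uint32_t value)
	{
		std::uint64_t old = data.load();
		while (true)
		{
			if (old & bit_count)
			{
				jostling_value.store(value); // parked for pop() to adopt
				if (data.compare_exchange_weak(old, old | bit_wait))
					return false;
			}
			else if (data.compare_exchange_weak(old, bit_count | value))
			{
				return true;
			}
		}
	}

	// Pop unconditionally; if a pusher is parked, its value is spliced in by the
	// same atomic update, so the channel never passes through an empty state
	std::uint32_t pop()
	{
		constexpr std::uint64_t mask = bit_count | bit_wait;
		std::uint64_t old = data.load();
		while (true)
		{
			const std::uint64_t next = ((old & mask) == mask)
				? (bit_count | jostling_value.load()) // adopt the parked value
				: (old & ~mask);                      // plain pop, slot becomes empty
			if (data.compare_exchange_weak(old, next))
				break;
		}

		if ((old & mask) == mask)
			data.notify_one(); // wake the parked pusher: its push has completed

		return static_cast<std::uint32_t>(old);
	}
};

In the real class the parked pusher then waits on data until bit_wait disappears; the sketch only shows the atomic splice that keeps the channel from ever being observed empty between the pop and the deferred push.
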


@@ -5,6 +5,7 @@
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/PPUOpcodes.h"
LOG_CHANNEL(sys_interrupt);
@@ -24,6 +25,8 @@ lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thre
exists.release(1);
}
void ppu_interrupt_thread_entry(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);
void lv2_int_serv::exec() const
{
thread->cmd_list
@@ -31,14 +34,16 @@ void lv2_int_serv::exec() const
{ ppu_cmd::reset_stack, 0 },
{ ppu_cmd::set_args, 2 }, arg1, arg2,
{ ppu_cmd::opd_call, 0 }, thread->entry_func,
{ ppu_cmd::sleep, 0 }
{ ppu_cmd::sleep, 0 },
{ ppu_cmd::ptr_call, 0 },
std::bit_cast<u64>(&ppu_interrupt_thread_entry)
});
thread->cmd_notify++;
thread->cmd_notify.notify_one();
}
bool ppu_thread_exit(ppu_thread& ppu);
void ppu_thread_exit(ppu_thread&, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*);
void lv2_int_serv::join() const
{
@@ -185,4 +190,61 @@ void sys_interrupt_thread_eoi(ppu_thread& ppu)
sys_interrupt.trace("sys_interrupt_thread_eoi()");
ppu.state += cpu_flag::ret;
lv2_obj::sleep(ppu);
}
void ppu_interrupt_thread_entry(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*)
{
while (true)
{
std::shared_ptr<lv2_int_serv> serv = nullptr;
// Loop endlessly trying to invoke an interrupt if required
idm::select<named_thread<spu_thread>>([&](u32, spu_thread& spu)
{
if (spu.get_type() != spu_type::threaded)
{
auto& ctrl = spu.int_ctrl[2];
if (lv2_obj::check(ctrl.tag))
{
auto& handler = ctrl.tag->handler;
if (lv2_obj::check(handler))
{
if (handler->thread.get() == &ppu)
{
if (spu.ch_out_intr_mbox.get_count() && ctrl.mask & SPU_INT2_STAT_MAILBOX_INT)
{
ctrl.stat |= SPU_INT2_STAT_MAILBOX_INT;
}
if (ctrl.mask & ctrl.stat)
{
ensure(!serv);
serv = handler;
}
}
}
}
}
});
if (serv)
{
// Queue the interrupt; after it has finished, the PPU returns to this loop
serv->exec();
return;
}
const auto state = +ppu.state;
if (::is_stopped(state))
{
return;
}
thread_ctrl::wait_on(ppu.state, state);
}
}


@@ -8,6 +8,7 @@
#include "Emu/Cell/ErrorCodes.h"
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/PPUCallback.h"
#include "Emu/Cell/PPUOpcodes.h"
#include "Emu/Memory/vm_locking.h"
#include "sys_event.h"
#include "sys_process.h"
@@ -35,7 +36,7 @@ struct ppu_thread_cleaner
ppu_thread_cleaner& operator=(const ppu_thread_cleaner&) = delete;
};
bool ppu_thread_exit(ppu_thread& ppu)
void ppu_thread_exit(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struct ppu_intrp_func*)
{
ppu.state += cpu_flag::exit + cpu_flag::wait;
@@ -53,8 +54,6 @@ bool ppu_thread_exit(ppu_thread& ppu)
ppu.call_history.index = 0;
ppu_log.notice("Calling history: %s", str);
}
return false;
}
void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
@@ -117,7 +116,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
thread_ctrl::wait_on(ppu.joiner, ppu_join_status::zombie);
}
ppu_thread_exit(ppu);
ppu_thread_exit(ppu, {}, nullptr, nullptr);
}
s32 sys_ppu_thread_yield(ppu_thread& ppu)