diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp
index 3f40eeae52..a3bb41bf2f 100644
--- a/rpcs3/Emu/Cell/SPUThread.cpp
+++ b/rpcs3/Emu/Cell/SPUThread.cpp
@@ -1394,7 +1394,7 @@ void spu_thread::cpu_on_stop()
 {
 	if (start_time)
 	{
-		ppu_log.warning("'%s' aborted (%fs)", current_func, (get_guest_system_time() - start_time) / 1000000.);
+		ppu_log.warning("'%s' aborted (%fs)", current_func, (get_system_time() - start_time) / 1000000.);
 	}
 	else
 	{
@@ -1549,6 +1549,8 @@ void spu_thread::cpu_task()
 #ifdef __APPLE__
 	pthread_jit_write_protect_np(true);
 #endif
+	start_time = 0;
+
 	// Get next PC and SPU Interrupt status
 	pc = status_npc.load().npc;
 
@@ -5033,6 +5035,8 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
 
 			spu_log.trace("sys_spu_thread_send_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
 
+			spu_function_logger logger(*this, "sys_spu_thread_send_event");
+
 			std::shared_ptr<lv2_event_queue> queue;
 			{
 				std::lock_guard lock(group->mutex);
@@ -5083,6 +5087,8 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
 
 			spu_log.trace("sys_spu_thread_throw_event(spup=%d, data0=0x%x, data1=0x%x)", spup, value & 0x00ffffff, data);
 
+			spu_function_logger logger(*this, "sys_spu_thread_throw_event");
+
 			std::shared_ptr<lv2_event_queue> queue;
 			{
 				std::lock_guard lock{group->mutex};
@@ -5117,6 +5123,8 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
 
 			spu_log.trace("sys_event_flag_set_bit(id=%d, value=0x%x (flag=%d))", data, value, flag);
 
+			spu_function_logger logger(*this, "sys_event_flag_set_bit");
+
 			{
 				std::lock_guard lock(group->mutex);
 
@@ -5160,6 +5168,8 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
 
 			spu_log.trace("sys_event_flag_set_bit_impatient(id=%d, value=0x%x (flag=%d))", data, value, flag);
 
+			spu_function_logger logger(*this, "sys_event_flag_set_bit_impatient");
+
 			// Use the syscall to set flag
 			if (sys_event_flag_set(*this, data, 1ull << flag) + 0u == CELL_EAGAIN)
 			{
@@ -5761,6 +5771,8 @@ bool spu_thread::stop_and_signal(u32 code)
 
 		spu_log.trace("sys_spu_thread_group_exit(status=0x%x)", value);
 
+		spu_function_logger logger(*this, "sys_spu_thread_group_exit");
+
 		while (true)
 		{
 			// Check group status (by actually checking thread status), wait if necessary
@@ -5870,6 +5882,8 @@ bool spu_thread::stop_and_signal(u32 code)
 			fmt::throw_exception("sys_spu_thread_exit(): Out_MBox is empty");
 		}
 
+		spu_function_logger logger(*this, "sys_spu_thread_exit");
+
 		spu_log.trace("sys_spu_thread_exit(status=0x%x)", value);
 		last_exit_status.release(value);
 		set_status_npc();
@@ -6115,7 +6129,7 @@ void spu_thread::wakeup_delay(u32 div) const
 	thread_ctrl::wait_for_accurate(utils::aligned_div(+g_cfg.core.spu_wakeup_delay, div));
 }
 
-spu_function_logger::spu_function_logger(spu_thread& spu, const char* func)
+spu_function_logger::spu_function_logger(spu_thread& spu, const char* func) noexcept
 	: spu(spu)
 {
 	spu.current_func = func;
diff --git a/rpcs3/Emu/Cell/SPUThread.h b/rpcs3/Emu/Cell/SPUThread.h
index a2f24a9836..4d490cfb2d 100644
--- a/rpcs3/Emu/Cell/SPUThread.h
+++ b/rpcs3/Emu/Cell/SPUThread.h
@@ -901,9 +901,9 @@ class spu_function_logger
 	spu_thread& spu;
 
 public:
-	spu_function_logger(spu_thread& spu, const char* func);
+	spu_function_logger(spu_thread& spu, const char* func) noexcept;
 
-	~spu_function_logger()
+	~spu_function_logger() noexcept
 	{
 		if (!spu.is_stopped())
 		{
diff --git a/rpcs3/Emu/System.cpp b/rpcs3/Emu/System.cpp
index 99a7a1f141..3a3fa5c08d 100644
--- a/rpcs3/Emu/System.cpp
+++ b/rpcs3/Emu/System.cpp
@@ -2937,9 +2937,16 @@ s32 error_code::error_report(s32 result, const logs::message* channel, const cha
 
 	if (auto ppu = get_current_cpu_thread<ppu_thread>())
 	{
-		if (ppu->current_function)
+		if (auto current = ppu->current_function)
 		{
-			func = ppu->current_function;
+			func = current;
+		}
+	}
+	else if (auto spu = get_current_cpu_thread<spu_thread>())
+	{
+		if (auto current = spu->current_func; current && spu->start_time)
+		{
+			func = current;
 		}
 	}
 
diff --git a/rpcs3/rpcs3qt/kernel_explorer.cpp b/rpcs3/rpcs3qt/kernel_explorer.cpp
index 2c2c7ef4ab..0122e85109 100644
--- a/rpcs3/rpcs3qt/kernel_explorer.cpp
+++ b/rpcs3/rpcs3qt/kernel_explorer.cpp
@@ -365,21 +365,21 @@ void kernel_explorer::update()
 		{
 			auto& mutex = static_cast<lv2_mutex&>(obj);
 			const auto control = mutex.control.load();
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Mutex 0x%08x: “%s”, %s,%s Owner: %#x, Locks: %u, Key: %#llx, Conds: %u", id, lv2_obj::name64(mutex.name), mutex.protocol,
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Mutex 0x%08x: “%s”", id, lv2_obj::name64(mutex.name))), qstr(fmt::format(u8"Mutex 0x%08x: “%s”, %s,%s Owner: %#x, Locks: %u, Key: %#llx, Conds: %u", id, lv2_obj::name64(mutex.name), mutex.protocol,
 				mutex.recursive == SYS_SYNC_RECURSIVE ? " Recursive," : "", control.owner, +mutex.lock_count, mutex.key, mutex.cond_count))), control.sq);
 			break;
 		}
 		case SYS_COND_OBJECT:
 		{
 			auto& cond = static_cast<lv2_cond&>(obj);
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Cond 0x%08x: “%s”, %s, Mutex: 0x%08x, Key: %#llx", id, lv2_obj::name64(cond.name), cond.mutex->protocol, cond.mtx_id, cond.key))), cond.sq);
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Cond 0x%08x: “%s”", id, lv2_obj::name64(cond.name))), qstr(fmt::format(u8"Cond 0x%08x: “%s”, %s, Mutex: 0x%08x, Key: %#llx", id, lv2_obj::name64(cond.name), cond.mutex->protocol, cond.mtx_id, cond.key))), cond.sq);
 			break;
 		}
 		case SYS_RWLOCK_OBJECT:
 		{
 			auto& rw = static_cast<lv2_rwlock&>(obj);
 			const s64 val = rw.owner;
-			auto tree = add_solid_node(node, qstr(fmt::format(u8"RW Lock 0x%08x: “%s”, %s, Owner: %#x(%d), Key: %#llx", id, lv2_obj::name64(rw.name), rw.protocol,
+			auto tree = add_solid_node(node, qstr(fmt::format(u8"RW Lock 0x%08x: “%s”", id, lv2_obj::name64(rw.name))), qstr(fmt::format(u8"RW Lock 0x%08x: “%s”, %s, Owner: %#x(%d), Key: %#llx", id, lv2_obj::name64(rw.name), rw.protocol,
 				std::max<s64>(0, val >> 1), -std::min<s64>(0, val >> 1), rw.key)));
 
 			if (auto rq = +rw.rq)
@@ -417,7 +417,7 @@ void kernel_explorer::update()
 		case SYS_EVENT_QUEUE_OBJECT:
 		{
 			auto& eq = static_cast<lv2_event_queue&>(obj);
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Event Queue 0x%08x: “%s”, %s, %s, Key: %#llx, Events: %zu/%d", id, lv2_obj::name64(eq.name), eq.protocol,
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Event Queue 0x%08x: “%s”", id, lv2_obj::name64(eq.name))), qstr(fmt::format(u8"Event Queue 0x%08x: “%s”, %s, %s, Key: %#llx, Events: %zu/%d", id, lv2_obj::name64(eq.name), eq.protocol,
 				eq.type == SYS_SPU_QUEUE ? "SPU" : "PPU", eq.key, eq.events.size(), eq.size))), eq.type == SYS_SPU_QUEUE ? static_cast<cpu_thread*>(+eq.sq) : +eq.pq);
 			break;
 		}
@@ -515,11 +515,11 @@ void kernel_explorer::update()
 			}
 			else
 			{
-				show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s, Signal: %#x (unmapped/invalid control data at *0x%x)", id, lv2_obj::name64(lwm.name), lwm.protocol, +lv2_control.signaled, lwm.control))), lv2_control.sq);
+				show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”", id, lv2_obj::name64(lwm.name))), qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s, Signal: %#x (unmapped/invalid control data at *0x%x)", id, lv2_obj::name64(lwm.name), lwm.protocol, +lv2_control.signaled, lwm.control))), lv2_control.sq);
 				break;
 			}
 
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s,%s Owner: %s, Locks: %u, Signal: %#x, Control: *0x%x", id, lv2_obj::name64(lwm.name), lwm.protocol,
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”", id, lv2_obj::name64(lwm.name))), qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s,%s Owner: %s, Locks: %u, Signal: %#x, Control: *0x%x", id, lv2_obj::name64(lwm.name), lwm.protocol,
 				(lwm_data.attribute & SYS_SYNC_RECURSIVE) ? " Recursive," : "", owner_str, lwm_data.recursive_count, +lv2_control.signaled, lwm.control))), lv2_control.sq);
 			break;
 		}
@@ -538,20 +538,20 @@ void kernel_explorer::update()
 		{
 			auto& sema = static_cast<lv2_sema&>(obj);
 			const auto val = +sema.val;
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Sema 0x%08x: “%s”, %s, Count: %d/%d, Key: %#llx", id, lv2_obj::name64(sema.name), sema.protocol,
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Sema 0x%08x: “%s”", id, lv2_obj::name64(sema.name))), qstr(fmt::format(u8"Sema 0x%08x: “%s”, %s, Count: %d/%d, Key: %#llx", id, lv2_obj::name64(sema.name), sema.protocol,
 				std::max(val, 0), sema.max, sema.key, -std::min(val, 0)))), sema.sq);
 			break;
 		}
 		case SYS_LWCOND_OBJECT:
 		{
 			auto& lwc = static_cast<lv2_lwcond&>(obj);
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWCond 0x%08x: “%s”, %s, OG LWMutex: 0x%08x, Control: *0x%x", id, lv2_obj::name64(lwc.name), lwc.protocol, lwc.lwid, lwc.control))), lwc.sq);
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWCond 0x%08x: “%s”", id, lv2_obj::name64(lwc.name))), qstr(fmt::format(u8"LWCond 0x%08x: “%s”, %s, OG LWMutex: 0x%08x, Control: *0x%x", id, lv2_obj::name64(lwc.name), lwc.protocol, lwc.lwid, lwc.control))), lwc.sq);
 			break;
 		}
 		case SYS_EVENT_FLAG_OBJECT:
 		{
 			auto& ef = static_cast<lv2_event_flag&>(obj);
-			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Event Flag 0x%08x: “%s”, %s, Type: 0x%x, Key: %#llx, Pattern: 0x%llx", id, lv2_obj::name64(ef.name), ef.protocol,
+			show_waiters(add_solid_node(node, qstr(fmt::format(u8"Event Flag 0x%08x: “%s”", id, lv2_obj::name64(ef.name))), qstr(fmt::format(u8"Event Flag 0x%08x: “%s”, %s, Type: 0x%x, Key: %#llx, Pattern: 0x%llx", id, lv2_obj::name64(ef.name), ef.protocol,
 				ef.type, ef.key, ef.pattern.load()))), ef.sq);
 			break;
 		}
@@ -602,7 +602,19 @@ void kernel_explorer::update()
 	std::optional> lock_idm_lv2(std::in_place, id_manager::g_mutex, lv2_obj::g_mutex);
 
 	// Postponed as much as possible for time accuracy
-	const u64 current_time = get_guest_system_time();
+	u64 current_time_storage = 0;
+
+	auto get_current_time = [&current_time_storage]()
+	{
+		if (!current_time_storage)
+		{
+			// Evaluate on the first use for better consistency (this function can be quite slow)
+			// Yet once it is evaluated, keep it on the same value for consistency.
+			current_time_storage = get_guest_system_time();
+		}
+
+		return current_time_storage;
+	};
 
 	auto get_wait_time_str = [&](u64 start_time) -> std::string
 	{
@@ -611,7 +623,12 @@ void kernel_explorer::update()
 			return {};
 		}
 
-		const f64 wait_time = (current_time - start_time) / 1000000.;
+		if (start_time > get_current_time() && start_time - get_current_time() > 1'000'000)
+		{
+			return " (time underflow)";
+		}
+
+		const f64 wait_time = (get_current_time() >= start_time ? get_current_time() - start_time : 0) / 1000000.;
 
 		return fmt::format(" (%.1fs)", wait_time);
 	};
@@ -628,7 +645,10 @@ void kernel_explorer::update()
 
 	idm::select<named_thread<spu_thread>>([&](u32 /*id*/, spu_thread& spu)
 	{
-		QTreeWidgetItem* spu_thread_tree = add_solid_node(find_node(root, additional_nodes::spu_threads), qstr(fmt::format(u8"SPU 0x%07x: “%s”, State: %s, Type: %s%s", spu.lv2_id, *spu.spu_tname.load(), spu.state.load(), spu.get_type(), get_wait_time_str(spu.start_time))));
+		const auto func = spu.current_func;
+		const u64 start_time = spu.start_time;
+
+		QTreeWidgetItem* spu_thread_tree = add_solid_node(find_node(root, additional_nodes::spu_threads), qstr(fmt::format(u8"SPU 0x%07x: “%s”", spu.lv2_id, *spu.spu_tname.load())), qstr(fmt::format(u8"SPU 0x%07x: “%s”, State: %s, Type: %s, Func: \"%s\"%s", spu.lv2_id, *spu.spu_tname.load(), spu.state.load(), spu.get_type(), start_time && func ? func : "", start_time ? get_wait_time_str(get_guest_system_time(start_time)) : "")));
 
 		if (spu.get_type() == spu_type::threaded)
 		{
@@ -684,7 +704,7 @@ void kernel_explorer::update()
 
 	idm::select<lv2_spu_group>([&](u32 id, lv2_spu_group& tg)
 	{
-		QTreeWidgetItem* spu_tree = add_solid_node(find_node(root, additional_nodes::spu_thread_groups), qstr(fmt::format(u8"SPU Group 0x%07x: “%s”, Status = %s, Priority = %d, Type = 0x%x", id, tg.name, tg.run_state.load(), +tg.prio, tg.type)));
+		QTreeWidgetItem* spu_tree = add_solid_node(find_node(root, additional_nodes::spu_thread_groups), qstr(fmt::format(u8"SPU Group 0x%07x: “%s”, Type = 0x%x", id, tg.name, tg.type)), qstr(fmt::format(u8"SPU Group 0x%07x: “%s”, Status = %s, Priority = %d, Type = 0x%x", id, tg.name, tg.run_state.load(), +tg.prio, tg.type)));
 
 		if (tg.name.ends_with("CellSpursKernelGroup"sv))
 		{
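
Note on the pattern: spu_function_logger is a scope guard. Its constructor publishes the function name and a start timestamp on the SPU thread object, and its destructor clears the timestamp when the handler's scope is left (in the class above, only if the thread is not being stopped), so readers such as the kernel explorer and error_report treat a zero start_time as "no tracked call in flight". A minimal self-contained sketch of that idea; thread_state, now_us and scoped_function_logger are illustrative names, not the RPCS3 API:

#include <chrono>
#include <cstdint>

// Illustrative stand-in for the per-thread fields the logger publishes.
struct thread_state
{
	const char* current_func = "";
	std::uint64_t start_time = 0; // 0 means "no tracked call in flight"
};

static std::uint64_t now_us()
{
	using namespace std::chrono;
	return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

class scoped_function_logger
{
	thread_state& st;

public:
	scoped_function_logger(thread_state& st, const char* func) noexcept
		: st(st)
	{
		// On entry: publish the name, then the start timestamp.
		st.current_func = func;
		st.start_time = now_us();
	}

	~scoped_function_logger() noexcept
	{
		// On exit: clearing the timestamp marks the stored name as stale.
		st.start_time = 0;
	}
};

// Usage, mirroring how the diff declares the guard at the top of a handler:
void handle_send_event(thread_state& st)
{
	scoped_function_logger logger(st, "sys_spu_thread_send_event");
	// ... handler body; other threads (e.g. a debugger UI) can read
	// st.current_func / st.start_time for the whole scope ...
}

Declaring the guard as a local at the top of each handler, as the diff does for sys_spu_thread_send_event and the other syscalls, guarantees the clear runs on every exit path, including early returns.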
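
The get_current_time lambda added to kernel_explorer::update() is a lazily evaluated, call-once cache: the (potentially slow) clock query runs only if some row actually needs it, and every later call returns the same cached value so all wait times in one refresh are measured against a single "now". A standalone sketch of the same technique; query_clock_us and the helper names are placeholders rather than the emulator's get_guest_system_time:

#include <chrono>
#include <cstdint>
#include <cstdio>
#include <string>

static std::uint64_t query_clock_us()
{
	using namespace std::chrono;
	return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

int main()
{
	std::uint64_t cached_now = 0; // 0 = not evaluated yet

	auto get_now = [&cached_now]()
	{
		if (!cached_now)
		{
			// Query the clock on first use only; afterwards reuse the cached
			// value so every consumer of this pass sees one consistent "now".
			cached_now = query_clock_us();
		}

		return cached_now;
	};

	auto wait_str = [&](std::uint64_t start_time) -> std::string
	{
		if (!start_time)
		{
			return {};
		}

		// Clamp instead of underflowing if "start" is ahead of the cached "now".
		const double secs = (get_now() >= start_time ? get_now() - start_time : 0) / 1000000.;
		return " (" + std::to_string(secs) + "s)";
	};

	// Example: a wait that started 2.5 seconds before the cached "now".
	std::printf("%s\n", wait_str(get_now() - 2'500'000).c_str());
}

The zero-means-unset convention is the same one the diff relies on for both current_time_storage and spu_thread::start_time.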