diff --git a/Source/Core/Core/CoreTiming.cpp b/Source/Core/Core/CoreTiming.cpp
index f34bc5f9de..549c455a3f 100644
--- a/Source/Core/Core/CoreTiming.cpp
+++ b/Source/Core/Core/CoreTiming.cpp
@@ -32,16 +32,6 @@ namespace CoreTiming
 {
-// Sort by time, unless the times are the same, in which case sort by the order added to the queue
-static bool operator>(const Event& left, const Event& right)
-{
-  return std::tie(left.time, left.fifo_order) > std::tie(right.time, right.fifo_order);
-}
-static bool operator<(const Event& left, const Event& right)
-{
-  return std::tie(left.time, left.fifo_order) < std::tie(right.time, right.fifo_order);
-}
-
 static constexpr int MAX_SLICE_LENGTH = 20000;
 
 static void EmptyTimedCallback(Core::System& system, u64 userdata, s64 cyclesLate)
@@ -205,7 +195,7 @@ void CoreTimingManager::DoState(PointerWrap& p)
     // When loading from a save state, we must assume the Event order is random and meaningless.
     // The exact layout of the heap in memory is implementation defined, therefore it is platform
     // and library version specific.
-    std::make_heap(m_event_queue.begin(), m_event_queue.end(), std::greater<Event>());
+    std::ranges::make_heap(m_event_queue, std::ranges::greater{});
 
     // The stave state has changed the time, so our previous Throttle targets are invalid.
     // Especially when global_time goes down; So we create a fake throttle update.
@@ -263,7 +253,7 @@ void CoreTimingManager::ScheduleEvent(s64 cycles_into_future, EventType* event_t
      ForceExceptionCheck(cycles_into_future);
 
    m_event_queue.emplace_back(Event{timeout, m_event_fifo_id++, userdata, event_type});
-    std::push_heap(m_event_queue.begin(), m_event_queue.end(), std::greater<Event>());
+    std::ranges::push_heap(m_event_queue, std::ranges::greater{});
   }
   else
   {
@@ -288,7 +278,7 @@ void CoreTimingManager::RemoveEvent(EventType* event_type)
   // Removing random items breaks the invariant so we have to re-establish it.
   if (erased != 0)
   {
-    std::make_heap(m_event_queue.begin(), m_event_queue.end(), std::greater<Event>());
+    std::ranges::make_heap(m_event_queue, std::ranges::greater{});
   }
 }
 
@@ -317,7 +307,7 @@ void CoreTimingManager::MoveEvents()
   {
     ev.fifo_order = m_event_fifo_id++;
     m_event_queue.emplace_back(std::move(ev));
-    std::push_heap(m_event_queue.begin(), m_event_queue.end(), std::greater<Event>());
+    std::ranges::push_heap(m_event_queue, std::ranges::greater{});
   }
 }
 
@@ -341,7 +331,7 @@ void CoreTimingManager::Advance()
   while (!m_event_queue.empty() && m_event_queue.front().time <= m_globals.global_timer)
   {
     Event evt = std::move(m_event_queue.front());
-    std::pop_heap(m_event_queue.begin(), m_event_queue.end(), std::greater<Event>());
+    std::ranges::pop_heap(m_event_queue, std::ranges::greater{});
     m_event_queue.pop_back();
 
     Throttle(evt.time);
@@ -440,7 +430,7 @@ bool CoreTimingManager::UseSyncOnSkipIdle() const
 void CoreTimingManager::LogPendingEvents() const
 {
   auto clone = m_event_queue;
-  std::sort(clone.begin(), clone.end());
+  std::ranges::sort(clone);
   for (const Event& ev : clone)
   {
     INFO_LOG_FMT(POWERPC, "PENDING: Now: {} Pending: {} Type: {}", m_globals.global_timer, ev.time,
@@ -483,7 +473,7 @@ std::string CoreTimingManager::GetScheduledEventsSummary() const
   text.reserve(1000);
 
   auto clone = m_event_queue;
-  std::sort(clone.begin(), clone.end());
+  std::ranges::sort(clone);
   for (const Event& ev : clone)
   {
     text += fmt::format("{} : {} {:016x}\n", *ev.type->name, ev.time, ev.userdata);
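For context, the pattern above keeps `m_event_queue` as a min-heap: passing `std::ranges::greater{}` inverts the default max-heap behaviour of the `*_heap` algorithms, so the earliest event stays at `front()`. Below is a minimal, self-contained sketch of that pattern using a cut-down stand-in `Event` with only `time` and `fifo_order` fields (not the real `CoreTiming::Event`).

```cpp
// Simplified stand-in for CoreTiming::Event: just enough state to show the ordering.
#include <algorithm>
#include <compare>
#include <cstdint>
#include <functional>
#include <iostream>
#include <tuple>
#include <vector>

struct Event
{
  int64_t time;
  uint64_t fifo_order;

  // Earlier time first; insertion (FIFO) order breaks ties.
  constexpr auto operator<=>(const Event& other) const
  {
    return std::tie(time, fifo_order) <=> std::tie(other.time, other.fifo_order);
  }
  constexpr bool operator==(const Event& other) const = default;
};

int main()
{
  std::vector<Event> queue;
  uint64_t fifo_id = 0;

  // push_heap with std::ranges::greater keeps the *smallest* element at queue.front(),
  // i.e. the vector behaves as a min-heap keyed on (time, fifo_order).
  for (const int64_t time : {500, 100, 300, 100})
  {
    queue.push_back(Event{time, fifo_id++});
    std::ranges::push_heap(queue, std::ranges::greater{});
  }

  // Pop events in scheduling order: 100 (#1), 100 (#3), 300 (#2), 500 (#0).
  while (!queue.empty())
  {
    const Event next = queue.front();
    std::ranges::pop_heap(queue, std::ranges::greater{});
    queue.pop_back();
    std::cout << next.time << " (#" << next.fifo_order << ")\n";
  }
}
```

The `operator<=>`/`operator==` pair in the sketch mirrors what the CoreTiming.h hunk below adds, since `std::ranges::greater` requires the element type to be totally ordered.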
diff --git a/Source/Core/Core/CoreTiming.h b/Source/Core/Core/CoreTiming.h
index 6c60b74479..b76e898e70 100644
--- a/Source/Core/Core/CoreTiming.h
+++ b/Source/Core/Core/CoreTiming.h
@@ -16,8 +16,10 @@
 // inside callback:
 //   ScheduleEvent(periodInCycles - cyclesLate, callback, "whatever")
 
+#include <compare>
 #include <mutex>
 #include <string>
+#include <tuple>
 #include <unordered_map>
 #include <vector>
 
@@ -58,6 +60,16 @@ struct Event
   u64 fifo_order;
   u64 userdata;
   EventType* type;
+
+  // Sort by time, unless the times are the same, in which case sort by the order added to the queue
+  constexpr auto operator<=>(const Event& other) const
+  {
+    return std::tie(time, fifo_order) <=> std::tie(other.time, other.fifo_order);
+  }
+  constexpr bool operator==(const Event& other) const
+  {
+    return std::tie(time, fifo_order) == std::tie(other.time, other.fifo_order);
+  }
 };
 
 enum class FromThread
@@ -163,7 +175,7 @@ private:
   std::unordered_map<std::string, EventType> m_event_types;
 
   // STATE_TO_SAVE
-  // The queue is a min-heap using std::make_heap/push_heap/pop_heap.
+  // The queue is a min-heap using std::ranges::make_heap/push_heap/pop_heap.
   // We don't use std::priority_queue because we need to be able to serialize, unserialize and
   // erase arbitrary events (RemoveEvent()) regardless of the queue order. These aren't accomodated
   // by the standard adaptor class.
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp
index a82949bf6e..19cd2b0f32 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.cpp
@@ -88,7 +88,7 @@ u32 Arm64RegCache::GetUnlockedRegisterCount() const
 
 void Arm64RegCache::LockRegister(ARM64Reg host_reg)
 {
-  auto reg = std::find(m_host_registers.begin(), m_host_registers.end(), host_reg);
+  auto reg = std::ranges::find(m_host_registers, host_reg, &HostReg::GetReg);
   ASSERT_MSG(DYNA_REC, reg != m_host_registers.end(),
              "Don't try locking a register that isn't in the cache. Reg {}",
              static_cast<int>(host_reg));
@@ -97,7 +97,7 @@ void Arm64RegCache::LockRegister(ARM64Reg host_reg)
 
 void Arm64RegCache::UnlockRegister(ARM64Reg host_reg)
 {
-  auto reg = std::find(m_host_registers.begin(), m_host_registers.end(), host_reg);
+  auto reg = std::ranges::find(m_host_registers, host_reg, &HostReg::GetReg);
   ASSERT_MSG(DYNA_REC, reg != m_host_registers.end(),
              "Don't try unlocking a register that isn't in the cache. Reg {}",
              static_cast<int>(host_reg));
diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h
index 5182a8456d..019edaa726 100644
--- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h
+++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h
@@ -150,8 +150,6 @@ public:
   void Unlock() { m_locked = false; }
   Arm64Gen::ARM64Reg GetReg() const { return m_reg; }
 
-  bool operator==(Arm64Gen::ARM64Reg reg) const { return reg == m_reg; }
-
 private:
   Arm64Gen::ARM64Reg m_reg = Arm64Gen::ARM64Reg::INVALID_REG;
   bool m_locked = false;
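The JitArm64 hunks drop `HostReg::operator==` in favour of a projection: `std::ranges::find` applies `&HostReg::GetReg` to each cached element before comparing the result against `host_reg`. Here is a rough sketch of that lookup with simplified stand-in types (`ARM64Reg` and `HostReg` below are illustrative, not the real Dolphin declarations).

```cpp
// Illustrative stand-ins; the real ARM64Reg/HostReg live in Dolphin's JitArm64 code.
#include <algorithm>
#include <cassert>
#include <vector>

enum class ARM64Reg { W0, W1, W2, W30 };

class HostReg
{
public:
  explicit HostReg(ARM64Reg reg) : m_reg(reg) {}
  ARM64Reg GetReg() const { return m_reg; }
  void Lock() { m_locked = true; }
  bool IsLocked() const { return m_locked; }

private:
  ARM64Reg m_reg;
  bool m_locked = false;
};

int main()
{
  std::vector<HostReg> registers{HostReg{ARM64Reg::W0}, HostReg{ARM64Reg::W1},
                                 HostReg{ARM64Reg::W2}};

  // The projection &HostReg::GetReg is applied to each element before the comparison,
  // so neither HostReg nor ARM64Reg needs a custom operator== against the other type.
  const auto it = std::ranges::find(registers, ARM64Reg::W1, &HostReg::GetReg);
  assert(it != registers.end());
  it->Lock();
  assert(it->IsLocked());
}
```

The design point is that the element type no longer has to know how it will be searched; the call site states the key through the projection.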
Reg {}", static_cast(host_reg)); @@ -97,7 +97,7 @@ void Arm64RegCache::LockRegister(ARM64Reg host_reg) void Arm64RegCache::UnlockRegister(ARM64Reg host_reg) { - auto reg = std::find(m_host_registers.begin(), m_host_registers.end(), host_reg); + auto reg = std::ranges::find(m_host_registers, host_reg, &HostReg::GetReg); ASSERT_MSG(DYNA_REC, reg != m_host_registers.end(), "Don't try unlocking a register that isn't in the cache. Reg {}", static_cast(host_reg)); diff --git a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h index 5182a8456d..019edaa726 100644 --- a/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h +++ b/Source/Core/Core/PowerPC/JitArm64/JitArm64_RegCache.h @@ -150,8 +150,6 @@ public: void Unlock() { m_locked = false; } Arm64Gen::ARM64Reg GetReg() const { return m_reg; } - bool operator==(Arm64Gen::ARM64Reg reg) const { return reg == m_reg; } - private: Arm64Gen::ARM64Reg m_reg = Arm64Gen::ARM64Reg::INVALID_REG; bool m_locked = false; diff --git a/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.cpp b/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.cpp index 76d1ad6af6..ef71ba6eb8 100644 --- a/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.cpp +++ b/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.cpp @@ -330,8 +330,3 @@ void GraphicsModConfig::DeserializeFromProfile(const picojson::object& obj) } } } - -bool GraphicsModConfig::operator<(const GraphicsModConfig& other) const -{ - return m_weight < other.m_weight; -} diff --git a/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.h b/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.h index fd35b963dd..f845c612ad 100644 --- a/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.h +++ b/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsMod.h @@ -43,6 +43,4 @@ struct GraphicsModConfig void SerializeToProfile(picojson::object* value) const; void DeserializeFromProfile(const picojson::object& value); - - bool operator<(const GraphicsModConfig& other) const; }; diff --git a/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsModGroup.cpp b/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsModGroup.cpp index 5fa2c011b6..3c234e9ec1 100644 --- a/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsModGroup.cpp +++ b/Source/Core/VideoCommon/GraphicsModSystem/Config/GraphicsModGroup.cpp @@ -111,7 +111,7 @@ void GraphicsModGroupConfig::Load() try_add_mod(graphics_mod_directory, GraphicsModConfig::Source::System); } - std::sort(m_graphics_mods.begin(), m_graphics_mods.end()); + std::ranges::sort(m_graphics_mods, {}, &GraphicsModConfig::m_weight); for (auto& mod : m_graphics_mods) { m_path_to_graphics_mod[mod.GetAbsolutePath()] = &mod;