From 4fd21f7764edb8de895c6785eb8fc2e1b0b79e28 Mon Sep 17 00:00:00 2001
From: Megamouse
Date: Thu, 18 May 2023 00:10:49 +0200
Subject: [PATCH] Windows Use vector for performance counter data allocation

---
 rpcs3/util/cpu_stats.cpp | 48 +++++++++++++++++++++++++---------------
 rpcs3/util/cpu_stats.hpp |  1 +
 2 files changed, 31 insertions(+), 18 deletions(-)

diff --git a/rpcs3/util/cpu_stats.cpp b/rpcs3/util/cpu_stats.cpp
index 0f4ded60b1..7405382534 100644
--- a/rpcs3/util/cpu_stats.cpp
+++ b/rpcs3/util/cpu_stats.cpp
@@ -2,7 +2,9 @@
 #include "util/cpu_stats.hpp"
 #include "util/sysinfo.hpp"
 #include "util/logs.hpp"
+#include "util/asm.hpp"
 #include "Utilities/StrUtil.h"
+
 #include
 
 #ifdef _WIN32
@@ -80,6 +82,20 @@ namespace utils
 #endif
 	}
 
+	cpu_stats::~cpu_stats()
+	{
+#ifdef _WIN32
+		if (m_cpu_query)
+		{
+			PDH_STATUS status = PdhCloseQuery(m_cpu_query);
+			if (ERROR_SUCCESS != status)
+			{
+				perf_log.error("Failed to close cpu query of per core cpu usage: %s", pdh_error(status));
+			}
+		}
+#endif
+	}
+
 	void cpu_stats::init_cpu_query()
 	{
 #ifdef _WIN32
@@ -139,37 +155,35 @@ namespace utils
 			return;
 		}
 
-		PDH_FMT_COUNTERVALUE counterVal{};
-		DWORD dwBufferSize = 0;                   // Size of the pItems buffer
-		DWORD dwItemCount = 0;                    // Number of items in the pItems buffer
-		PDH_FMT_COUNTERVALUE_ITEM *pItems = NULL; // Array of PDH_FMT_COUNTERVALUE_ITEM structures
+		DWORD dwBufferSize = 0; // Size of the items buffer
+		DWORD dwItemCount = 0;  // Number of items in the items buffer
 
-		status = PdhGetFormattedCounterArray(m_cpu_cores, PDH_FMT_DOUBLE, &dwBufferSize, &dwItemCount, pItems);
+		status = PdhGetFormattedCounterArray(m_cpu_cores, PDH_FMT_DOUBLE, &dwBufferSize, &dwItemCount, nullptr);
 		if (PDH_MORE_DATA == status)
 		{
-			pItems = (PDH_FMT_COUNTERVALUE_ITEM*)malloc(dwBufferSize);
-			if (pItems)
+			std::vector<PDH_FMT_COUNTERVALUE_ITEM> items(utils::aligned_div(dwBufferSize, sizeof(PDH_FMT_COUNTERVALUE_ITEM)));
+			if (items.size() >= dwItemCount)
 			{
-				status = PdhGetFormattedCounterArray(m_cpu_cores, PDH_FMT_DOUBLE, &dwBufferSize, &dwItemCount, pItems);
+				status = PdhGetFormattedCounterArray(m_cpu_cores, PDH_FMT_DOUBLE, &dwBufferSize, &dwItemCount, items.data());
 				if (ERROR_SUCCESS == status)
 				{
-					ensure(dwItemCount > 0);
-					ensure((dwItemCount - 1) == per_core_usage.size()); // Remove one for _Total
+					ensure(dwItemCount == per_core_usage.size() + 1); // Plus one for _Total
 
 					// Loop through the array and get the instance name and percentage.
-					for (DWORD i = 0; i < dwItemCount; i++)
+					for (usz i = 0; i < dwItemCount; i++)
 					{
-						const std::string token = wchar_to_utf8(pItems[i].szName);
+						const PDH_FMT_COUNTERVALUE_ITEM& item = items[i];
+						const std::string token = wchar_to_utf8(item.szName);
 
 						if (const std::string lower = fmt::to_lower(token); lower.find("total") != umax)
 						{
-							total_usage = pItems[i].FmtValue.doubleValue;
+							total_usage = item.FmtValue.doubleValue;
 							continue;
 						}
 
 						if (const auto [success, cpu_index] = string_to_number(token); success && cpu_index < dwItemCount)
 						{
-							per_core_usage[cpu_index] = pItems[i].FmtValue.doubleValue;
+							per_core_usage[cpu_index] = item.FmtValue.doubleValue;
 						}
 						else if (!success)
 						{
@@ -188,10 +202,9 @@ namespace utils
 			}
 			else
 			{
-				perf_log.error("Failed to allocate buffer for per core cpu usage.");
+				perf_log.error("Failed to allocate buffer for per core cpu usage. (size=%d, dwItemCount=%d)", items.size(), dwItemCount);
 			}
 		}
 
-		if (pItems) free(pItems);
 
 #elif __linux__
@@ -362,8 +375,7 @@ namespace utils
 		entry.dwSize = sizeof(entry);
 		// get the first process info.
-		BOOL ret = true;
-		ret = Process32First(snapshot, &entry);
+		BOOL ret = Process32First(snapshot, &entry);
 
 		while (ret && entry.th32ProcessID != id)
 		{
 			ret = Process32Next(snapshot, &entry);
diff --git a/rpcs3/util/cpu_stats.hpp b/rpcs3/util/cpu_stats.hpp
index bc8184a2f1..a873bdb420 100644
--- a/rpcs3/util/cpu_stats.hpp
+++ b/rpcs3/util/cpu_stats.hpp
@@ -28,6 +28,7 @@ namespace utils
 	public:
 		cpu_stats();
+		~cpu_stats();
 
 		double get_usage();
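
Note on the pattern used above: PdhGetFormattedCounterArray() follows the usual PDH two-call protocol. The first call passes a null buffer and returns PDH_MORE_DATA together with the required byte count; the caller then allocates that much and calls again to fill the array. The patch keeps that protocol but sizes a std::vector<PDH_FMT_COUNTERVALUE_ITEM> from the byte count (via utils::aligned_div) instead of using malloc/free, so the buffer is released automatically when it goes out of scope. The standalone sketch below illustrates the same two-call pattern; the counter path, the one-second sampling delay, the explicit W-suffixed PDH calls, and the inline round-up arithmetic are illustrative assumptions for this sketch, not RPCS3 code.

// Minimal sketch of the PDH "ask for the size, then fill" pattern adopted by the patch.
// Counter path and error handling are assumptions; errors simply end the program.
#include <windows.h>
#include <pdh.h>
#include <pdhmsg.h>
#include <vector>
#include <cstdio>

#pragma comment(lib, "pdh.lib") // MSVC; with MinGW link -lpdh instead

int main()
{
	PDH_HQUERY query = nullptr;
	PDH_HCOUNTER counter = nullptr;

	if (PdhOpenQuery(nullptr, 0, &query) != ERROR_SUCCESS)
		return 1;

	// One instance per logical CPU plus "_Total" (the counter path is an assumption).
	if (PdhAddEnglishCounterW(query, L"\\Processor(*)\\% Processor Time", 0, &counter) != ERROR_SUCCESS)
		return 1;

	PdhCollectQueryData(query); // The first sample only primes the counter
	Sleep(1000);                // Sampling interval chosen arbitrarily for the sketch
	PdhCollectQueryData(query);

	DWORD buffer_size = 0; // Required buffer size in bytes, reported by the first call
	DWORD item_count = 0;  // Number of PDH_FMT_COUNTERVALUE_ITEM_W entries

	// Call 1: null buffer -> PDH_MORE_DATA and the byte count we need.
	PDH_STATUS status = PdhGetFormattedCounterArrayW(counter, PDH_FMT_DOUBLE, &buffer_size, &item_count, nullptr);
	if (status == PDH_MORE_DATA)
	{
		// Round the byte count up to whole items (the patch uses utils::aligned_div for this).
		std::vector<PDH_FMT_COUNTERVALUE_ITEM_W> items(
			(buffer_size + sizeof(PDH_FMT_COUNTERVALUE_ITEM_W) - 1) / sizeof(PDH_FMT_COUNTERVALUE_ITEM_W));

		// Call 2: same arguments, but now with a buffer large enough to hold the array.
		status = PdhGetFormattedCounterArrayW(counter, PDH_FMT_DOUBLE, &buffer_size, &item_count, items.data());
		if (status == ERROR_SUCCESS)
		{
			for (DWORD i = 0; i < item_count; i++)
			{
				wprintf(L"%ls: %.1f%%\n", items[i].szName, items[i].FmtValue.doubleValue);
			}
		}
	}

	PdhCloseQuery(query); // Same cleanup the new ~cpu_stats() destructor performs
	return 0;
}

The sketch builds as a plain Win32 console program linked against pdh.lib. The point of interest is that every exit path after the second PdhGetFormattedCounterArrayW call releases the buffer through the vector's destructor, which is exactly the bookkeeping the removed "if (pItems) free(pItems);" line had to do by hand.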