GPU: Further improve vsync handling

Stenzek 2024-05-24 01:59:35 +10:00
parent 4e922a34a7
commit 42a5fe0a6e
20 changed files with 184 additions and 163 deletions

View File

@ -288,8 +288,7 @@ bool Host::CreateGPUDevice(RenderAPI api, Error* error)
if (!g_gpu_device || !g_gpu_device->Create(
g_settings.gpu_adapter,
g_settings.gpu_disable_shader_cache ? std::string_view() : std::string_view(EmuFolders::Cache),
SHADER_CACHE_VERSION, g_settings.gpu_use_debug_device, System::IsHostVSyncEffectivelyEnabled(),
System::IsHostVSyncEffectivelyEnabled() && !System::IsHostVSyncUsedForTiming(),
SHADER_CACHE_VERSION, g_settings.gpu_use_debug_device, System::GetEffectiveVSyncMode(),
g_settings.gpu_threaded_presentation, exclusive_fullscreen_control,
static_cast<GPUDevice::FeatureMask>(disabled_features), &create_error))
{

View File

@ -122,6 +122,8 @@ static void Throttle(Common::Timer::Value current_time);
static void UpdatePerformanceCounters();
static void AccumulatePreFrameSleepTime();
static void UpdatePreFrameSleepTime();
static void UpdateDisplayVSync();
static void UpdateDisplayMaxFPS();
static void SetRewinding(bool enabled);
static bool SaveRewindState();
@ -184,7 +186,6 @@ static bool s_turbo_enabled = false;
static bool s_throttler_enabled = false;
static bool s_optimal_frame_pacing = false;
static bool s_pre_frame_sleep = false;
static bool s_syncing_to_host = false;
static bool s_syncing_to_host_with_vsync = false;
static bool s_skip_presenting_duplicate_frames = false;
static u32 s_skipped_frame_count = 0;
@ -1263,6 +1264,7 @@ void System::PauseSystem(bool paused)
Host::OnSystemPaused();
Host::OnIdleStateChanged();
UpdateDisplayMaxFPS();
InvalidateDisplay();
}
else
@ -1277,6 +1279,7 @@ void System::PauseSystem(bool paused)
Host::OnSystemResumed();
Host::OnIdleStateChanged();
UpdateDisplayMaxFPS();
ResetPerformanceCounters();
ResetThrottler();
}
@ -1819,7 +1822,8 @@ void System::DestroySystem()
// Restore present-all-frames behavior.
if (s_keep_gpu_device_on_shutdown && g_gpu_device)
{
g_gpu_device->SetDisplayMaxFPS(0.0f);
UpdateDisplayVSync();
UpdateDisplayMaxFPS();
}
else
{
@ -2685,7 +2689,7 @@ float System::GetTargetSpeed()
float System::GetAudioNominalRate()
{
return s_throttler_enabled ? s_target_speed : 1.0f;
return (s_throttler_enabled || s_syncing_to_host_with_vsync) ? s_target_speed : 1.0f;
}
void System::UpdatePerformanceCounters()
@ -2845,32 +2849,31 @@ void System::UpdateSpeedLimiterState()
g_gpu_device->GetWindowInfo().IsSurfaceless(); // surfaceless check for regtest
s_skip_presenting_duplicate_frames = s_throttler_enabled && g_settings.display_skip_presenting_duplicate_frames;
s_pre_frame_sleep = s_optimal_frame_pacing && g_settings.display_pre_frame_sleep;
s_syncing_to_host = false;
s_syncing_to_host_with_vsync = false;
if (g_settings.sync_to_host_refresh_rate &&
(g_settings.audio_stream_parameters.stretch_mode != AudioStretchMode::Off) && s_target_speed == 1.0f && IsValid())
if (const float host_refresh_rate = g_gpu_device->GetWindowInfo().surface_refresh_rate; host_refresh_rate > 0.0f)
{
if (std::optional<float> host_refresh_rate = g_gpu_device->GetHostRefreshRate(); host_refresh_rate.has_value())
const float ratio = host_refresh_rate / System::GetThrottleFrequency();
const bool can_sync_to_host = (ratio >= 0.95f && ratio <= 1.05f);
INFO_LOG("Refresh rate: Host={}hz Guest={}hz Ratio={} - {}", host_refresh_rate, System::GetThrottleFrequency(),
ratio, can_sync_to_host ? "can sync" : "can't sync");
if (can_sync_to_host && g_settings.sync_to_host_refresh_rate && s_target_speed == 1.0f)
{
const float ratio = host_refresh_rate.value() / System::GetThrottleFrequency();
s_syncing_to_host = (ratio >= 0.95f && ratio <= 1.05f);
INFO_LOG("Refresh rate: Host={}hz Guest={}hz Ratio={} - {}", host_refresh_rate.value(),
System::GetThrottleFrequency(), ratio, s_syncing_to_host ? "can sync" : "can't sync");
if (s_syncing_to_host)
s_target_speed *= ratio;
}
}
s_target_speed = ratio;
// When syncing to host and using vsync, we don't need to sleep.
s_syncing_to_host_with_vsync = (s_syncing_to_host && IsHostVSyncEffectivelyEnabled());
s_syncing_to_host_with_vsync = g_settings.display_vsync;
if (s_syncing_to_host_with_vsync)
{
INFO_LOG("Using host vsync for throttling.");
s_throttler_enabled = false;
}
}
}
VERBOSE_LOG("Target speed: {}%", s_target_speed * 100.0f);
VERBOSE_LOG("Preset timing: {}", s_optimal_frame_pacing ? "consistent" : "immediate");
// Update audio output.
AudioStream* stream = SPU::GetOutputStream();
@ -2879,35 +2882,60 @@ void System::UpdateSpeedLimiterState()
UpdateThrottlePeriod();
ResetThrottler();
UpdateDisplaySync();
UpdateDisplayVSync();
UpdateDisplayMaxFPS();
if (g_settings.increase_timer_resolution)
SetTimerResolutionIncreased(s_throttler_enabled);
}
void System::UpdateDisplaySync()
void System::UpdateDisplayVSync()
{
const bool vsync_enabled = IsHostVSyncEffectivelyEnabled();
const float max_display_fps = (s_throttler_enabled || s_syncing_to_host) ? 0.0f : g_settings.display_max_fps;
VERBOSE_LOG("VSync: {}{}", vsync_enabled ? "Enabled" : "Disabled",
static constexpr std::array<const char*, static_cast<size_t>(GPUVSyncMode::Count)> vsync_modes = {{
"Disabled",
"DoubleBuffered",
"TripleBuffered",
}};
// Avoid flipping vsync on and off by manually throttling when vsync is on.
const GPUVSyncMode vsync_mode = GetEffectiveVSyncMode();
VERBOSE_LOG("VSync: {}{}", vsync_modes[static_cast<size_t>(vsync_mode)],
s_syncing_to_host_with_vsync ? " (for throttling)" : "");
g_gpu_device->SetVSyncMode(vsync_mode);
}
void System::UpdateDisplayMaxFPS()
{
const GPUVSyncMode vsync_mode = GetEffectiveVSyncMode();
const float max_display_fps =
(!IsPaused() && IsValid() &&
(s_target_speed == 0.0f ||
(vsync_mode != GPUVSyncMode::Disabled && s_target_speed != 1.0f && !s_syncing_to_host_with_vsync))) ?
g_gpu_device->GetWindowInfo().surface_refresh_rate :
0.0f;
VERBOSE_LOG("Max display fps: {}", max_display_fps);
VERBOSE_LOG("Preset timing: {}", s_optimal_frame_pacing ? "consistent" : "immediate");
g_gpu_device->SetDisplayMaxFPS(max_display_fps);
g_gpu_device->SetVSyncEnabled(vsync_enabled, vsync_enabled && !IsHostVSyncUsedForTiming());
}
bool System::IsHostVSyncEffectivelyEnabled()
GPUVSyncMode System::GetEffectiveVSyncMode()
{
// Disable vsync if running outside 100%.
return (g_settings.display_vsync && (s_state != State::Shutdown && s_state != State::Stopping) &&
!IsRunningAtNonStandardSpeed());
}
// Vsync off => always disabled.
if (!g_settings.display_vsync)
return GPUVSyncMode::Disabled;
bool System::IsHostVSyncUsedForTiming()
{
return (IsHostVSyncEffectivelyEnabled() && s_syncing_to_host_with_vsync);
// If there's no VM, or we're using vsync for timing, then we always use double-buffered (blocking).
if (s_state == State::Shutdown || s_state == State::Stopping || s_syncing_to_host_with_vsync)
return GPUVSyncMode::DoubleBuffered;
// For PAL games, we always want to triple buffer, because otherwise we'll be tearing.
// Or for when we aren't using sync-to-host-refresh, to avoid dropping frames.
// Force vsync off when not running at 100% speed.
// We can avoid this by manually throttling when vsync is on (see above).
return (s_throttler_enabled && g_gpu_device->GetWindowInfo().surface_refresh_rate == 0.0f) ?
GPUVSyncMode::Disabled :
GPUVSyncMode::TripleBuffered;
}
bool System::IsFastForwardEnabled()
@ -3978,7 +4006,6 @@ void System::CheckForSettingsChanges(const Settings& old_settings)
g_settings.increase_timer_resolution != old_settings.increase_timer_resolution ||
g_settings.emulation_speed != old_settings.emulation_speed ||
g_settings.fast_forward_speed != old_settings.fast_forward_speed ||
g_settings.display_max_fps != old_settings.display_max_fps ||
g_settings.display_optimal_frame_pacing != old_settings.display_optimal_frame_pacing ||
g_settings.display_skip_presenting_duplicate_frames != old_settings.display_skip_presenting_duplicate_frames ||
g_settings.display_pre_frame_sleep != old_settings.display_pre_frame_sleep ||
@ -3999,6 +4026,17 @@ void System::CheckForSettingsChanges(const Settings& old_settings)
PostProcessing::UpdateSettings();
}
else
{
if (g_gpu_device)
{
if (g_settings.display_vsync != old_settings.display_vsync)
{
UpdateDisplayVSync();
UpdateDisplayMaxFPS();
}
}
}
if (g_gpu_device)
{
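Editor's note: the host refresh rate used for sync-to-host now comes straight from the window's surface_refresh_rate rather than a separate GetHostRefreshRate() query. The logic above: if that rate is within 5% of the guest's throttle frequency, sync-to-host is enabled, and the user is running at 100% speed, the target speed is scaled to the exact ratio; if vsync is also enabled, the sleep-based throttler is switched off and the blocking present paces the emulation instead. A condensed sketch of that decision (illustrative names, not the DuckStation API):

```cpp
// Illustrative sketch only: condenses the sync-to-host decision from
// UpdateSpeedLimiterState() above. Names are hypothetical.
struct SyncDecision
{
  float target_speed;          // multiplier applied to the guest clock
  bool vsync_paces_emulation;  // true -> the software throttler is turned off
};

SyncDecision DecideSyncToHost(float host_refresh_hz, float guest_refresh_hz,
                              bool sync_to_host_enabled, bool vsync_enabled,
                              float requested_speed)
{
  SyncDecision d{requested_speed, false};
  if (host_refresh_hz <= 0.0f || !sync_to_host_enabled || requested_speed != 1.0f)
    return d; // refresh rate unknown, or the user is not at 100% speed

  const float ratio = host_refresh_hz / guest_refresh_hz;
  if (ratio < 0.95f || ratio > 1.05f)
    return d; // more than 5% apart (e.g. PAL guest on a 60 Hz host) -> don't sync

  d.target_speed = ratio;                  // run the guest at exactly the host rate
  d.vsync_paces_emulation = vsync_enabled; // blocking present replaces the sleep throttle
  return d;
}
```

GetEffectiveVSyncMode() above then returns DoubleBuffered whenever vsync is pacing the emulation (so the present call actually blocks), TripleBuffered otherwise, and falls back to Disabled only when the throttler is active but the surface refresh rate is unknown.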

View File

@ -19,6 +19,8 @@ class Error;
class SmallStringBase;
class StateWrapper;
enum class GPUVSyncMode : u8;
class Controller;
struct CheatCode;
@ -295,9 +297,6 @@ void UpdateThrottlePeriod();
void ResetThrottler();
void ResetPerformanceCounters();
/// Resets vsync/max present fps state.
void UpdateDisplaySync();
// Access controllers for simulating input.
Controller* GetController(u32 slot);
void UpdateControllers();
@ -466,10 +465,7 @@ void ToggleWidescreen();
bool IsRunningAtNonStandardSpeed();
/// Returns true if vsync should be used.
bool IsHostVSyncEffectivelyEnabled();
/// Returns true if vsync is being used for frame pacing.
bool IsHostVSyncUsedForTiming();
GPUVSyncMode GetEffectiveVSyncMode();
/// Quick switch between software and hardware rendering.
void ToggleSoftwareRendering();

View File

@ -1754,7 +1754,7 @@ void EmuThread::run()
if (g_gpu_device)
{
System::PresentDisplay(false, false);
if (!g_gpu_device->IsVSyncEnabled())
if (!g_gpu_device->IsVSyncModeBlocking())
g_gpu_device->ThrottlePresentation();
}
}
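Editor's note: because the Qt host now asks IsVSyncModeBlocking() instead of the old boolean, manual pacing via ThrottlePresentation() only kicks in when the present call returns immediately (GPUVSyncMode::Disabled). The gpu_device.cpp hunk further down shows the real throttle sleeping until m_last_frame_displayed_time; the following is a minimal, self-contained sketch of the same idea using std::chrono (illustrative names, not the Common::Timer API):

```cpp
// Minimal sketch of a presentation throttle for a non-blocking vsync mode.
// Illustrative only; the real code uses Common::Timer and a stored
// m_last_frame_displayed_time.
#include <chrono>
#include <thread>

class PresentThrottle
{
public:
  explicit PresentThrottle(float max_present_hz) :
    m_period(std::chrono::duration_cast<std::chrono::steady_clock::duration>(
      std::chrono::duration<double>(1.0 / max_present_hz)))
  {
  }

  // Call before presenting when IsVSyncModeBlocking() is false.
  void Throttle()
  {
    m_next += m_period;
    const auto now = std::chrono::steady_clock::now();
    if (m_next > now)
      std::this_thread::sleep_until(m_next); // a coarse sleep is fine here
    else
      m_next = now; // fell behind; don't try to catch up
  }

private:
  std::chrono::steady_clock::duration m_period;
  std::chrono::steady_clock::time_point m_next = std::chrono::steady_clock::now();
};
```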

View File

@ -7,6 +7,7 @@
#include "core/system.h"
#include "common/byte_stream.h"
#include "common/log.h"
#include <QtCore/QCoreApplication>
#include <QtCore/QMetaObject>
@ -38,6 +39,8 @@
#include "common/windows_headers.h"
#endif
Log_SetChannel(QtUtils);
namespace QtUtils {
QFrame* CreateHorizontalLine(QWidget* parent)
@ -365,6 +368,11 @@ std::optional<WindowInfo> GetWindowInfoForWidget(QWidget* widget)
wi.surface_width = static_cast<u32>(static_cast<qreal>(widget->width()) * dpr);
wi.surface_height = static_cast<u32>(static_cast<qreal>(widget->height()) * dpr);
wi.surface_scale = static_cast<float>(dpr);
// Query refresh rate, we need it for sync.
wi.surface_refresh_rate = WindowInfo::QueryRefreshRateForWindow(wi).value_or(0.0f);
INFO_LOG("Surface refresh rate: {} hz", wi.surface_refresh_rate);
return wi;
}

View File

@ -201,7 +201,7 @@ u32 D3D11Device::GetSwapChainBufferCount() const
{
// With vsync off, we only need two buffers. Same for blocking vsync.
// With triple buffering, we need three.
return (m_vsync_enabled && m_vsync_prefer_triple_buffer) ? 3 : 2;
return (m_vsync_mode == GPUVSyncMode::TripleBuffered) ? 3 : 2;
}
bool D3D11Device::CreateSwapChain()
@ -357,10 +357,6 @@ bool D3D11Device::CreateSwapChainRTV()
m_window_info.surface_refresh_rate = static_cast<float>(desc.BufferDesc.RefreshRate.Numerator) /
static_cast<float>(desc.BufferDesc.RefreshRate.Denominator);
}
else
{
m_window_info.surface_refresh_rate = 0.0f;
}
}
return true;
@ -592,31 +588,13 @@ void D3D11Device::InvalidateRenderTarget(GPUTexture* t)
static_cast<D3D11Texture*>(t)->CommitClear(m_context.Get());
}
std::optional<float> D3D11Device::GetHostRefreshRate()
void D3D11Device::SetVSyncMode(GPUVSyncMode mode)
{
if (m_swap_chain && m_is_exclusive_fullscreen)
{
DXGI_SWAP_CHAIN_DESC desc;
if (SUCCEEDED(m_swap_chain->GetDesc(&desc)) && desc.BufferDesc.RefreshRate.Numerator > 0 &&
desc.BufferDesc.RefreshRate.Denominator > 0)
{
DEV_LOG("using fs rr: {} {}", desc.BufferDesc.RefreshRate.Numerator, desc.BufferDesc.RefreshRate.Denominator);
return static_cast<float>(desc.BufferDesc.RefreshRate.Numerator) /
static_cast<float>(desc.BufferDesc.RefreshRate.Denominator);
}
}
return GPUDevice::GetHostRefreshRate();
}
void D3D11Device::SetVSyncEnabled(bool enabled, bool prefer_triple_buffer)
{
if (m_vsync_enabled == enabled && m_vsync_prefer_triple_buffer == prefer_triple_buffer)
if (m_vsync_mode == mode)
return;
const u32 old_buffer_count = GetSwapChainBufferCount();
m_vsync_enabled = enabled;
m_vsync_prefer_triple_buffer = prefer_triple_buffer;
m_vsync_mode = mode;
if (!m_swap_chain)
return;
@ -656,7 +634,7 @@ bool D3D11Device::BeginPresent(bool skip_present)
// This blows out our GPU usage number considerably, so read the timestamp before the final blit
// in this configuration. It does reduce accuracy a little, but better than seeing 100% all of
// the time, when it's more like a couple of percent.
if (m_vsync_enabled && m_gpu_timing_enabled)
if (IsVSyncModeBlocking() && m_gpu_timing_enabled)
PopTimestampQuery();
static constexpr float clear_color[4] = {0.0f, 0.0f, 0.0f, 1.0f};
@ -674,12 +652,12 @@ void D3D11Device::EndPresent(bool explicit_present)
DebugAssert(!explicit_present);
DebugAssert(m_num_current_render_targets == 0 && !m_current_depth_target);
if (!m_vsync_enabled && m_gpu_timing_enabled)
if (!IsVSyncModeBlocking() && m_gpu_timing_enabled)
PopTimestampQuery();
// DirectX has no concept of tear-or-sync. I guess if we measured times ourselves, we could implement it.
if (m_vsync_enabled)
m_swap_chain->Present(BoolToUInt32(1), 0);
if (IsVSyncModeBlocking())
m_swap_chain->Present(1, 0);
else if (m_using_allow_tearing) // Disabled or VRR, VRR requires the allow tearing flag :/
m_swap_chain->Present(0, DXGI_PRESENT_ALLOW_TEARING);
else
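Editor's note: for the DXGI backends, the sync interval passed to Present() is what makes a mode blocking. Present(1, 0) waits for vertical blank for both DoubleBuffered and TripleBuffered (the distinction only changes the swap chain buffer count above), while sync interval 0 plus DXGI_PRESENT_ALLOW_TEARING gives an immediate present and keeps VRR working. The m_using_allow_tearing flag isn't set in this hunk; the usual way to detect support, shown here as an assumption rather than this codebase's exact code, is IDXGIFactory5::CheckFeatureSupport:

```cpp
// Sketch: detecting DXGI tearing support. Only the DXGI API here is real;
// where and how the result is stored is an assumption.
#include <windows.h>
#include <dxgi1_5.h>

bool QueryAllowTearingSupport(IDXGIFactory5* factory)
{
  BOOL allow_tearing = FALSE;
  if (FAILED(factory->CheckFeatureSupport(DXGI_FEATURE_PRESENT_ALLOW_TEARING,
                                          &allow_tearing, sizeof(allow_tearing))))
    return false;

  // If supported, create the swap chain with DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING
  // and pass DXGI_PRESENT_ALLOW_TEARING to Present() with sync interval 0.
  return allow_tearing == TRUE;
}
```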

View File

@ -96,8 +96,7 @@ public:
void DrawIndexed(u32 index_count, u32 base_index, u32 base_vertex) override;
void DrawIndexedWithBarrier(u32 index_count, u32 base_index, u32 base_vertex, DrawBarrier type) override;
std::optional<float> GetHostRefreshRate() override;
void SetVSyncEnabled(bool enabled, bool prefer_triple_buffer) override;
void SetVSyncMode(GPUVSyncMode mode) override;
bool SetGPUTimingEnabled(bool enabled) override;
float GetAndResetAccumulatedGPUTime() override;

View File

@ -801,7 +801,7 @@ u32 D3D12Device::GetSwapChainBufferCount() const
{
// With vsync off, we only need two buffers. Same for blocking vsync.
// With triple buffering, we need three.
return (m_vsync_enabled && m_vsync_prefer_triple_buffer) ? 3 : 2;
return (m_vsync_mode == GPUVSyncMode::TripleBuffered) ? 3 : 2;
}
bool D3D12Device::CreateSwapChain()
@ -942,10 +942,6 @@ bool D3D12Device::CreateSwapChainRTV()
m_window_info.surface_refresh_rate = static_cast<float>(desc.BufferDesc.RefreshRate.Numerator) /
static_cast<float>(desc.BufferDesc.RefreshRate.Denominator);
}
else
{
m_window_info.surface_refresh_rate = 0.0f;
}
}
m_current_swap_chain_buffer = 0;
@ -1088,31 +1084,13 @@ std::string D3D12Device::GetDriverInfo() const
return ret;
}
std::optional<float> D3D12Device::GetHostRefreshRate()
void D3D12Device::SetVSyncMode(GPUVSyncMode mode)
{
if (m_swap_chain && m_is_exclusive_fullscreen)
{
DXGI_SWAP_CHAIN_DESC desc;
if (SUCCEEDED(m_swap_chain->GetDesc(&desc)) && desc.BufferDesc.RefreshRate.Numerator > 0 &&
desc.BufferDesc.RefreshRate.Denominator > 0)
{
DEV_LOG("using fs rr: {} {}", desc.BufferDesc.RefreshRate.Numerator, desc.BufferDesc.RefreshRate.Denominator);
return static_cast<float>(desc.BufferDesc.RefreshRate.Numerator) /
static_cast<float>(desc.BufferDesc.RefreshRate.Denominator);
}
}
return GPUDevice::GetHostRefreshRate();
}
void D3D12Device::SetVSyncEnabled(bool enabled, bool prefer_triple_buffer)
{
if (m_vsync_enabled == enabled && m_vsync_prefer_triple_buffer == prefer_triple_buffer)
if (m_vsync_mode == mode)
return;
const u32 old_buffer_count = GetSwapChainBufferCount();
m_vsync_enabled = enabled;
m_vsync_prefer_triple_buffer = prefer_triple_buffer;
m_vsync_mode = mode;
if (!m_swap_chain)
return;
@ -1181,8 +1159,8 @@ void D3D12Device::SubmitPresent()
DebugAssert(m_swap_chain);
// DirectX has no concept of tear-or-sync. I guess if we measured times ourselves, we could implement it.
if (m_vsync_enabled)
m_swap_chain->Present(BoolToUInt32(1), 0);
if (IsVSyncModeBlocking())
m_swap_chain->Present(1, 0);
else if (m_using_allow_tearing) // Disabled or VRR, VRR requires the allow tearing flag :/
m_swap_chain->Present(0, DXGI_PRESENT_ALLOW_TEARING);
else

View File

@ -118,8 +118,7 @@ public:
void DrawIndexed(u32 index_count, u32 base_index, u32 base_vertex) override;
void DrawIndexedWithBarrier(u32 index_count, u32 base_index, u32 base_vertex, DrawBarrier type) override;
std::optional<float> GetHostRefreshRate() override;
void SetVSyncEnabled(bool enabled, bool prefer_triple_buffer) override;
void SetVSyncMode(GPUVSyncMode mode) override;
bool SetGPUTimingEnabled(bool enabled) override;
float GetAndResetAccumulatedGPUTime() override;

View File

@ -276,11 +276,10 @@ bool GPUDevice::IsSameRenderAPI(RenderAPI lhs, RenderAPI rhs)
}
bool GPUDevice::Create(std::string_view adapter, std::string_view shader_cache_path, u32 shader_cache_version,
bool debug_device, bool vsync, bool vsync_prefer_triple_buffer, bool threaded_presentation,
bool debug_device, GPUVSyncMode vsync, bool threaded_presentation,
std::optional<bool> exclusive_fullscreen_control, FeatureMask disabled_features, Error* error)
{
m_vsync_enabled = vsync;
m_vsync_prefer_triple_buffer = vsync_prefer_triple_buffer;
m_vsync_mode = vsync;
m_debug_device = debug_device;
if (!AcquireWindow(true))
@ -591,11 +590,6 @@ void GPUDevice::RenderImGui()
}
}
void GPUDevice::SetVSyncEnabled(bool enabled, bool prefer_triple_buffer)
{
m_vsync_enabled = enabled;
}
void GPUDevice::UploadVertexBuffer(const void* vertices, u32 vertex_size, u32 vertex_count, u32* base_vertex)
{
void* map;
@ -1053,14 +1047,6 @@ void GPUDevice::ThrottlePresentation()
Common::Timer::SleepUntil(m_last_frame_displayed_time, false);
}
std::optional<float> GPUDevice::GetHostRefreshRate()
{
if (m_window_info.surface_refresh_rate > 0.0f)
return m_window_info.surface_refresh_rate;
return WindowInfo::QueryRefreshRateForWindow(m_window_info);
}
bool GPUDevice::SetGPUTimingEnabled(bool enabled)
{
return false;

View File

@ -36,6 +36,14 @@ enum class RenderAPI : u32
Metal
};
enum class GPUVSyncMode : u8
{
Disabled,
DoubleBuffered,
TripleBuffered,
Count
};
class GPUSampler
{
public:
@ -573,8 +581,8 @@ public:
virtual RenderAPI GetRenderAPI() const = 0;
bool Create(std::string_view adapter, std::string_view shader_cache_path, u32 shader_cache_version, bool debug_device,
bool vsync, bool vsync_prefer_triple_buffer, bool threaded_presentation,
std::optional<bool> exclusive_fullscreen_control, FeatureMask disabled_features, Error* error);
GPUVSyncMode vsync, bool threaded_presentation, std::optional<bool> exclusive_fullscreen_control,
FeatureMask disabled_features, Error* error);
void Destroy();
virtual bool HasSurface() const = 0;
@ -672,8 +680,9 @@ public:
/// Renders ImGui screen elements. Call before EndPresent().
void RenderImGui();
ALWAYS_INLINE bool IsVSyncEnabled() const { return m_vsync_enabled; }
virtual void SetVSyncEnabled(bool enabled, bool prefer_triple_buffer);
ALWAYS_INLINE GPUVSyncMode GetVSyncMode() const { return m_vsync_mode; }
ALWAYS_INLINE bool IsVSyncModeBlocking() const { return (m_vsync_mode >= GPUVSyncMode::DoubleBuffered); }
virtual void SetVSyncMode(GPUVSyncMode mode) = 0;
ALWAYS_INLINE bool IsDebugDevice() const { return m_debug_device; }
ALWAYS_INLINE size_t GetVRAMUsage() const { return s_total_vram_usage; }
@ -689,8 +698,6 @@ public:
virtual bool SupportsTextureFormat(GPUTexture::Format format) const = 0;
virtual std::optional<float> GetHostRefreshRate();
/// Enables/disables GPU frame timing.
virtual bool SetGPUTimingEnabled(bool enabled);
@ -793,8 +800,7 @@ private:
protected:
static Statistics s_stats;
bool m_vsync_enabled = false;
bool m_vsync_prefer_triple_buffer = false;
GPUVSyncMode m_vsync_mode = GPUVSyncMode::Disabled;
bool m_gpu_timing_enabled = false;
bool m_debug_device = false;
};
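Editor's note: the new three-valued GPUVSyncMode replaces the enabled/prefer_triple_buffer pair, and the backend hunks in this commit all interpret it consistently: Disabled maps to an immediate present (DXGI sync interval 0, GL swap interval 0, VK_PRESENT_MODE_IMMEDIATE_KHR, Metal displaySyncEnabled:NO); DoubleBuffered to a blocking present with two buffers (sync interval 1, swap interval 1, FIFO, displaySyncEnabled:YES); TripleBuffered to the same blocking present with a third swap chain buffer on D3D, or VK_PRESENT_MODE_MAILBOX_KHR on Vulkan. The table-as-code below is purely illustrative documentation, not part of the GPUDevice API:

```cpp
// Illustrative only: summarizes how the backends in this commit interpret
// GPUVSyncMode. The struct and function are not real DuckStation code.
#include <cstdint>
using u8 = std::uint8_t;
using u32 = std::uint32_t;

enum class GPUVSyncMode : u8 { Disabled, DoubleBuffered, TripleBuffered, Count }; // repeated for self-containment

struct VSyncTranslation
{
  int dxgi_sync_interval;      // D3D11/D3D12 Present() first argument
  int gl_swap_interval;        // value passed to e.g. wglSwapIntervalEXT
  const char* vk_present_mode; // Vulkan present mode chosen in SelectPresentMode()
  u32 swap_chain_buffers;      // D3D swap chain buffer count
};

constexpr VSyncTranslation Translate(GPUVSyncMode mode)
{
  switch (mode)
  {
    case GPUVSyncMode::Disabled:
      return {0, 0, "VK_PRESENT_MODE_IMMEDIATE_KHR", 2};
    case GPUVSyncMode::DoubleBuffered:
      return {1, 1, "VK_PRESENT_MODE_FIFO_KHR", 2};
    case GPUVSyncMode::TripleBuffered:
    default:
      return {1, 1, "VK_PRESENT_MODE_MAILBOX_KHR", 3};
  }
}
```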

View File

@ -260,12 +260,10 @@ public:
void DrawIndexed(u32 index_count, u32 base_index, u32 base_vertex) override;
void DrawIndexedWithBarrier(u32 index_count, u32 base_index, u32 base_vertex, DrawBarrier type) override;
std::optional<float> GetHostRefreshRate() override;
bool SetGPUTimingEnabled(bool enabled) override;
float GetAndResetAccumulatedGPUTime() override;
void SetVSyncEnabled(bool enabled, bool prefer_triple_buffer) override;
void SetVSyncMode(GPUVSyncMode mode) override;
bool BeginPresent(bool skip_present) override;
void EndPresent(bool explicit_submit) override;

View File

@ -115,20 +115,14 @@ bool MetalDevice::HasSurface() const
return (m_layer != nil);
}
std::optional<float> MetalDevice::GetHostRefreshRate()
void MetalDevice::SetVSyncMode(GPUVSyncMode mode)
{
return GPUDevice::GetHostRefreshRate();
}
void MetalDevice::SetVSyncEnabled(bool enabled, bool prefer_triple_buffer)
{
if (m_vsync_enabled == enabled && m_vsync_prefer_triple_buffer == prefer_triple_buffer)
if (m_vsync_mode == mode)
return;
m_vsync_enabled = enabled;
m_vsync_prefer_triple_buffer = prefer_triple_buffer;
m_vsync_mode = mode;
if (m_layer != nil)
[m_layer setDisplaySyncEnabled:enabled];
[m_layer setDisplaySyncEnabled:m_vsync_mode >= GPUVSyncMode::DoubleBuffered];
}
bool MetalDevice::CreateDevice(std::string_view adapter, bool threaded_presentation,
@ -402,7 +396,7 @@ bool MetalDevice::CreateLayer()
}
});
[m_layer setDisplaySyncEnabled:m_vsync_enabled];
[m_layer setDisplaySyncEnabled:m_vsync_mode >= GPUVSyncMode::DoubleBuffered];
DebugAssert(m_layer_pass_desc == nil);
m_layer_pass_desc = [[MTLRenderPassDescriptor renderPassDescriptor] retain];

View File

@ -238,13 +238,12 @@ void OpenGLDevice::InsertDebugMessage(const char* msg)
#endif
}
void OpenGLDevice::SetVSyncEnabled(bool enabled, bool prefer_triple_buffer)
void OpenGLDevice::SetVSyncMode(GPUVSyncMode mode)
{
if (m_vsync_enabled == enabled && m_vsync_prefer_triple_buffer == prefer_triple_buffer)
if (m_vsync_mode == mode)
return;
m_vsync_enabled = enabled;
m_vsync_prefer_triple_buffer = prefer_triple_buffer;
m_vsync_mode = mode;
SetSwapInterval();
}
@ -584,7 +583,7 @@ void OpenGLDevice::SetSwapInterval()
return;
// Window framebuffer has to be bound to call SetSwapInterval.
const s32 interval = m_vsync_enabled ? 1 : 0;
const s32 interval = (m_vsync_mode >= GPUVSyncMode::DoubleBuffered) ? 1 : 0;
GLint current_fbo = 0;
glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &current_fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);

View File

@ -100,7 +100,7 @@ public:
void DrawIndexed(u32 index_count, u32 base_index, u32 base_vertex) override;
void DrawIndexedWithBarrier(u32 index_count, u32 base_index, u32 base_vertex, DrawBarrier type) override;
void SetVSyncEnabled(bool enabled, bool prefer_triple_buffer) override;
void SetVSyncMode(GPUVSyncMode mode) override;
bool BeginPresent(bool skip_present) override;
void EndPresent(bool explicit_present) override;

View File

@ -24,3 +24,9 @@ namespace Host {
/// Return the current window handle. Needed for DInput.
std::optional<WindowInfo> GetTopLevelWindowInfo();
} // namespace Host
// TODO: Move all the other Cocoa stuff in here.
namespace CocoaTools {
/// Returns the refresh rate of the display the window is placed on.
std::optional<float> GetViewRefreshRate(const WindowInfo& wi);
}

View File

@ -147,3 +147,24 @@ void CocoaTools::DestroyMetalLayer(WindowInfo* wi)
[view setWantsLayer:NO];
[layer release];
}
std::optional<float> CocoaTools::GetViewRefreshRate(const WindowInfo& wi)
{
if (![NSThread isMainThread])
{
std::optional<float> ret;
dispatch_sync(dispatch_get_main_queue(), [&ret, wi]{ ret = GetViewRefreshRate(wi); });
return ret;
}
std::optional<float> ret;
NSView* const view = (__bridge NSView*)wi.window_handle;
const u32 did = [[[[[view window] screen] deviceDescription] valueForKey:@"NSScreenNumber"] unsignedIntValue];
if (CGDisplayModeRef mode = CGDisplayCopyDisplayMode(did))
{
ret = CGDisplayModeGetRefreshRate(mode);
CGDisplayModeRelease(mode);
}
return ret;
}
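Editor's note: one caveat with the CoreGraphics query above is that CGDisplayModeGetRefreshRate() returns 0 for displays that don't expose a fixed refresh rate (common for built-in panels), so a successful query can still yield 0.0. The Qt caller already collapses failure to 0.0f via value_or(), and system.cpp treats a zero surface_refresh_rate as "unknown" and skips sync-to-host; a caller-side normalization might look like this (hypothetical helper, not from the commit):

```cpp
// Sketch: treat 0.0 and a failed query the same way. Helper name is hypothetical.
#include <optional>

std::optional<float> NormalizeRefreshRate(std::optional<float> raw)
{
  if (!raw.has_value() || *raw <= 0.0f)
    return std::nullopt; // unknown; sync-to-host and FPS capping are skipped
  return raw;
}
```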

View File

@ -440,8 +440,13 @@ VkPresentModeKHR VulkanDevice::SelectPresentMode() const
{
// Use mailbox/triple buffering for "normal" vsync, due to PAL refresh rate mismatch.
// Otherwise, use FIFO when syncing to host, because we don't want to return early.
return m_vsync_enabled ? (m_vsync_prefer_triple_buffer ? VK_PRESENT_MODE_MAILBOX_KHR : VK_PRESENT_MODE_FIFO_KHR) :
VK_PRESENT_MODE_IMMEDIATE_KHR;
static constexpr std::array<VkPresentModeKHR, static_cast<size_t>(GPUVSyncMode::Count)> modes = {{
VK_PRESENT_MODE_IMMEDIATE_KHR, // Disabled
VK_PRESENT_MODE_FIFO_KHR, // DoubleBuffered
VK_PRESENT_MODE_MAILBOX_KHR, // TripleBuffered
}};
return modes[static_cast<size_t>(m_vsync_mode)];
}
bool VulkanDevice::CreateDevice(VkSurfaceKHR surface, bool enable_validation_layer)
@ -2344,13 +2349,12 @@ std::string VulkanDevice::GetDriverInfo() const
return ret;
}
void VulkanDevice::SetVSyncEnabled(bool enabled, bool prefer_triple_buffer)
void VulkanDevice::SetVSyncMode(GPUVSyncMode mode)
{
if (m_vsync_enabled == enabled && m_vsync_prefer_triple_buffer == prefer_triple_buffer)
if (m_vsync_mode == mode)
return;
m_vsync_enabled = enabled;
m_vsync_prefer_triple_buffer = prefer_triple_buffer;
m_vsync_mode = mode;
if (!m_swap_chain)
return;
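Editor's note: Vulkan only guarantees VK_PRESENT_MODE_FIFO_KHR; IMMEDIATE and MAILBOX are optional, so the mode chosen by SelectPresentMode() still has to be checked against the surface when the swap chain is (re)created. That validation isn't part of this hunk; a typical shape for it, offered as an assumption rather than this commit's code, is:

```cpp
// Sketch: clamp a requested present mode to what the surface supports,
// falling back to FIFO (the only mode Vulkan guarantees). Not from this commit.
#include <vulkan/vulkan.h>
#include <algorithm>
#include <vector>

VkPresentModeKHR ChooseSupportedPresentMode(VkPhysicalDevice physical_device,
                                            VkSurfaceKHR surface,
                                            VkPresentModeKHR requested)
{
  uint32_t count = 0;
  vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &count, nullptr);
  std::vector<VkPresentModeKHR> modes(count);
  vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &count, modes.data());

  if (std::find(modes.begin(), modes.end(), requested) != modes.end())
    return requested;

  // MAILBOX unavailable -> FIFO still gives a blocking, tear-free present.
  return VK_PRESENT_MODE_FIFO_KHR;
}
```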

View File

@ -129,7 +129,7 @@ public:
bool SetGPUTimingEnabled(bool enabled) override;
float GetAndResetAccumulatedGPUTime() override;
void SetVSyncEnabled(bool enabled, bool prefer_triple_buffer) override;
void SetVSyncMode(GPUVSyncMode mode) override;
bool BeginPresent(bool skip_present) override;
void EndPresent(bool explicit_present) override;

View File

@ -160,6 +160,18 @@ std::optional<float> WindowInfo::QueryRefreshRateForWindow(const WindowInfo& wi)
return ret;
}
#elif defined(__APPLE__)
#include "util/platform_misc.h"
std::optional<float> WindowInfo::QueryRefreshRateForWindow(const WindowInfo& wi)
{
if (wi.type == WindowInfo::Type::MacOS)
return CocoaTools::GetViewRefreshRate(wi);
return std::nullopt;
}
#else
#ifdef ENABLE_X11