Merge pull request #10547 from AdmiralCurtiss/save-state-threading

Core/State: Rewrite threading behavior when multiple Save requests are happening.
Admiral H. Curtiss 2022-10-02 00:16:03 +02:00 committed by GitHub
commit 2f0358a75e
3 changed files with 166 additions and 112 deletions

Source/Core/Common/WorkQueueThread.h

@ -60,7 +60,6 @@ public:
bool IsCancelled() const { return m_cancelled.IsSet(); }
private:
void Shutdown()
{
if (m_thread.joinable())
@ -71,6 +70,7 @@ private:
}
}
private:
void ThreadLoop()
{
Common::SetCurrentThreadName("WorkQueueThread");
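
The hunk above only moves access specifiers: the private: in front of Shutdown() is removed and a new one is added before ThreadLoop(), so Shutdown() becomes callable from outside the class; State::Shutdown() below relies on that to stop the save queue explicitly. As background, a single-consumer work-queue thread of this general shape can be sketched as follows. This is an illustrative sketch only, not Dolphin's actual Common::WorkQueueThread implementation.

#include <condition_variable>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>

// Illustrative only: one worker thread draining a FIFO of items.
template <typename T>
class SimpleWorkQueue
{
public:
  explicit SimpleWorkQueue(std::function<void(T)> handler) : m_handler(std::move(handler))
  {
    m_thread = std::thread([this] { ThreadLoop(); });
  }
  ~SimpleWorkQueue() { Shutdown(); }

  void EmplaceItem(T item)
  {
    {
      std::lock_guard lk(m_mutex);
      m_items.push_back(std::move(item));
    }
    m_wakeup.notify_one();
  }

  void Shutdown()
  {
    {
      std::lock_guard lk(m_mutex);
      m_shutdown = true;
    }
    m_wakeup.notify_one();
    if (m_thread.joinable())
      m_thread.join();
  }

private:
  void ThreadLoop()
  {
    while (true)
    {
      std::unique_lock lk(m_mutex);
      m_wakeup.wait(lk, [this] { return m_shutdown || !m_items.empty(); });
      if (m_items.empty())
        return;  // shutdown requested and the queue has been drained
      T item = std::move(m_items.front());
      m_items.pop_front();
      lk.unlock();
      m_handler(std::move(item));  // process outside the lock
    }
  }

  std::function<void(T)> m_handler;
  std::deque<T> m_items;
  std::mutex m_mutex;
  std::condition_variable m_wakeup;
  bool m_shutdown = false;
  std::thread m_thread;
};

In this commit, State.cpp constructs the real queue once in Init() via s_save_thread.Reset(...) and pushes work with EmplaceItem(), which maps onto the same enqueue/drain/shutdown structure.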

Source/Core/Core/State.cpp

@ -3,8 +3,10 @@
#include "Core/State.h"
#include <lzo/lzo1x.h>
#include <atomic>
#include <condition_variable>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <thread>
@ -13,16 +15,18 @@
#include <fmt/format.h>
#include <lzo/lzo1x.h>
#include "Common/ChunkFile.h"
#include "Common/CommonTypes.h"
#include "Common/Event.h"
#include "Common/FileUtil.h"
#include "Common/IOFile.h"
#include "Common/MsgHandler.h"
#include "Common/ScopeGuard.h"
#include "Common/Thread.h"
#include "Common/Timer.h"
#include "Common/Version.h"
#include "Common/WorkQueueThread.h"
#include "Core/ConfigManager.h"
#include "Core/Core.h"
@ -62,16 +66,32 @@ static HEAP_ALLOC(wrkmem, LZO1X_1_MEM_COMPRESS);
static AfterLoadCallbackFunc s_on_after_load_callback;
// Temporary undo state buffer
static std::vector<u8> g_undo_load_buffer;
static std::vector<u8> g_current_buffer;
static std::vector<u8> s_undo_load_buffer;
static std::mutex s_undo_load_buffer_mutex;
static std::mutex s_load_or_save_in_progress_mutex;
static std::mutex g_cs_undo_load_buffer;
static std::mutex g_cs_current_buffer;
static Common::Event g_compressAndDumpStateSyncEvent;
struct CompressAndDumpState_args
{
std::vector<u8> buffer_vector;
std::string filename;
std::shared_ptr<Common::Event> state_write_done_event;
};
static std::recursive_mutex g_save_thread_mutex;
static std::thread g_save_thread;
// Protects against simultaneous reads and writes to the final savestate location from multiple
// threads.
static std::mutex s_save_thread_mutex;
// Queue for compressing and writing savestates to disk.
static Common::WorkQueueThread<CompressAndDumpState_args> s_save_thread;
// Keeps track of savestate writes that are currently happening, so we don't load a state while
// another one is still saving. This is particularly important so if you save to a slot and then
// immediately load from the same one, you don't accidentally load the state that's still at that
// file path before the write is done.
static std::mutex s_state_writes_in_queue_mutex;
static size_t s_state_writes_in_queue;
static std::condition_variable s_state_write_queue_is_empty;
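
Condensed, the bookkeeping described in the comments above is an in-flight counter protected by a mutex, plus a condition variable that the load path can wait on. A stand-alone sketch of the same pattern, using hypothetical names rather than the exact code in this commit:

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <mutex>

// Hypothetical stand-alone illustration of the "writes in queue" bookkeeping.
static std::mutex s_mutex;
static std::condition_variable s_queue_is_empty;
static std::size_t s_writes_in_queue = 0;

// Called right before a save request is queued.
void RegisterPendingWrite()
{
  std::lock_guard lk(s_mutex);
  ++s_writes_in_queue;
}

// Called by the worker thread (or the error path) once a write is finished.
void MarkWriteDone()
{
  std::lock_guard lk(s_mutex);
  if (--s_writes_in_queue == 0)
    s_queue_is_empty.notify_all();
}

// Called before loading a state; returns false if saves are still pending after 3 seconds.
bool WaitForPendingWrites()
{
  std::unique_lock lk(s_mutex);
  return s_queue_is_empty.wait_for(lk, std::chrono::seconds(3),
                                   [] { return s_writes_in_queue == 0; });
}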
// Don't forget to increase this after doing changes on the savestate system
constexpr u32 STATE_VERSION = 149; // Last changed in PR 10781
@ -326,58 +346,24 @@ static std::map<double, int> GetSavedStates()
return m;
}
struct CompressAndDumpState_args
static void CompressAndDumpState(CompressAndDumpState_args& save_args)
{
std::vector<u8>* buffer_vector = nullptr;
std::mutex* buffer_mutex = nullptr;
std::string filename;
bool wait = false;
};
const u8* const buffer_data = save_args.buffer_vector.data();
const size_t buffer_size = save_args.buffer_vector.size();
const std::string& filename = save_args.filename;
static void CompressAndDumpState(CompressAndDumpState_args save_args)
{
std::lock_guard lk(*save_args.buffer_mutex);
// ScopeGuard is used here to ensure that g_compressAndDumpStateSyncEvent.Set()
// will be called and that it will happen after the IOFile is closed.
// Both ScopeGuard's and IOFile's finalization occur at respective object destruction time.
// As Local (stack) objects are destructed in the reverse order of construction and "ScopeGuard
// on_exit"
// is created before the "IOFile f", it is guaranteed that the file will be finalized before
// the ScopeGuard's finalization (i.e. "g_compressAndDumpStateSyncEvent.Set()" call).
Common::ScopeGuard on_exit([]() { g_compressAndDumpStateSyncEvent.Set(); });
// If it is not required to wait, we call finalizer early (and it won't be called again at
// destruction).
if (!save_args.wait)
on_exit.Exit();
const u8* const buffer_data = &(*(save_args.buffer_vector))[0];
const size_t buffer_size = (save_args.buffer_vector)->size();
std::string& filename = save_args.filename;
// For easy debugging
Common::SetCurrentThreadName("SaveState thread");
// Moving to last overwritten save-state
if (File::Exists(filename))
// Find free temporary filename.
// TODO: The file exists check and the actual opening of the file should be atomic, we don't have
// functions for that.
std::string temp_filename;
size_t temp_counter = static_cast<size_t>(Common::CurrentThreadId());
do
{
if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav"))
File::Delete((File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav"));
if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav.dtm"))
File::Delete((File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav.dtm"));
temp_filename = fmt::format("{}{}.tmp", filename, temp_counter);
++temp_counter;
} while (File::Exists(temp_filename));
if (!File::Rename(filename, File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav"))
Core::DisplayMessage("Failed to move previous state to state undo backup", 1000);
else if (File::Exists(filename + ".dtm"))
File::Rename(filename + ".dtm", File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav.dtm");
}
if ((Movie::IsMovieActive()) && !Movie::IsJustStartingRecordingInputFromSaveState())
Movie::SaveRecording(filename + ".dtm");
else if (!Movie::IsMovieActive())
File::Delete(filename + ".dtm");
File::IOFile f(filename, "wb");
File::IOFile f(temp_filename, "wb");
if (!f)
{
Core::DisplayMessage("Could not save state", 2000);
@ -427,6 +413,44 @@ static void CompressAndDumpState(CompressAndDumpState_args save_args)
f.WriteBytes(buffer_data, buffer_size);
}
const std::string last_state_filename = File::GetUserPath(D_STATESAVES_IDX) + "lastState.sav";
const std::string last_state_dtmname = last_state_filename + ".dtm";
const std::string dtmname = filename + ".dtm";
{
std::lock_guard lk(s_save_thread_mutex);
// Backup existing state (overwriting an existing backup, if any).
if (File::Exists(filename))
{
if (File::Exists(last_state_filename))
File::Delete((last_state_filename));
if (File::Exists(last_state_dtmname))
File::Delete((last_state_dtmname));
if (!File::Rename(filename, last_state_filename))
{
Core::DisplayMessage("Failed to move previous state to state undo backup", 1000);
}
else if (File::Exists(dtmname))
{
if (!File::Rename(dtmname, last_state_dtmname))
Core::DisplayMessage("Failed to move previous state's dtm to state undo backup", 1000);
}
}
if ((Movie::IsMovieActive()) && !Movie::IsJustStartingRecordingInputFromSaveState())
Movie::SaveRecording(dtmname);
else if (!Movie::IsMovieActive())
File::Delete(dtmname);
// Move written state to final location.
// TODO: This should also be atomic. This is possible on all systems, but needs a special
// implementation of IOFile on Windows.
f.Close();
File::Rename(temp_filename, filename);
}
Core::DisplayMessage(fmt::format("Saved State to {}", filename), 2000);
Host_UpdateMainFrame();
}
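
CompressAndDumpState() therefore writes the new state to a uniquely named .tmp file first and only moves it over the final path (after shuffling the previous state into lastState.sav) once the write has finished, so readers never observe a half-written savestate. A stripped-down sketch of the same write-to-temp-then-rename idea, using plain std::ofstream and std::filesystem instead of Dolphin's IOFile and File helpers (an assumption made to keep the example self-contained):

#include <cstddef>
#include <filesystem>
#include <fstream>
#include <string>
#include <system_error>

// Sketch only: write the payload to "<path>.tmp", then move it into place.
// std::filesystem::rename replaces an existing destination; on POSIX it maps to
// rename(2) and is atomic, on Windows it is a MoveFileEx-style replace.
bool WriteFileSafely(const std::string& path, const void* data, std::size_t size)
{
  const std::string temp_path = path + ".tmp";
  {
    std::ofstream f(temp_path, std::ios::binary | std::ios::trunc);
    if (!f.write(static_cast<const char*>(data), static_cast<std::streamsize>(size)))
      return false;
  }  // the file is closed and flushed before the rename below

  std::error_code ec;
  std::filesystem::rename(temp_path, path, ec);
  return !ec;
}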
@ -439,6 +463,11 @@ void SaveAs(const std::string& filename, bool wait)
Core::RunOnCPUThread(
[&] {
{
std::lock_guard lk(s_state_writes_in_queue_mutex);
++s_state_writes_in_queue;
}
// Measure the size of the buffer.
u8* ptr = nullptr;
PointerWrap p_measure(&ptr, 0, PointerWrap::Mode::Measure);
@ -446,37 +475,41 @@ void SaveAs(const std::string& filename, bool wait)
const size_t buffer_size = reinterpret_cast<size_t>(ptr);
// Then actually do the write.
bool is_write_mode;
{
std::lock_guard lk2(g_cs_current_buffer);
g_current_buffer.resize(buffer_size);
ptr = g_current_buffer.data();
PointerWrap p(&ptr, buffer_size, PointerWrap::Mode::Write);
DoState(p);
is_write_mode = p.IsWriteMode();
}
std::vector<u8> current_buffer;
current_buffer.resize(buffer_size);
ptr = current_buffer.data();
PointerWrap p(&ptr, buffer_size, PointerWrap::Mode::Write);
DoState(p);
if (is_write_mode)
if (p.IsWriteMode())
{
Core::DisplayMessage("Saving State...", 1000);
CompressAndDumpState_args save_args;
save_args.buffer_vector = &g_current_buffer;
save_args.buffer_mutex = &g_cs_current_buffer;
save_args.filename = filename;
save_args.wait = wait;
std::shared_ptr<Common::Event> sync_event;
CompressAndDumpState_args save_args;
save_args.buffer_vector = std::move(current_buffer);
save_args.filename = filename;
if (wait)
{
std::lock_guard lk3(g_save_thread_mutex);
Flush();
g_save_thread = std::thread(CompressAndDumpState, save_args);
sync_event = std::make_shared<Common::Event>();
save_args.state_write_done_event = sync_event;
}
g_compressAndDumpStateSyncEvent.Wait();
s_save_thread.EmplaceItem(std::move(save_args));
if (sync_event)
sync_event->Wait();
}
else
{
// someone aborted the save by changing the mode?
{
// Note: The worker thread takes care of this in the other branch.
std::lock_guard lk(s_state_writes_in_queue_mutex);
if (--s_state_writes_in_queue == 0)
s_state_write_queue_is_empty.notify_all();
}
Core::DisplayMessage("Unable to save: Internal DoState Error", 4000);
}
},
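
SaveAs() serializes the emulator state twice: a first PointerWrap pass in Measure mode only advances a null pointer to compute the required size, and a second pass in Write mode fills a locally owned buffer, which is then moved into the queue item instead of being shared through the old g_current_buffer. The following simplified, hypothetical serializer (not Dolphin's PointerWrap API) illustrates the measure-then-write idea:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical two-pass serializer: pass 1 only counts bytes, pass 2 writes them.
struct Sink
{
  enum class Mode { Measure, Write };
  Mode mode;
  std::uint8_t* ptr = nullptr;  // ignored in Measure mode
  std::size_t offset = 0;

  void Do(const void* data, std::size_t size)
  {
    if (mode == Mode::Write)
      std::memcpy(ptr + offset, data, size);
    offset += size;
  }
};

template <typename StateFunc>
std::vector<std::uint8_t> Serialize(StateFunc&& do_state)
{
  Sink measure{Sink::Mode::Measure};
  do_state(measure);  // first pass: measure only

  std::vector<std::uint8_t> buffer(measure.offset);
  Sink write{Sink::Mode::Write, buffer.data()};
  do_state(write);  // second pass: actually write
  return buffer;    // the caller can std::move() this into a queue item
}

A DoState-style function would then be a callable that invokes sink.Do(&field, sizeof(field)) for every field, in the same order in both passes.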
@ -485,7 +518,9 @@ void SaveAs(const std::string& filename, bool wait)
bool ReadHeader(const std::string& filename, StateHeader& header)
{
Flush();
// ensure that the savestate write thread isn't moving around states while we do this
std::lock_guard lk(s_save_thread_mutex);
File::IOFile f(filename, "rb");
return f.ReadArray(&header, 1);
}
@ -515,8 +550,23 @@ u64 GetUnixTimeOfSlot(int slot)
static void LoadFileStateData(const std::string& filename, std::vector<u8>& ret_data)
{
Flush();
File::IOFile f(filename, "rb");
File::IOFile f;
{
// If a state is currently saving, wait for that to end or time out.
std::unique_lock lk(s_state_writes_in_queue_mutex);
if (s_state_writes_in_queue != 0)
{
if (!s_state_write_queue_is_empty.wait_for(lk, std::chrono::seconds(3),
[]() { return s_state_writes_in_queue == 0; }))
{
Core::DisplayMessage(
"A previous state saving operation is still in progress, cancelling load.", 2000);
return;
}
}
f.Open(filename, "rb");
}
StateHeader header;
if (!f.ReadArray(&header, 1))
@ -600,12 +650,13 @@ void LoadAs(const std::string& filename)
// Save temp buffer for undo load state
if (!Movie::IsJustStartingRecordingInputFromSaveState())
{
std::lock_guard lk2(g_cs_undo_load_buffer);
SaveToBuffer(g_undo_load_buffer);
std::lock_guard lk2(s_undo_load_buffer_mutex);
SaveToBuffer(s_undo_load_buffer);
const std::string dtmpath = File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm";
if (Movie::IsMovieActive())
Movie::SaveRecording(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm");
else if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm"))
File::Delete(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm");
Movie::SaveRecording(dtmpath);
else if (File::Exists(dtmpath))
File::Delete(dtmpath);
}
bool loaded = false;
@ -661,23 +712,31 @@ void Init()
{
if (lzo_init() != LZO_E_OK)
PanicAlertFmtT("Internal LZO Error - lzo_init() failed");
s_save_thread.Reset([](CompressAndDumpState_args args) {
CompressAndDumpState(args);
{
std::lock_guard lk(s_state_writes_in_queue_mutex);
if (--s_state_writes_in_queue == 0)
s_state_write_queue_is_empty.notify_all();
}
if (args.state_write_done_event)
args.state_write_done_event->Set();
});
}
void Shutdown()
{
Flush();
s_save_thread.Shutdown();
// swapping with an empty vector, rather than clear()ing
// this gives a better guarantee to free the allocated memory right NOW (as opposed to, actually,
// never)
{
std::lock_guard lk(g_cs_current_buffer);
std::vector<u8>().swap(g_current_buffer);
}
{
std::lock_guard lk(g_cs_undo_load_buffer);
std::vector<u8>().swap(g_undo_load_buffer);
std::lock_guard lk(s_undo_load_buffer_mutex);
std::vector<u8>().swap(s_undo_load_buffer);
}
}
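
The swap in Shutdown() is used because vector::clear() only destroys the elements and keeps the allocation; swapping with a freshly constructed temporary releases the storage immediately. Since C++11, clear() followed by shrink_to_fit() is an alternative, though shrink_to_fit() is only a non-binding request. A small illustration:

#include <vector>

void ReleaseBufferMemory(std::vector<unsigned char>& buffer)
{
  buffer.clear();  // size() == 0, but capacity() (the heap allocation) is unchanged

  // The temporary takes ownership of the old storage and destroys it at the end
  // of the full expression, so the memory is freed right away.
  std::vector<unsigned char>().swap(buffer);  // capacity() is now (typically) 0
}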
@ -728,30 +787,28 @@ void SaveFirstSaved()
}
}
void Flush()
{
std::lock_guard lk(g_save_thread_mutex);
// If already saving state, wait for it to finish
if (g_save_thread.joinable())
g_save_thread.join();
}
// Load the last state before loading the state
void UndoLoadState()
{
std::lock_guard lk(g_cs_undo_load_buffer);
if (!g_undo_load_buffer.empty())
std::lock_guard lk(s_undo_load_buffer_mutex);
if (!s_undo_load_buffer.empty())
{
if (File::Exists(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm") || (!Movie::IsMovieActive()))
if (Movie::IsMovieActive())
{
LoadFromBuffer(g_undo_load_buffer);
if (Movie::IsMovieActive())
Movie::LoadInput(File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm");
const std::string dtmpath = File::GetUserPath(D_STATESAVES_IDX) + "undo.dtm";
if (File::Exists(dtmpath))
{
LoadFromBuffer(s_undo_load_buffer);
Movie::LoadInput(dtmpath);
}
else
{
PanicAlertFmtT("No undo.dtm found, aborting undo load state to prevent movie desyncs");
}
}
else
{
PanicAlertFmtT("No undo.dtm found, aborting undo load state to prevent movie desyncs");
LoadFromBuffer(s_undo_load_buffer);
}
}
else

Source/Core/Core/State.h

@ -64,9 +64,6 @@ void SaveFirstSaved();
void UndoSaveState();
void UndoLoadState();
// wait until previously scheduled savestate event (if any) is done
void Flush();
// for calling back into UI code without introducing a dependency on it in core
using AfterLoadCallbackFunc = std::function<void()>;
void SetOnAfterLoadCallback(AfterLoadCallbackFunc callback);