Merge pull request #9182 from jordan-woyak/frame-dump-cleanup
VideoCommon: FrameDump fixes/cleanups.
commit 87e4a0785a
Source/Core/Core/State.cpp
@@ -213,10 +213,6 @@ static void DoState(PointerWrap& p)
   p.DoMarker("Wiimote");
   Gecko::DoState(p);
   p.DoMarker("Gecko");
-
-#if defined(HAVE_FFMPEG)
-  FrameDump::DoState();
-#endif
 }
 
 void LoadFromBuffer(std::vector<u8>& buffer)
Source/Core/VideoCommon/FrameDump.cpp
@@ -2,6 +2,8 @@
 // Licensed under GPLv2+
 // Refer to the license.txt file included.
 
+#include "VideoCommon/FrameDump.h"
+
 #if defined(__FreeBSD__)
 #define __STDC_CONSTANT_MACROS 1
 #endif
@@ -9,6 +11,7 @@
 #include <sstream>
 #include <string>
 
+#include <fmt/chrono.h>
 #include <fmt/format.h>
 
 extern "C" {
@@ -18,17 +21,15 @@ extern "C" {
 #include <libswscale/swscale.h>
 }
 
 #include "Common/ChunkFile.h"
 #include "Common/FileUtil.h"
 #include "Common/Logging/Log.h"
 #include "Common/MsgHandler.h"
 #include "Common/StringUtil.h"
 
 #include "Core/ConfigManager.h"
 #include "Core/HW/SystemTimers.h"
-#include "Core/HW/VideoInterface.h" //for TargetRefreshRate
-#include "Core/Movie.h"
+#include "Core/HW/VideoInterface.h"
 
-#include "VideoCommon/FrameDump.h"
 #include "VideoCommon/OnScreenDisplay.h"
 #include "VideoCommon/VideoConfig.h"
@@ -38,24 +39,38 @@ extern "C" {
 #define av_frame_free avcodec_free_frame
 #endif
 
-static AVFormatContext* s_format_context = nullptr;
-static AVStream* s_stream = nullptr;
-static AVCodecContext* s_codec_context = nullptr;
-static AVFrame* s_src_frame = nullptr;
-static AVFrame* s_scaled_frame = nullptr;
-static AVPixelFormat s_pix_fmt = AV_PIX_FMT_BGR24;
-static SwsContext* s_sws_context = nullptr;
-static int s_width;
-static int s_height;
-static u64 s_last_frame;
-static bool s_last_frame_is_valid = false;
-static bool s_start_dumping = false;
-static u64 s_last_pts;
-static int s_file_index = 0;
-static int s_savestate_index = 0;
-static int s_last_savestate_index = 0;
+struct FrameDumpContext
+{
+  AVFormatContext* format = nullptr;
+  AVStream* stream = nullptr;
+  AVCodecContext* codec = nullptr;
+  AVFrame* src_frame = nullptr;
+  AVFrame* scaled_frame = nullptr;
+  SwsContext* sws = nullptr;
 
-static void InitAVCodec()
+  s64 last_pts = AV_NOPTS_VALUE;
+
+  int width = 0;
+  int height = 0;
+
+  u64 first_frame_ticks = 0;
+  u32 savestate_index = 0;
+
+  bool gave_vfr_warning = false;
+};
+
+namespace
+{
+AVRational GetTimeBaseForCurrentRefreshRate()
+{
+  int num;
+  int den;
+  av_reduce(&num, &den, int(VideoInterface::GetTargetRefreshRateDenominator()),
+            int(VideoInterface::GetTargetRefreshRateNumerator()), std::numeric_limits<int>::max());
+  return AVRational{num, den};
+}
+
+void InitAVCodec()
 {
   static bool first_run = true;
   if (first_run)
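For reference, the GetTimeBaseForCurrentRefreshRate() helper added above reduces the VI refresh-rate fraction into an AVRational time base with av_reduce. Here is a minimal self-contained sketch of the same reduction; the 60000/1001 NTSC-like rate is an example value, not something taken from the commit.

#include <cstdio>
#include <limits>
extern "C" {
#include <libavutil/rational.h>
}

int main()
{
  // Refresh rate of roughly 59.94 Hz expressed as numerator/denominator, as the VI reports it.
  const int refresh_num = 60000;
  const int refresh_den = 1001;

  // The time base is the reciprocal of the refresh rate, reduced to lowest terms.
  int num = 0;
  int den = 0;
  av_reduce(&num, &den, refresh_den, refresh_num, std::numeric_limits<int>::max());

  std::printf("time base = %d/%d\n", num, den);  // prints "time base = 1001/60000"
  return 0;
}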
@@ -63,12 +78,13 @@ static void InitAVCodec()
 #if LIBAVCODEC_VERSION_MICRO >= 100 && LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)
     av_register_all();
 #endif
+    // TODO: We never call avformat_network_deinit.
     avformat_network_init();
     first_run = false;
   }
 }
 
-static bool AVStreamCopyContext(AVStream* stream, AVCodecContext* codec_context)
+bool AVStreamCopyContext(AVStream* stream, AVCodecContext* codec_context)
 {
 #if (LIBAVCODEC_VERSION_MICRO >= 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 33, 100)) || \
     (LIBAVCODEC_VERSION_MICRO < 100 && LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 5, 0))
@@ -80,40 +96,26 @@ static bool AVStreamCopyContext(AVStream* stream, AVCodecContext* codec_context)
 #endif
 }
 
-bool FrameDump::Start(int w, int h)
-{
-  s_pix_fmt = AV_PIX_FMT_RGBA;
-
-  s_width = w;
-  s_height = h;
-  s_last_pts = 0;
-  s_last_frame_is_valid = s_file_index != 0;
-
-  InitAVCodec();
-  bool success = CreateVideoFile();
-  if (!success)
-  {
-    CloseVideoFile();
-    OSD::AddMessage("FrameDump Start failed");
-  }
-  return success;
-}
-
-static std::string GetDumpPath(const std::string& format)
+std::string GetDumpPath(const std::string& extension, std::time_t time, u32 index)
 {
   if (!g_Config.sDumpPath.empty())
     return g_Config.sDumpPath;
 
-  const std::string dump_path = File::GetUserPath(D_DUMPFRAMES_IDX) + "framedump" +
-                                std::to_string(s_file_index) + "." + format;
+  const std::string path_prefix =
+      File::GetUserPath(D_DUMPFRAMES_IDX) + SConfig::GetInstance().GetGameID();
 
-  // Ask to delete file
-  if (File::Exists(dump_path))
+  const std::string base_name =
+      fmt::format("{}_{:%Y-%m-%d_%H-%M-%S}_{}", path_prefix, *std::localtime(&time), index);
+
+  const std::string path = fmt::format("{}.{}", base_name, extension);
+
+  // Ask to delete file.
+  if (File::Exists(path))
   {
     if (SConfig::GetInstance().m_DumpFramesSilent ||
-        AskYesNoT("Delete the existing file '%s'?", dump_path.c_str()))
+        AskYesNoT("Delete the existing file '%s'?", path.c_str()))
     {
-      File::Delete(dump_path);
+      File::Delete(path);
     }
     else
     {
@@ -122,29 +124,106 @@ static std::string GetDumpPath(const std::string& format)
     }
   }
 
-  return dump_path;
+  return path;
 }
 
+int ReceivePacket(AVCodecContext* avctx, AVPacket* pkt, int* got_packet)
+{
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
+  return avcodec_encode_video2(avctx, pkt, nullptr, got_packet);
+#else
+  *got_packet = 0;
+
+  const int error = avcodec_receive_packet(avctx, pkt);
+  if (!error)
+    *got_packet = 1;
+
+  if (error == AVERROR(EAGAIN) || error == AVERROR_EOF)
+    return 0;
+
+  return error;
+#endif
+}
+
+int SendFrameAndReceivePacket(AVCodecContext* avctx, AVPacket* pkt, AVFrame* frame, int* got_packet)
+{
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
+  return avcodec_encode_video2(avctx, pkt, frame, got_packet);
+#else
+  *got_packet = 0;
+
+  const int error = avcodec_send_frame(avctx, frame);
+  if (error)
+    return error;
+
+  return ReceivePacket(avctx, pkt, got_packet);
+#endif
+}
+
+void WritePacket(AVPacket& pkt, const FrameDumpContext& context)
+{
+  av_packet_rescale_ts(&pkt, context.codec->time_base, context.stream->time_base);
+
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(56, 60, 100)
+  if (context.codec->coded_frame->key_frame)
+    pkt.flags |= AV_PKT_FLAG_KEY;
+#endif
+
+  pkt.stream_index = context.stream->index;
+  av_interleaved_write_frame(context.format, &pkt);
+}
+
+} // namespace
+
+bool FrameDump::Start(int w, int h)
+{
+  if (IsStarted())
+    return true;
+
+  m_savestate_index = 0;
+  m_start_time = std::time(nullptr);
+  m_file_index = 0;
+
+  return PrepareEncoding(w, h);
+}
+
+bool FrameDump::PrepareEncoding(int w, int h)
+{
+  m_context = std::make_unique<FrameDumpContext>();
+
+  m_context->width = w;
+  m_context->height = h;
+
+  InitAVCodec();
+  const bool success = CreateVideoFile();
+  if (!success)
+  {
+    CloseVideoFile();
+    OSD::AddMessage("FrameDump Start failed");
+  }
+  return success;
+}
+
 bool FrameDump::CreateVideoFile()
 {
   const std::string& format = g_Config.sDumpFormat;
 
-  const std::string dump_path = GetDumpPath(format);
+  const std::string dump_path = GetDumpPath(format, m_start_time, m_file_index);
 
   if (dump_path.empty())
     return false;
 
   File::CreateFullPath(dump_path);
 
-  AVOutputFormat* output_format = av_guess_format(format.c_str(), dump_path.c_str(), nullptr);
+  AVOutputFormat* const output_format = av_guess_format(format.c_str(), dump_path.c_str(), nullptr);
   if (!output_format)
   {
     ERROR_LOG(VIDEO, "Invalid format %s", format.c_str());
     return false;
   }
 
-  if (avformat_alloc_output_context2(&s_format_context, output_format, nullptr, dump_path.c_str()) <
-      0)
+  if (avformat_alloc_output_context2(&m_context->format, output_format, nullptr,
                                     dump_path.c_str()) < 0)
   {
     ERROR_LOG(VIDEO, "Could not allocate output context");
     return false;
@@ -156,7 +235,7 @@ bool FrameDump::CreateVideoFile()
 
   if (!codec_name.empty())
   {
-    const AVCodecDescriptor* codec_desc = avcodec_descriptor_get_by_name(codec_name.c_str());
+    const AVCodecDescriptor* const codec_desc = avcodec_descriptor_get_by_name(codec_name.c_str());
     if (codec_desc)
       codec_id = codec_desc->id;
     else
@@ -174,8 +253,8 @@ bool FrameDump::CreateVideoFile()
   if (!codec)
     codec = avcodec_find_encoder(codec_id);
 
-  s_codec_context = avcodec_alloc_context3(codec);
-  if (!codec || !s_codec_context)
+  m_context->codec = avcodec_alloc_context3(codec);
+  if (!codec || !m_context->codec)
   {
     ERROR_LOG(VIDEO, "Could not find encoder or allocate codec context");
     return false;
@@ -183,274 +262,278 @@ bool FrameDump::CreateVideoFile()
 
   // Force XVID FourCC for better compatibility when using H.263
   if (codec->id == AV_CODEC_ID_MPEG4)
-    s_codec_context->codec_tag = MKTAG('X', 'V', 'I', 'D');
+    m_context->codec->codec_tag = MKTAG('X', 'V', 'I', 'D');
 
-  s_codec_context->codec_type = AVMEDIA_TYPE_VIDEO;
-  s_codec_context->bit_rate = static_cast<int64_t>(g_Config.iBitrateKbps) * 1000;
-  s_codec_context->width = s_width;
-  s_codec_context->height = s_height;
-  s_codec_context->time_base.num = VideoInterface::GetTargetRefreshRateDenominator();
-  s_codec_context->time_base.den = VideoInterface::GetTargetRefreshRateNumerator();
-  s_codec_context->gop_size = 1;
-  s_codec_context->level = 1;
-  s_codec_context->pix_fmt = g_Config.bUseFFV1 ? AV_PIX_FMT_BGR0 : AV_PIX_FMT_YUV420P;
+  const auto time_base = GetTimeBaseForCurrentRefreshRate();
+
+  INFO_LOG_FMT(VIDEO, "Creating video file: {} x {} @ {}/{} fps", m_context->width,
+               m_context->height, time_base.den, time_base.num);
+
+  m_context->codec->codec_type = AVMEDIA_TYPE_VIDEO;
+  m_context->codec->bit_rate = static_cast<int64_t>(g_Config.iBitrateKbps) * 1000;
+  m_context->codec->width = m_context->width;
+  m_context->codec->height = m_context->height;
+  m_context->codec->time_base = time_base;
+  m_context->codec->gop_size = 1;
+  m_context->codec->level = 1;
+  m_context->codec->pix_fmt = g_Config.bUseFFV1 ? AV_PIX_FMT_BGR0 : AV_PIX_FMT_YUV420P;
 
   if (output_format->flags & AVFMT_GLOBALHEADER)
-    s_codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+    m_context->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
 
-  if (avcodec_open2(s_codec_context, codec, nullptr) < 0)
+  if (avcodec_open2(m_context->codec, codec, nullptr) < 0)
   {
     ERROR_LOG(VIDEO, "Could not open codec");
     return false;
   }
 
-  s_src_frame = av_frame_alloc();
-  s_scaled_frame = av_frame_alloc();
+  m_context->src_frame = av_frame_alloc();
+  m_context->scaled_frame = av_frame_alloc();
 
-  s_scaled_frame->format = s_codec_context->pix_fmt;
-  s_scaled_frame->width = s_width;
-  s_scaled_frame->height = s_height;
+  m_context->scaled_frame->format = m_context->codec->pix_fmt;
+  m_context->scaled_frame->width = m_context->width;
+  m_context->scaled_frame->height = m_context->height;
 
 #if LIBAVCODEC_VERSION_MAJOR >= 55
-  if (av_frame_get_buffer(s_scaled_frame, 1))
+  if (av_frame_get_buffer(m_context->scaled_frame, 1))
     return false;
 #else
-  if (avcodec_default_get_buffer(s_codec_context, s_scaled_frame))
+  if (avcodec_default_get_buffer(m_context->codec, m_context->scaled_frame))
     return false;
 #endif
 
-  s_stream = avformat_new_stream(s_format_context, codec);
-  if (!s_stream || !AVStreamCopyContext(s_stream, s_codec_context))
+  m_context->stream = avformat_new_stream(m_context->format, codec);
+  if (!m_context->stream || !AVStreamCopyContext(m_context->stream, m_context->codec))
   {
     ERROR_LOG(VIDEO, "Could not create stream");
     return false;
   }
 
   NOTICE_LOG(VIDEO, "Opening file %s for dumping", dump_path.c_str());
-  if (avio_open(&s_format_context->pb, dump_path.c_str(), AVIO_FLAG_WRITE) < 0 ||
-      avformat_write_header(s_format_context, nullptr))
+  if (avio_open(&m_context->format->pb, dump_path.c_str(), AVIO_FLAG_WRITE) < 0 ||
+      avformat_write_header(m_context->format, nullptr))
   {
     ERROR_LOG(VIDEO, "Could not open %s", dump_path.c_str());
     return false;
   }
 
-  OSD::AddMessage(fmt::format("Dumping Frames to \"{}\" ({}x{})", dump_path, s_width, s_height));
+  if (av_cmp_q(m_context->stream->time_base, time_base) != 0)
+  {
+    WARN_LOG_FMT(VIDEO, "Stream time base differs at {}/{}", m_context->stream->time_base.den,
+                 m_context->stream->time_base.num);
+  }
+
+  OSD::AddMessage(fmt::format("Dumping Frames to \"{}\" ({}x{})", dump_path, m_context->width,
+                              m_context->height));
   return true;
 }
 
-static void PreparePacket(AVPacket* pkt)
+bool FrameDump::IsFirstFrameInCurrentFile() const
 {
-  av_init_packet(pkt);
-  pkt->data = nullptr;
-  pkt->size = 0;
+  return m_context->last_pts == AV_NOPTS_VALUE;
 }
 
-static int ReceivePacket(AVCodecContext* avctx, AVPacket* pkt, int* got_packet)
+void FrameDump::AddFrame(const FrameData& frame)
 {
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
-  return avcodec_encode_video2(avctx, pkt, nullptr, got_packet);
-#else
-  *got_packet = 0;
-  int error = avcodec_receive_packet(avctx, pkt);
-  if (!error)
-    *got_packet = 1;
-  if (error == AVERROR(EAGAIN))
-    return 0;
+  // Are we even dumping?
+  if (!IsStarted())
+    return;
 
-  return error;
-#endif
-}
+  CheckForConfigChange(frame);
 
-static int SendFrameAndReceivePacket(AVCodecContext* avctx, AVPacket* pkt, AVFrame* frame,
-                                     int* got_packet)
-{
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57, 37, 100)
-  return avcodec_encode_video2(avctx, pkt, frame, got_packet);
-#else
-  *got_packet = 0;
-  int error = avcodec_send_frame(avctx, frame);
-  if (error)
-    return error;
+  // Handle failure after a config change.
+  if (!IsStarted())
+    return;
 
-  return ReceivePacket(avctx, pkt, got_packet);
-#endif
-}
-
-static void WritePacket(AVPacket& pkt)
-{
-  // Write the compressed frame in the media file.
-  if (pkt.pts != (s64)AV_NOPTS_VALUE)
+  if (IsFirstFrameInCurrentFile())
   {
-    pkt.pts = av_rescale_q(pkt.pts, s_codec_context->time_base, s_stream->time_base);
-  }
-  if (pkt.dts != (s64)AV_NOPTS_VALUE)
-  {
-    pkt.dts = av_rescale_q(pkt.dts, s_codec_context->time_base, s_stream->time_base);
-  }
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(56, 60, 100)
-  if (s_codec_context->coded_frame->key_frame)
-    pkt.flags |= AV_PKT_FLAG_KEY;
-#endif
-  pkt.stream_index = s_stream->index;
-  av_interleaved_write_frame(s_format_context, &pkt);
-}
-
-static u64 TicksToTimeBaseUnits(u64 ticks, AVRational time_base, u32 ticks_per_second)
-{
-  return ticks * time_base.den / time_base.num / ticks_per_second;
-}
-
-void FrameDump::AddFrame(const u8* data, int width, int height, int stride, const Frame& state)
-{
-  // Assume that the timing is valid, if the savestate id of the new frame
-  // doesn't match the last one.
-  if (state.savestate_index != s_last_savestate_index)
-  {
-    s_last_savestate_index = state.savestate_index;
-    s_last_frame_is_valid = false;
+    m_context->first_frame_ticks = frame.state.ticks;
+    m_context->savestate_index = frame.state.savestate_index;
   }
 
-  CheckResolution(width, height);
-  s_src_frame->data[0] = const_cast<u8*>(data);
-  s_src_frame->linesize[0] = stride;
-  s_src_frame->format = s_pix_fmt;
-  s_src_frame->width = s_width;
-  s_src_frame->height = s_height;
+  // Calculate presentation timestamp from current ticks since first frame ticks.
+  const s64 pts = av_rescale_q(frame.state.ticks - m_context->first_frame_ticks,
+                               AVRational{1, int(SystemTimers::GetTicksPerSecond())},
+                               m_context->codec->time_base);
 
-  // Convert image from {BGR24, RGBA} to desired pixel format
-  s_sws_context =
-      sws_getCachedContext(s_sws_context, width, height, s_pix_fmt, s_width, s_height,
-                           s_codec_context->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
-  if (s_sws_context)
+  if (!IsFirstFrameInCurrentFile())
   {
-    sws_scale(s_sws_context, s_src_frame->data, s_src_frame->linesize, 0, height,
-              s_scaled_frame->data, s_scaled_frame->linesize);
+    if (pts <= m_context->last_pts)
+    {
+      WARN_LOG(VIDEO, "PTS delta < 1. Current frame will not be dumped.");
+      return;
+    }
+    else if (pts > m_context->last_pts + 1 && !m_context->gave_vfr_warning)
+    {
+      WARN_LOG(VIDEO, "PTS delta > 1. Resulting file will have variable frame rate. "
+                      "Subsequent occurances will not be reported.");
+      m_context->gave_vfr_warning = true;
+    }
   }
 
+  constexpr AVPixelFormat pix_fmt = AV_PIX_FMT_RGBA;
+
+  m_context->src_frame->data[0] = const_cast<u8*>(frame.data);
+  m_context->src_frame->linesize[0] = frame.stride;
+  m_context->src_frame->format = pix_fmt;
+  m_context->src_frame->width = m_context->width;
+  m_context->src_frame->height = m_context->height;
+
+  // Convert image from RGBA to desired pixel format.
+  m_context->sws = sws_getCachedContext(
+      m_context->sws, frame.width, frame.height, pix_fmt, m_context->width, m_context->height,
+      m_context->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
+  if (m_context->sws)
+  {
+    sws_scale(m_context->sws, m_context->src_frame->data, m_context->src_frame->linesize, 0,
+              frame.height, m_context->scaled_frame->data, m_context->scaled_frame->linesize);
+  }
+
+  m_context->last_pts = pts;
+  m_context->scaled_frame->pts = pts;
+
   // Encode and write the image.
   AVPacket pkt;
-  PreparePacket(&pkt);
+  av_init_packet(&pkt);
 
   int got_packet = 0;
-  int error = 0;
-  u64 delta;
-  s64 last_pts;
-  // Check to see if the first frame being dumped is the first frame of output from the emulator.
-  // This prevents an issue with starting dumping later in emulation from placing the frames
-  // incorrectly.
-  if (!s_last_frame_is_valid)
-  {
-    s_last_frame = state.ticks;
-    s_last_frame_is_valid = true;
-  }
-  if (!s_start_dumping && state.first_frame)
-  {
-    delta = state.ticks;
-    last_pts = AV_NOPTS_VALUE;
-    s_start_dumping = true;
-  }
-  else
-  {
-    delta = state.ticks - s_last_frame;
-    last_pts = TicksToTimeBaseUnits(s_last_pts, s_codec_context->time_base, state.ticks_per_second);
-  }
-  u64 pts_in_ticks = s_last_pts + delta;
-  s_scaled_frame->pts =
-      TicksToTimeBaseUnits(pts_in_ticks, s_codec_context->time_base, state.ticks_per_second);
-  if (s_scaled_frame->pts != last_pts)
-  {
-    s_last_frame = state.ticks;
-    s_last_pts = pts_in_ticks;
-    error = SendFrameAndReceivePacket(s_codec_context, &pkt, s_scaled_frame, &got_packet);
-  }
-  if (!error && got_packet)
-  {
-    WritePacket(pkt);
-  }
+  const int error =
+      SendFrameAndReceivePacket(m_context->codec, &pkt, m_context->scaled_frame, &got_packet);
+
+  if (error)
+  {
+    ERROR_LOG(VIDEO, "Error while encoding video: %d", error);
+    return;
+  }
+
+  if (got_packet)
+    WritePacket(pkt, *m_context);
+
+  HandleDelayedPackets();
 }
 
-static void HandleDelayedPackets()
+void FrameDump::HandleDelayedPackets()
 {
-  AVPacket pkt;
-
   while (true)
   {
-    PreparePacket(&pkt);
-    int got_packet;
-    int error = ReceivePacket(s_codec_context, &pkt, &got_packet);
+    AVPacket pkt;
+    av_init_packet(&pkt);
+
+    int got_packet = 0;
+    const int error = ReceivePacket(m_context->codec, &pkt, &got_packet);
     if (error)
     {
-      ERROR_LOG(VIDEO, "Error while stopping video: %d", error);
+      ERROR_LOG(VIDEO, "Error while encoding delayed frames: %d", error);
      break;
    }
+
    if (!got_packet)
      break;
+
-    WritePacket(pkt);
+    WritePacket(pkt, *m_context);
  }
 }
 
 void FrameDump::Stop()
 {
+  if (!IsStarted())
+    return;
+
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(57, 37, 100)
+  // Signal end of stream to encoder.
+  if (const int flush_error = avcodec_send_frame(m_context->codec, nullptr))
+    WARN_LOG_FMT(VIDEO, "Error sending flush packet: {}", flush_error);
+#endif
+
   HandleDelayedPackets();
-  av_write_trailer(s_format_context);
+  av_write_trailer(m_context->format);
   CloseVideoFile();
-  s_file_index = 0;
-  s_start_dumping = false;
   NOTICE_LOG(VIDEO, "Stopping frame dump");
   OSD::AddMessage("Stopped dumping frames");
 }
 
+bool FrameDump::IsStarted() const
+{
+  return m_context != nullptr;
+}
+
 void FrameDump::CloseVideoFile()
 {
-  av_frame_free(&s_src_frame);
-  av_frame_free(&s_scaled_frame);
+  av_frame_free(&m_context->src_frame);
+  av_frame_free(&m_context->scaled_frame);
 
-  avcodec_free_context(&s_codec_context);
+  avcodec_free_context(&m_context->codec);
 
-  if (s_format_context)
-  {
-    avio_closep(&s_format_context->pb);
-  }
-  avformat_free_context(s_format_context);
-  s_format_context = nullptr;
+  if (m_context->format)
+    avio_closep(&m_context->format->pb);
 
-  if (s_sws_context)
-  {
-    sws_freeContext(s_sws_context);
-    s_sws_context = nullptr;
-  }
+  avformat_free_context(m_context->format);
+
+  if (m_context->sws)
+    sws_freeContext(m_context->sws);
+
+  m_context.reset();
 }
 
-void FrameDump::DoState()
+void FrameDump::DoState(PointerWrap& p)
 {
-  s_savestate_index++;
+  if (p.GetMode() == PointerWrap::MODE_READ)
+    ++m_savestate_index;
 }
 
-void FrameDump::CheckResolution(int width, int height)
+void FrameDump::CheckForConfigChange(const FrameData& frame)
 {
+  bool restart_dump = false;
+
-  // We check here to see if the requested width and height have changed since the last frame which
-  // was dumped, then create a new file accordingly. However, is it possible for the height
-  // (possibly width as well, but no examples known) to have a value of zero. This can occur as the
-  // VI is able to be set to a zero value for height/width to disable output. If this is the case,
-  // simply keep the last known resolution of the video for the added frame.
-  if ((width != s_width || height != s_height) && (width > 0 && height > 0))
+  if ((frame.width != m_context->width || frame.height != m_context->height) &&
+      (frame.width > 0 && frame.height > 0))
   {
+    INFO_LOG(VIDEO, "Starting new dump on resolution change.");
+    restart_dump = true;
+  }
+  else if (!IsFirstFrameInCurrentFile() &&
+           frame.state.savestate_index != m_context->savestate_index)
+  {
+    INFO_LOG(VIDEO, "Starting new dump on savestate load.");
+    restart_dump = true;
+  }
+  else if (frame.state.refresh_rate_den != m_context->codec->time_base.num ||
+           frame.state.refresh_rate_num != m_context->codec->time_base.den)
+  {
+    INFO_LOG_FMT(VIDEO, "Starting new dump on refresh rate change {}/{} vs {}/{}.",
+                 m_context->codec->time_base.den, m_context->codec->time_base.num,
+                 frame.state.refresh_rate_num, frame.state.refresh_rate_den);
+    restart_dump = true;
+  }
+
+  if (restart_dump)
+  {
-    int temp_file_index = s_file_index;
     Stop();
-    s_file_index = temp_file_index + 1;
-    Start(width, height);
+    ++m_file_index;
+    PrepareEncoding(frame.width, frame.height);
   }
 }
 
-FrameDump::Frame FrameDump::FetchState(u64 ticks)
+FrameDump::FrameState FrameDump::FetchState(u64 ticks) const
 {
-  Frame state;
+  FrameState state;
   state.ticks = ticks;
-  state.first_frame = Movie::GetCurrentFrame() <= 1;
   state.ticks_per_second = SystemTimers::GetTicksPerSecond();
-  state.savestate_index = s_savestate_index;
+  state.savestate_index = m_savestate_index;
+
+  const auto time_base = GetTimeBaseForCurrentRefreshRate();
+  state.refresh_rate_num = time_base.den;
+  state.refresh_rate_den = time_base.num;
   return state;
 }
+
+FrameDump::FrameDump() = default;
+
+FrameDump::~FrameDump()
+{
+  Stop();
+}
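For context on the timing rework in AddFrame above: each frame's presentation timestamp is now derived by rescaling emulated ticks since the first dumped frame into the codec time base with av_rescale_q. The following is a minimal sketch of that calculation only; the tick count, clock rate, and time base in the usage comment are example values, not taken from the commit.

#include <cstdint>
extern "C" {
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>
}

// Sketch: convert a tick count since the first dumped frame into a PTS expressed
// in the codec's time base, the way the new AddFrame does with av_rescale_q.
int64_t TicksToPts(uint64_t ticks_since_first_frame, uint32_t ticks_per_second,
                   AVRational codec_time_base)
{
  // av_rescale_q(a, b, c) computes a * b / c with 64-bit intermediate precision.
  return av_rescale_q(static_cast<int64_t>(ticks_since_first_frame),
                      AVRational{1, static_cast<int>(ticks_per_second)}, codec_time_base);
}

// Example (hypothetical values): a 486 MHz clock and a 1001/60000 time base.
// int64_t pts = TicksToPts(486000000 / 60, 486000000, AVRational{1001, 60000});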
Source/Core/VideoCommon/FrameDump.h
@@ -4,32 +4,72 @@
 
 #pragma once
 
+#include <ctime>
+#include <memory>
+
 #include "Common/CommonTypes.h"
 
+struct FrameDumpContext;
+class PointerWrap;
+
 class FrameDump
 {
-private:
-  static bool CreateVideoFile();
-  static void CloseVideoFile();
-  static void CheckResolution(int width, int height);
-
 public:
-  struct Frame
+  FrameDump();
+  ~FrameDump();
+
+  // Holds relevant emulation state during a rendered frame for
+  // when it is later asynchronously written.
+  struct FrameState
   {
     u64 ticks = 0;
     u32 ticks_per_second = 0;
-    bool first_frame = false;
-    int savestate_index = 0;
+    u32 savestate_index = 0;
+    int refresh_rate_num = 0;
+    int refresh_rate_den = 0;
   };
 
-  static bool Start(int w, int h);
-  static void AddFrame(const u8* data, int width, int height, int stride, const Frame& state);
-  static void Stop();
-  static void DoState();
+  struct FrameData
+  {
+    const u8* data;
+    int width;
+    int height;
+    int stride;
+    FrameState state;
+  };
+
+  bool Start(int w, int h);
+  void AddFrame(const FrameData&);
+  void Stop();
+  void DoState(PointerWrap&);
+  bool IsStarted() const;
+  FrameState FetchState(u64 ticks) const;
+
+private:
+  bool IsFirstFrameInCurrentFile() const;
+  bool PrepareEncoding(int w, int h);
+  bool CreateVideoFile();
+  void CloseVideoFile();
+  void CheckForConfigChange(const FrameData&);
+  void HandleDelayedPackets();
 
 #if defined(HAVE_FFMPEG)
-  static Frame FetchState(u64 ticks);
-#else
-  static Frame FetchState(u64 ticks) { return {}; }
+  std::unique_ptr<FrameDumpContext> m_context;
 #endif
+
+  // Used for FetchState:
+  u32 m_savestate_index = 0;
+
+  // Used for filename generation.
+  std::time_t m_start_time = {};
+  u32 m_file_index = 0;
 };
+
+#if !defined(HAVE_FFMPEG)
+inline FrameDump::FrameDump() = default;
+inline FrameDump::~FrameDump() = default;
+
+inline FrameDump::FrameState FrameDump::FetchState(u64 ticks) const
+{
+  return {};
+}
+#endif
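To illustrate the instance-based interface declared above (the class previously exposed only static functions), here is a hedged usage sketch. The buffer, dimensions, and tick value are placeholders; in Dolphin itself the Renderer owns the FrameDump instance and drives it from its dumping thread.

#include <vector>
#include "VideoCommon/FrameDump.h"

// Sketch: feed one tightly packed RGBA frame to a FrameDump instance.
void DumpOneFrame(FrameDump& dump, const std::vector<u8>& rgba, int width, int height, u64 ticks)
{
  if (!dump.Start(width, height))
    return;

  FrameDump::FrameData frame;
  frame.data = rgba.data();
  frame.width = width;
  frame.height = height;
  frame.stride = width * 4;              // 4 bytes per RGBA pixel, no row padding
  frame.state = dump.FetchState(ticks);  // capture emulation timing state for this frame

  dump.AddFrame(frame);
  dump.Stop();
}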
Source/Core/VideoCommon/RenderBase.cpp
@@ -1409,17 +1409,13 @@ void Renderer::DumpCurrentFrame(const AbstractTexture* src_texture,
     copy_rect = src_texture->GetRect();
   }
 
-  // Index 0 was just sent to FFMPEG dump. Swap with the second texture.
-  if (m_frame_dump_readback_textures[0])
-    std::swap(m_frame_dump_readback_textures[0], m_frame_dump_readback_textures[1]);
-
   if (!CheckFrameDumpReadbackTexture(target_width, target_height))
     return;
 
-  m_frame_dump_readback_textures[0]->CopyFromTexture(src_texture, copy_rect, 0, 0,
-                                                     m_frame_dump_readback_textures[0]->GetRect());
-  m_last_frame_state = FrameDump::FetchState(ticks);
-  m_last_frame_exported = true;
+  m_frame_dump_readback_texture->CopyFromTexture(src_texture, copy_rect, 0, 0,
+                                                 m_frame_dump_readback_texture->GetRect());
+  m_last_frame_state = m_frame_dump.FetchState(ticks);
+  m_frame_dump_needs_flush = true;
 }
 
 bool Renderer::CheckFrameDumpRenderTexture(u32 target_width, u32 target_height)
@@ -1450,7 +1446,7 @@ bool Renderer::CheckFrameDumpRenderTexture(u32 target_width, u32 target_height)
 
 bool Renderer::CheckFrameDumpReadbackTexture(u32 target_width, u32 target_height)
 {
-  std::unique_ptr<AbstractStagingTexture>& rbtex = m_frame_dump_readback_textures[0];
+  std::unique_ptr<AbstractStagingTexture>& rbtex = m_frame_dump_readback_texture;
   if (rbtex && rbtex->GetWidth() == target_width && rbtex->GetHeight() == target_height)
     return true;
 
@@ -1466,24 +1462,28 @@ bool Renderer::CheckFrameDumpReadbackTexture(u32 target_width, u32 target_height
 
 void Renderer::FlushFrameDump()
 {
-  if (!m_last_frame_exported)
+  if (!m_frame_dump_needs_flush)
     return;
 
-  // Ensure the previously-queued frame was encoded.
+  // Ensure dumping thread is done with output texture before swapping.
   FinishFrameData();
 
+  std::swap(m_frame_dump_output_texture, m_frame_dump_readback_texture);
+
   // Queue encoding of the last frame dumped.
-  std::unique_ptr<AbstractStagingTexture>& rbtex = m_frame_dump_readback_textures[0];
-  rbtex->Flush();
-  if (rbtex->Map())
+  auto& output = m_frame_dump_output_texture;
+  output->Flush();
+  if (output->Map())
   {
-    DumpFrameData(reinterpret_cast<u8*>(rbtex->GetMappedPointer()), rbtex->GetConfig().width,
-                  rbtex->GetConfig().height, static_cast<int>(rbtex->GetMappedStride()),
-                  m_last_frame_state);
-    rbtex->Unmap();
+    DumpFrameData(reinterpret_cast<u8*>(output->GetMappedPointer()), output->GetConfig().width,
+                  output->GetConfig().height, static_cast<int>(output->GetMappedStride()));
   }
+  else
+  {
+    ERROR_LOG(VIDEO, "Failed to map texture for dumping.");
+  }
 
-  m_last_frame_exported = false;
+  m_frame_dump_needs_flush = false;
 
   // Shutdown frame dumping if it is no longer active.
   if (!IsFrameDumping())
@@ -1508,21 +1508,21 @@ void Renderer::ShutdownFrameDumping()
     m_frame_dump_thread.join();
   m_frame_dump_render_framebuffer.reset();
   m_frame_dump_render_texture.reset();
-  for (auto& tex : m_frame_dump_readback_textures)
-    tex.reset();
+
+  m_frame_dump_readback_texture.reset();
+  m_frame_dump_output_texture.reset();
 }
 
-void Renderer::DumpFrameData(const u8* data, int w, int h, int stride,
-                             const FrameDump::Frame& state)
+void Renderer::DumpFrameData(const u8* data, int w, int h, int stride)
 {
-  m_frame_dump_config = FrameDumpConfig{data, w, h, stride, state};
+  m_frame_dump_data = FrameDump::FrameData{data, w, h, stride, m_last_frame_state};
 
   if (!m_frame_dump_thread_running.IsSet())
   {
     if (m_frame_dump_thread.joinable())
       m_frame_dump_thread.join();
     m_frame_dump_thread_running.Set();
-    m_frame_dump_thread = std::thread(&Renderer::RunFrameDumps, this);
+    m_frame_dump_thread = std::thread(&Renderer::FrameDumpThreadFunc, this);
   }
 
   // Wake worker thread up.
@@ -1537,11 +1537,14 @@ void Renderer::FinishFrameData()
 
   m_frame_dump_done.Wait();
   m_frame_dump_frame_running = false;
+
+  m_frame_dump_output_texture->Unmap();
 }
 
-void Renderer::RunFrameDumps()
+void Renderer::FrameDumpThreadFunc()
 {
   Common::SetCurrentThreadName("FrameDumping");
+
   bool dump_to_ffmpeg = !g_ActiveConfig.bDumpFramesAsImages;
   bool frame_dump_started = false;
 
@@ -1561,14 +1564,14 @@ void Renderer::RunFrameDumps()
     if (!m_frame_dump_thread_running.IsSet())
       break;
 
-    auto config = m_frame_dump_config;
+    auto frame = m_frame_dump_data;
 
     // Save screenshot
     if (m_screenshot_request.TestAndClear())
     {
       std::lock_guard<std::mutex> lk(m_screenshot_lock);
 
-      if (TextureToPng(config.data, config.stride, m_screenshot_name, config.width, config.height,
+      if (TextureToPng(frame.data, frame.stride, m_screenshot_name, frame.width, frame.height,
                        false))
         OSD::AddMessage("Screenshot saved to " + m_screenshot_name);
 
@@ -1582,9 +1585,9 @@ void Renderer::RunFrameDumps()
     if (!frame_dump_started)
     {
       if (dump_to_ffmpeg)
-        frame_dump_started = StartFrameDumpToFFMPEG(config);
+        frame_dump_started = StartFrameDumpToFFMPEG(frame);
       else
-        frame_dump_started = StartFrameDumpToImage(config);
+        frame_dump_started = StartFrameDumpToImage(frame);
 
       // Stop frame dumping if we fail to start.
       if (!frame_dump_started)
@@ -1595,9 +1598,9 @@ void Renderer::RunFrameDumps()
     if (frame_dump_started)
     {
       if (dump_to_ffmpeg)
-        DumpFrameToFFMPEG(config);
+        DumpFrameToFFMPEG(frame);
       else
-        DumpFrameToImage(config);
+        DumpFrameToImage(frame);
     }
   }
 
@@ -1614,29 +1617,29 @@ void Renderer::RunFrameDumps()
 
 #if defined(HAVE_FFMPEG)
 
-bool Renderer::StartFrameDumpToFFMPEG(const FrameDumpConfig& config)
+bool Renderer::StartFrameDumpToFFMPEG(const FrameDump::FrameData& frame)
 {
-  return FrameDump::Start(config.width, config.height);
+  return m_frame_dump.Start(frame.width, frame.height);
 }
 
-void Renderer::DumpFrameToFFMPEG(const FrameDumpConfig& config)
+void Renderer::DumpFrameToFFMPEG(const FrameDump::FrameData& frame)
 {
-  FrameDump::AddFrame(config.data, config.width, config.height, config.stride, config.state);
+  m_frame_dump.AddFrame(frame);
 }
 
 void Renderer::StopFrameDumpToFFMPEG()
 {
-  FrameDump::Stop();
+  m_frame_dump.Stop();
 }
 
 #else
 
-bool Renderer::StartFrameDumpToFFMPEG(const FrameDumpConfig& config)
+bool Renderer::StartFrameDumpToFFMPEG(const FrameDump::FrameData&)
 {
   return false;
 }
 
-void Renderer::DumpFrameToFFMPEG(const FrameDumpConfig& config)
+void Renderer::DumpFrameToFFMPEG(const FrameDump::FrameData&)
 {
 }
 
@@ -1652,7 +1655,7 @@ std::string Renderer::GetFrameDumpNextImageFileName() const
                      m_frame_dump_image_counter);
 }
 
-bool Renderer::StartFrameDumpToImage(const FrameDumpConfig& config)
+bool Renderer::StartFrameDumpToImage(const FrameDump::FrameData&)
 {
   m_frame_dump_image_counter = 1;
   if (!SConfig::GetInstance().m_DumpFramesSilent)
@@ -1671,10 +1674,10 @@ bool Renderer::StartFrameDumpToImage(const FrameDumpConfig& config)
   return true;
 }
 
-void Renderer::DumpFrameToImage(const FrameDumpConfig& config)
+void Renderer::DumpFrameToImage(const FrameDump::FrameData& frame)
 {
   std::string filename = GetFrameDumpNextImageFileName();
-  TextureToPng(config.data, config.stride, filename, config.width, config.height, false);
+  TextureToPng(frame.data, frame.stride, filename, frame.width, frame.height, false);
   m_frame_dump_image_counter++;
 }
 
@@ -1718,6 +1721,10 @@ void Renderer::DoState(PointerWrap& p)
     // And actually display it.
     Swap(m_last_xfb_addr, m_last_xfb_width, m_last_xfb_stride, m_last_xfb_height, m_last_xfb_ticks);
   }
+
+#if defined(HAVE_FFMPEG)
+  m_frame_dump.DoState(p);
+#endif
 }
 
 std::unique_ptr<VideoCommon::AsyncShaderCompiler> Renderer::CreateAsyncShaderCompiler()
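The FlushFrameDump() change above relies on a small double-buffer handoff: the readback texture that just received a frame is swapped with the output texture the dump thread consumes, after waiting for the previous frame to finish. Below is a generic sketch of that pattern using standard C++ primitives instead of Dolphin's Common::Event and staging textures; all names in it are illustrative, and the step that actually wakes the consumer is omitted.

#include <condition_variable>
#include <mutex>
#include <utility>
#include <vector>

// Sketch: two buffers; the producer fills "readback", waits until the consumer is
// finished with "output", then swaps them so each side always owns exactly one buffer.
struct FrameHandoff
{
  std::vector<unsigned char> readback;  // written by the video thread
  std::vector<unsigned char> output;    // read by the dump thread
  std::mutex mutex;
  std::condition_variable done;
  bool busy = false;

  // Producer side, roughly equivalent to FinishFrameData() followed by the swap.
  void Flush()
  {
    std::unique_lock lock(mutex);
    done.wait(lock, [&] { return !busy; });  // wait for the previous frame to finish
    std::swap(output, readback);             // hand the new frame to the consumer
    busy = true;                             // consumer now owns "output"
  }

  // Consumer side: called when encoding of "output" finishes.
  void Complete()
  {
    {
      std::lock_guard lock(mutex);
      busy = false;
    }
    done.notify_one();
  }
};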
Source/Core/VideoCommon/RenderBase.h
@@ -341,7 +341,6 @@ protected:
   u64 m_imgui_last_frame_time;
 
 private:
-  void RunFrameDumps();
   std::tuple<int, int> CalculateOutputDimensions(int width, int height) const;
 
   PEControl::PixelFormat m_prev_efb_format = PEControl::INVALID_FMT;
@@ -351,28 +350,37 @@ private:
   int m_last_window_request_width = 0;
   int m_last_window_request_height = 0;
 
-  // frame dumping
+  // frame dumping:
+  FrameDump m_frame_dump;
   std::thread m_frame_dump_thread;
-  Common::Event m_frame_dump_start;
-  Common::Event m_frame_dump_done;
   Common::Flag m_frame_dump_thread_running;
-  u32 m_frame_dump_image_counter = 0;
-  bool m_frame_dump_frame_running = false;
-  struct FrameDumpConfig
-  {
-    const u8* data;
-    int width;
-    int height;
-    int stride;
-    FrameDump::Frame state;
-  } m_frame_dump_config;
+
+  // Used to kick frame dump thread.
+  Common::Event m_frame_dump_start;
+
+  // Set by frame dump thread on frame completion.
+  Common::Event m_frame_dump_done;
+
+  // Holds emulation state during the last swap when dumping.
+  FrameDump::FrameState m_last_frame_state;
+
+  // Communication of frame between video and dump threads.
+  FrameDump::FrameData m_frame_dump_data;
 
   // Texture used for screenshot/frame dumping
   std::unique_ptr<AbstractTexture> m_frame_dump_render_texture;
   std::unique_ptr<AbstractFramebuffer> m_frame_dump_render_framebuffer;
-  std::array<std::unique_ptr<AbstractStagingTexture>, 2> m_frame_dump_readback_textures;
-  FrameDump::Frame m_last_frame_state;
-  bool m_last_frame_exported = false;
 
+  // Double buffer:
+  std::unique_ptr<AbstractStagingTexture> m_frame_dump_readback_texture;
+  std::unique_ptr<AbstractStagingTexture> m_frame_dump_output_texture;
+  // Set when readback texture holds a frame that needs to be dumped.
+  bool m_frame_dump_needs_flush = false;
+  // Set when thread is processing output texture.
+  bool m_frame_dump_frame_running = false;
+
+  // Used to generate screenshot names.
+  u32 m_frame_dump_image_counter = 0;
+
   // Tracking of XFB textures so we don't render duplicate frames.
   u64 m_last_xfb_id = std::numeric_limits<u64>::max();
@@ -383,12 +391,14 @@ private:
   u32 m_last_xfb_height = 0;
 
   // NOTE: The methods below are called on the framedumping thread.
-  bool StartFrameDumpToFFMPEG(const FrameDumpConfig& config);
-  void DumpFrameToFFMPEG(const FrameDumpConfig& config);
+  void FrameDumpThreadFunc();
+  bool StartFrameDumpToFFMPEG(const FrameDump::FrameData&);
+  void DumpFrameToFFMPEG(const FrameDump::FrameData&);
   void StopFrameDumpToFFMPEG();
   std::string GetFrameDumpNextImageFileName() const;
-  bool StartFrameDumpToImage(const FrameDumpConfig& config);
-  void DumpFrameToImage(const FrameDumpConfig& config);
+  bool StartFrameDumpToImage(const FrameDump::FrameData&);
+  void DumpFrameToImage(const FrameDump::FrameData&);
+
   void ShutdownFrameDumping();
 
   bool IsFrameDumping() const;
@@ -404,7 +414,7 @@ private:
                 const MathUtil::Rectangle<int>& src_rect, u64 ticks);
 
   // Asynchronously encodes the specified pointer of frame data to the frame dump.
-  void DumpFrameData(const u8* data, int w, int h, int stride, const FrameDump::Frame& state);
+  void DumpFrameData(const u8* data, int w, int h, int stride);
 
   // Ensures all rendered frames are queued for encoding.
   void FlushFrameDump();