utils: implement video encoder and image sink

This commit is contained in:
Megamouse 2022-11-05 10:53:26 +01:00
parent 4f5076346d
commit 88269636d3
12 changed files with 894 additions and 0 deletions

View File

@ -41,6 +41,7 @@ target_include_directories(rpcs3_emu
target_sources(rpcs3_emu PRIVATE
../util/atomic.cpp
../util/media_utils.cpp
../util/video_provider.cpp
../util/logs.cpp
../util/yaml.cpp
../util/vm_native.cpp
@ -380,6 +381,7 @@ target_link_libraries(rpcs3_emu
# Io
target_sources(rpcs3_emu PRIVATE
Io/camera_config.cpp
Io/recording_config.cpp
Io/interception.cpp
Io/KeyboardHandler.cpp
Io/pad_config.cpp

View File

@ -0,0 +1,44 @@
#include "stdafx.h"
#include "recording_config.h"
#include <charconv>
LOG_CHANNEL(cfg_log, "CFG");
cfg_recording g_cfg_recording;
// Constructor: picks the on-disk location of the recording config.
// On Windows the file lives in the "config" subdirectory of the config dir,
// on all other platforms directly in the config dir.
cfg_recording::cfg_recording()
	: cfg::node()
#ifdef _WIN32
	, path(fs::get_config_dir() + "config/recording.yml")
#else
	, path(fs::get_config_dir() + "recording.yml")
#endif
{
}
// Load the recording config from disk.
// If the file does not exist, the defaults are applied and persisted.
// Returns true only when an existing config file was read and parsed.
bool cfg_recording::load()
{
	cfg_log.notice("Loading recording config from '%s'", path);

	fs::file cfg_file{path, fs::read};

	if (!cfg_file)
	{
		// No config yet: fall back to defaults and write them out for next time.
		cfg_log.notice("Recording config missing. Using default settings. Path: %s", path);
		from_default();
		save();
		return false;
	}

	return from_string(cfg_file.to_string());
}
// Persist the current recording config to disk.
// Uses fs::pending_file so the previous file is only replaced once the new
// content has been written completely and committed.
void cfg_recording::save() const
{
	cfg_log.notice("Saving recording config to '%s'", path);

	fs::pending_file cfg_file(path);

	// Write the serialized config, then commit. Any failure (no file handle,
	// failed write, failed commit) is logged. Note the comma operator: the
	// write result itself is intentionally not checked, only the commit.
	if (!cfg_file.file || (cfg_file.file.write(to_string()), !cfg_file.commit()))
	{
		cfg_log.error("Failed to save recording config to '%s'", path);
	}
}

View File

@ -0,0 +1,23 @@
#pragma once
#include "Utilities/Config.h"
// Configuration node for video recording (framerate, dimensions and encoder
// settings). Serialized as recording.yml (see recording_config.cpp).
struct cfg_recording final : cfg::node
{
	cfg_recording();
	bool load();
	void save() const;

	cfg::uint<0, 60> framerate{this, "Framerate", 30};
	cfg::uint<0, 7680> width{this, "Width", 1280};  // upper bound: 8K width
	cfg::uint<0, 4320> height{this, "Height", 720}; // upper bound: 8K height
	cfg::uint<0, 192> pixel_format{this, "AVPixelFormat", 0}; // AVPixelFormat::AV_PIX_FMT_YUV420P
	cfg::uint<0, 32813> video_codec{this, "AVCodecID", 12}; // AVCodecID::AV_CODEC_ID_MPEG4
	cfg::uint<0, 25000000> video_bps{this, "Video Bitrate", 4000000};
	cfg::uint<0, 5> max_b_frames{this, "Max B-Frames", 2};
	cfg::uint<0, 20> gop_size{this, "Group of Pictures Size", 12};

	// Path of the yml file, chosen by the constructor (platform dependent).
	const std::string path;
};

extern cfg_recording g_cfg_recording;

View File

@ -133,6 +133,8 @@ enum class localized_string_id
CELL_CROSS_CONTROLLER_MSG,
CELL_CROSS_CONTROLLER_FW_MSG,
RECORDING_ABORTED,
RPCN_NO_ERROR,
RPCN_ERROR_INVALID_INPUT,
RPCN_ERROR_WOLFSSL,

View File

@ -67,6 +67,7 @@
<ClCompile Include="Emu\Cell\Modules\libfs_utility_init.cpp" />
<ClCompile Include="Emu\Cell\Modules\sys_crashdump.cpp" />
<ClCompile Include="Emu\Io\camera_config.cpp" />
<ClCompile Include="Emu\Io\recording_config.cpp" />
<ClCompile Include="Emu\Io\Turntable.cpp" />
<ClCompile Include="Emu\Io\GHLtar.cpp" />
<ClCompile Include="Emu\Io\Buzz.cpp" />
@ -118,6 +119,7 @@
<ClCompile Include="util\atomic.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
</ClCompile>
<ClCompile Include="util\video_provider.cpp" />
<ClCompile Include="util\media_utils.cpp" />
<ClCompile Include="util\yaml.cpp">
<PrecompiledHeader>NotUsing</PrecompiledHeader>
@ -488,6 +490,7 @@
<ClInclude Include="Emu\Io\music_handler_base.h" />
<ClInclude Include="Emu\Io\Null\null_camera_handler.h" />
<ClInclude Include="Emu\Io\Null\null_music_handler.h" />
<ClInclude Include="Emu\Io\recording_config.h" />
<ClInclude Include="Emu\Io\Turntable.h" />
<ClInclude Include="Emu\Io\GHLtar.h" />
<ClInclude Include="Emu\Io\Buzz.h" />
@ -558,6 +561,8 @@
<ClInclude Include="Loader\disc.h" />
<ClInclude Include="Loader\mself.hpp" />
<ClInclude Include="util\atomic.hpp" />
<ClInclude Include="util\image_sink.h" />
<ClInclude Include="util\video_provider.h" />
<ClInclude Include="util\media_utils.h" />
<ClInclude Include="util\serialization.hpp" />
<ClInclude Include="util\v128.hpp" />

View File

@ -1096,6 +1096,12 @@
<ClCompile Include="Emu\Cell\lv2\sys_game.cpp">
<Filter>Emu\Cell\lv2</Filter>
</ClCompile>
<ClCompile Include="util\video_provider.cpp">
<Filter>Utilities</Filter>
</ClCompile>
<ClCompile Include="Emu\Io\recording_config.cpp">
<Filter>Emu\Io</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Crypto\aes.h">
@ -2188,6 +2194,15 @@
<ClInclude Include="Emu\Cell\lv2\sys_game.h">
<Filter>Emu\Cell\lv2</Filter>
</ClInclude>
<ClInclude Include="util\video_provider.h">
<Filter>Utilities</Filter>
</ClInclude>
<ClInclude Include="util\image_sink.h">
<Filter>Utilities</Filter>
</ClInclude>
<ClInclude Include="Emu\Io\recording_config.h">
<Filter>Emu\Io</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<None Include="Emu\RSX\Program\GLSLSnippets\GPUDeswizzle.glsl">

View File

@ -154,6 +154,7 @@ private:
case localized_string_id::CELL_SAVEDATA_OVERWRITE: return tr("Do you want to overwrite the saved data?\n\n%0", "Savedata entry info").arg(std::forward<Args>(args)...);
case localized_string_id::CELL_CROSS_CONTROLLER_MSG: return tr("Start [%0] on the PS Vita system.\nIf you have not installed [%0], go to [Remote Play] on the PS Vita system and start [Cross-Controller] from the LiveArea™ screen.", "Cross-Controller message").arg(std::forward<Args>(args)...);
case localized_string_id::CELL_CROSS_CONTROLLER_FW_MSG: return tr("If your system software version on the PS Vita system is earlier than 1.80, you must update the system software to the latest version.", "Cross-Controller firmware message");
case localized_string_id::RECORDING_ABORTED: return tr("Recording aborted!");
case localized_string_id::RPCN_NO_ERROR: return tr("RPCN: No Error");
case localized_string_id::RPCN_ERROR_INVALID_INPUT: return tr("RPCN: Invalid Input (Wrong Host/Port)");
case localized_string_id::RPCN_ERROR_WOLFSSL: return tr("RPCN Connection Error: WolfSSL Error");

52
rpcs3/util/image_sink.h Normal file
View File

@ -0,0 +1,52 @@
#pragma once
#include "util/types.hpp"
#include "util/atomic.hpp"
#include "Utilities/mutex.h"
#include <deque>
#include <cmath>
namespace utils
{
// Abstract sink that receives raw video frames for encoding.
// Implementations (e.g. video_encoder) pull frames from m_frames_to_encode,
// which is protected by m_mtx.
class image_sink
{
public:
	image_sink() = default;

	// Stop the sink. If flush is true, pending frames may still be processed.
	virtual void stop(bool flush = true) = 0;

	// Queue a frame. Implementations may move out of 'frame'.
	// pixel_format must be a valid AVPixelFormat value.
	virtual void add_frame(std::vector<u8>& frame, const u32 width, const u32 height, s32 pixel_format, usz timestamp_ms) = 0;

	// Convert a timestamp in milliseconds to a presentation timestamp (frame index).
	// Fixed: compute in double instead of float. With float, timestamp_ms *
	// m_framerate exceeds the 24-bit mantissa after a few minutes of recording
	// (e.g. ~9 min at 30 fps), so distinct timestamps collapsed onto the same
	// pts and frames were dropped.
	s64 get_pts(usz timestamp_ms) const
	{
		return static_cast<s64>(std::round((timestamp_ms * m_framerate) / 1000.0));
	}

	// Inverse of get_pts. Guards against a zero framerate (division by zero,
	// which was previously undefined behavior through the float cast).
	usz get_timestamp_ms(s64 pts) const
	{
		return m_framerate ? static_cast<usz>(std::round((pts * 1000) / static_cast<double>(m_framerate))) : 0;
	}

	// Set by implementations when encoding fails; polled by the frame producer.
	atomic_t<bool> has_error{false};

	// One queued frame together with its raw pixel data.
	struct encoder_frame
	{
		encoder_frame() = default;
		encoder_frame(usz timestamp_ms, u32 width, u32 height, s32 av_pixel_format, std::vector<u8>&& data)
			: timestamp_ms(timestamp_ms), width(width), height(height), av_pixel_format(av_pixel_format), data(std::move(data))
		{}

		usz timestamp_ms = 0;
		u32 width = 0;
		u32 height = 0;
		s32 av_pixel_format = 0; // NOTE: Make sure this is a valid AVPixelFormat
		std::vector<u8> data;
	};

protected:
	shared_mutex m_mtx; // protects m_frames_to_encode
	std::deque<encoder_frame> m_frames_to_encode;
	atomic_t<bool> m_flush = false;
	u32 m_framerate = 0;
};
}

View File

@ -20,6 +20,8 @@ extern "C" {
#include "libavformat/avformat.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/imgutils.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
}
constexpr int averror_eof = AVERROR_EOF; // workaround for old-style-cast error
@ -180,21 +182,33 @@ namespace utils
const AVCodec* codec = nullptr;
AVCodecContext* context = nullptr;
AVFrame* frame = nullptr;
AVStream* stream = nullptr;
SwrContext* swr = nullptr;
SwsContext* sws = nullptr;
std::function<void()> kill_callback = nullptr;
// RAII cleanup: releases all owned libav resources in safe order and then
// invokes the optional kill_callback (used by video_encoder to reset state).
~scoped_av()
{
	// Clean up
	if (frame)
	{
		av_frame_unref(frame);
		av_frame_free(&frame);
	}

	if (swr)
		swr_free(&swr);

	if (sws)
		sws_freeContext(sws);

	if (context)
		avcodec_close(context);

	// AVCodec is managed by libavformat, no need to free it
	// see: https://stackoverflow.com/a/18047320
	if (format)
		avformat_free_context(format);

	// The stream is owned by the format context and freed together with it
	// by avformat_free_context above, hence no explicit free here.
	//if (stream)
	//	av_free(stream);

	// Notify the owner that all resources are gone (runs last on purpose).
	if (kill_callback)
		kill_callback();
}
};
@ -493,4 +507,512 @@ namespace utils
{
return m_context.step_track(next);
}
// Construct an idle encoder. Parameters are configured via the setters below;
// the actual work starts in encode().
video_encoder::video_encoder()
	: utils::image_sink()
{
}

// Stops the encoder thread (flushing pending frames) before destruction.
video_encoder::~video_encoder()
{
	stop();
}

// Output file path of the current/last recording.
std::string video_encoder::path() const
{
	return m_path;
}

// Presentation timestamp of the last frame that was encoded.
s64 video_encoder::last_pts() const
{
	return m_last_pts;
}
// The setters below configure the next recording.
// They are expected to be called before encode() starts the encoder thread.

// Output file path of the recording.
void video_encoder::set_path(const std::string& path)
{
	m_path = path;
}

// Target framerate in frames per second.
void video_encoder::set_framerate(u32 framerate)
{
	m_framerate = framerate;
}

// Target video bitrate in bits per second.
void video_encoder::set_video_bitrate(u32 bitrate)
{
	m_video_bitrate_bps = bitrate;
}

// Output frame layout (dimensions and AVPixelFormat).
void video_encoder::set_output_format(video_encoder::frame_format format)
{
	m_out_format = std::move(format);
}

// AVCodecID of the preferred video codec (a fallback is used if unavailable).
void video_encoder::set_video_codec(s32 codec_id)
{
	m_video_codec_id = codec_id;
}

// Maximum number of consecutive B-frames.
void video_encoder::set_max_b_frames(s32 max_b_frames)
{
	m_max_b_frames = max_b_frames;
}

// Group-of-pictures size (keyframe interval).
void video_encoder::set_gop_size(s32 gop_size)
{
	m_gop_size = gop_size;
}

// Audio sample rate in Hz (audio encoding itself is still TODO in encode()).
void video_encoder::set_sample_rate(u32 sample_rate)
{
	m_sample_rate = sample_rate;
}

// Target audio bitrate in bits per second (audio encoding is still TODO).
void video_encoder::set_audio_bitrate(u32 bitrate)
{
	m_audio_bitrate_bps = bitrate;
}

// AVCodecID of the audio codec (audio encoding is still TODO).
void video_encoder::set_audio_codec(s32 codec_id)
{
	m_audio_codec_id = codec_id;
}
// Queue a frame for encoding. Moves the pixel data out of 'frame', leaving
// the caller's vector in a valid but unspecified state.
void video_encoder::add_frame(std::vector<u8>& frame, const u32 width, const u32 height, s32 pixel_format, usz timestamp_ms)
{
	// Do not allow new frames while flushing
	if (m_flush)
		return;

	std::lock_guard lock(m_mtx);
	m_frames_to_encode.emplace_back(timestamp_ms, width, height, pixel_format, std::move(frame));
}
// Pause the encoder thread. If flush is true, this blocks (busy-waits) until
// the encoder thread has drained all queued frames before pausing.
void video_encoder::pause(bool flush)
{
	if (m_thread)
	{
		m_paused = true;
		m_flush = flush;

		if (flush)
		{
			// Let's assume this will finish in a timely manner
			// (m_flush is cleared by the encoder thread once the queue is empty)
			while (m_flush && m_running)
			{
				std::this_thread::sleep_for(1us);
			}
		}
	}
}
// Stop the encoder thread and reset all state. If flush is true, blocks until
// all queued frames have been encoded before aborting the thread.
void video_encoder::stop(bool flush)
{
	media_log.notice("video_encoder: Stopping video encoder. flush=%d", flush);

	if (m_thread)
	{
		m_flush = flush;

		if (flush)
		{
			// Let's assume this will finish in a timely manner
			while (m_flush && m_running)
			{
				std::this_thread::sleep_for(1ms);
			}
		}

		// Signal abort and join the encoder thread
		// (named_thread: assignment sets the state, operator() presumably joins — project API)
		auto& thread = *m_thread;
		thread = thread_state::aborting;
		thread();
		m_thread.reset();
	}

	// Drop any remaining frames and reset flags for the next run
	std::lock_guard lock(m_mtx);
	m_frames_to_encode.clear();
	has_error = false;
	m_flush = false;
	m_paused = false;
	m_running = false;
}
// Start (or resume) the encoder. Spawns a worker thread that sets up the
// FFmpeg muxer/encoder for m_path, then pulls frames from m_frames_to_encode,
// converts them with swscale and writes the encoded packets to the file.
// Errors are reported via has_error; the thread cleans up through scoped_av.
void video_encoder::encode()
{
	if (m_running)
	{
		// Resume
		m_flush = false;
		m_paused = false;
		media_log.success("video_encoder: resuming recording of '%s'", m_path);
		return;
	}

	m_last_pts = 0;

	stop();

	if (const std::string dir = fs::get_parent_dir(m_path); !fs::is_dir(dir))
	{
		media_log.error("video_encoder: Could not find directory: '%s' for file '%s'", dir, m_path);
		has_error = true;
		return;
	}

	media_log.success("video_encoder: Starting recording of '%s'", m_path);

	m_thread = std::make_unique<named_thread<std::function<void()>>>("Video Encode Thread", [this, path = m_path]()
	{
		m_running = true;

		// TODO: audio encoding

		// Reset variables at all costs
		scoped_av av;
		av.kill_callback = [this]()
		{
			m_flush = false;
			m_running = false;
		};

		const AVPixelFormat out_format = static_cast<AVPixelFormat>(m_out_format.av_pixel_format);
		const char* av_output_format = nullptr;

		// Returns the name of the first muxer that can contain the given codec.
		const auto find_format = [&](const AVCodec* codec) -> const char*
		{
			if (!codec)
				return nullptr;

			void* opaque = nullptr;
			for (const AVOutputFormat* oformat = av_muxer_iterate(&opaque); !!oformat; oformat = av_muxer_iterate(&opaque))
			{
				if (avformat_query_codec(oformat, codec->id, FF_COMPLIANCE_STRICT) == 1)
				{
					return oformat->name;
				}
			}

			return nullptr;
		};

		AVCodecID used_codec = static_cast<AVCodecID>(m_video_codec_id);

		// Find specified codec first
		if (AVCodec* encoder = avcodec_find_encoder(used_codec); !!encoder)
		{
			media_log.success("video_encoder: Found requested video_codec %d = %s", static_cast<int>(used_codec), encoder->name);
			av_output_format = find_format(encoder);

			if (av_output_format)
			{
				media_log.success("video_encoder: Found requested output format '%s'", av_output_format);
			}
			else
			{
				media_log.error("video_encoder: Could not find a format for the requested video_codec %d = %s", static_cast<int>(used_codec), encoder->name);
			}
		}
		else
		{
			// Fixed: 'encoder' is null in this branch, so it must not be
			// dereferenced for its name (previous code crashed here).
			media_log.error("video_encoder: Could not find requested video_codec %d", static_cast<int>(used_codec));
		}

		// Fallback to some other codec
		if (!av_output_format)
		{
			void* opaque = nullptr;
			for (const AVCodec* codec = av_codec_iterate(&opaque); !!codec; codec = av_codec_iterate(&opaque))
			{
				if (AVCodec* encoder = avcodec_find_encoder(codec->id); !!encoder)
				{
					// Fixed: log the codec that was actually found, not the requested one
					media_log.notice("video_encoder: Found video_codec %d = %s", static_cast<int>(codec->id), encoder->name);
					av_output_format = find_format(encoder);

					if (av_output_format)
					{
						media_log.success("video_encoder: Found fallback output format '%s'", av_output_format);
						break;
					}
				}
			}
		}

		if (!av_output_format)
		{
			media_log.error("video_encoder: Could not find any output format");
			has_error = true;
			return;
		}

		if (int err = avformat_alloc_output_context2(&av.format, nullptr, av_output_format, path.c_str()); err < 0)
		{
			media_log.error("video_encoder: avformat_alloc_output_context2 failed. Error: %d='%s'", err, av_error_to_string(err));
			has_error = true;
			return;
		}

		if (!av.format)
		{
			media_log.error("video_encoder: avformat_alloc_output_context2 failed");
			has_error = true;
			return;
		}

		if (!(av.codec = avcodec_find_encoder(av.format->oformat->video_codec)))
		{
			media_log.error("video_encoder: avcodec_find_encoder failed");
			has_error = true;
			return;
		}

		if (!(av.stream = avformat_new_stream(av.format, nullptr)))
		{
			media_log.error("video_encoder: avformat_new_stream failed");
			has_error = true;
			return;
		}

		av.stream->id = static_cast<int>(av.format->nb_streams - 1);

		if (!(av.context = avcodec_alloc_context3(av.codec)))
		{
			media_log.error("video_encoder: avcodec_alloc_context3 failed");
			has_error = true;
			return;
		}

		media_log.notice("video_encoder: using video_codec = %d", static_cast<int>(av.format->oformat->video_codec));
		media_log.notice("video_encoder: using video_bitrate = %d", m_video_bitrate_bps);
		media_log.notice("video_encoder: using out width = %d", m_out_format.width);
		media_log.notice("video_encoder: using out height = %d", m_out_format.height);
		media_log.notice("video_encoder: using framerate = %d", m_framerate);
		media_log.notice("video_encoder: using gop_size = %d", m_gop_size);
		media_log.notice("video_encoder: using max_b_frames = %d", m_max_b_frames);

		av.context->codec_id = av.format->oformat->video_codec;
		av.context->bit_rate = m_video_bitrate_bps;
		av.context->width = static_cast<int>(m_out_format.width);
		av.context->height = static_cast<int>(m_out_format.height);
		av.context->time_base = {.num = 1, .den = static_cast<int>(m_framerate)};
		av.context->framerate = {.num = static_cast<int>(m_framerate), .den = 1};
		av.context->pix_fmt = out_format;
		av.context->gop_size = m_gop_size;
		av.context->max_b_frames = m_max_b_frames;

		// Some formats want stream headers to be separate (global header flag)
		if (av.format->oformat->flags & AVFMT_GLOBALHEADER)
		{
			av.context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
		}

		if (int err = avcodec_open2(av.context, av.codec, nullptr); err != 0)
		{
			media_log.error("video_encoder: avcodec_open2 failed. Error: %d='%s'", err, av_error_to_string(err));
			has_error = true;
			return;
		}

		if (!(av.frame = av_frame_alloc()))
		{
			media_log.error("video_encoder: av_frame_alloc failed");
			has_error = true;
			return;
		}

		av.frame->format = av.context->pix_fmt;
		av.frame->width = av.context->width;
		av.frame->height = av.context->height;

		if (int err = av_frame_get_buffer(av.frame, 32); err < 0)
		{
			media_log.error("video_encoder: av_frame_get_buffer failed. Error: %d='%s'", err, av_error_to_string(err));
			has_error = true;
			return;
		}

		if (int err = avcodec_parameters_from_context(av.stream->codecpar, av.context); err < 0)
		{
			media_log.error("video_encoder: avcodec_parameters_from_context failed. Error: %d='%s'", err, av_error_to_string(err));
			has_error = true;
			return;
		}

		av_dump_format(av.format, 0, path.c_str(), 1);

		if (int err = avio_open(&av.format->pb, path.c_str(), AVIO_FLAG_WRITE); err != 0)
		{
			media_log.error("video_encoder: avio_open failed. Error: %d='%s'", err, av_error_to_string(err));
			has_error = true;
			return;
		}

		if (int err = avformat_write_header(av.format, nullptr); err < 0)
		{
			media_log.error("video_encoder: avformat_write_header failed. Error: %d='%s'", err, av_error_to_string(err));

			if (int err = avio_close(av.format->pb); err != 0)
			{
				media_log.error("video_encoder: avio_close failed. Error: %d='%s'", err, av_error_to_string(err));
			}

			has_error = true;
			return;
		}

		// Drain all packets the encoder currently has ready and mux them.
		const auto flush = [&]()
		{
			while ((thread_ctrl::state() != thread_state::aborting || m_flush) && !has_error)
			{
				AVPacket* packet = av_packet_alloc();

				// Fixed: av_packet_unref only resets the packet, so the struct
				// allocated by av_packet_alloc leaked on every iteration.
				// av_packet_free unrefs AND frees it (and handles nullptr).
				std::unique_ptr<AVPacket, decltype([](AVPacket* p){ av_packet_free(&p); })> packet_(packet);

				if (!packet)
				{
					media_log.error("video_encoder: av_packet_alloc failed");
					has_error = true;
					return;
				}

				if (int err = avcodec_receive_packet(av.context, packet); err < 0)
				{
					// EAGAIN: encoder needs more input. EOF: encoder fully drained.
					if (err == AVERROR(EAGAIN) || err == averror_eof)
						break;

					media_log.error("video_encoder: avcodec_receive_packet failed. Error: %d='%s'", err, av_error_to_string(err));
					has_error = true;
					return;
				}

				// Convert from codec time base to stream time base before muxing
				av_packet_rescale_ts(packet, av.context->time_base, av.stream->time_base);
				packet->stream_index = av.stream->index;

				if (int err = av_interleaved_write_frame(av.format, packet); err < 0)
				{
					media_log.error("video_encoder: av_interleaved_write_frame failed. Error: %d='%s'", err, av_error_to_string(err));
					has_error = true;
					return;
				}
			}
		};

		s64 last_pts = -1;

		while ((thread_ctrl::state() != thread_state::aborting || m_flush) && !has_error)
		{
			encoder_frame frame_data;
			{
				m_mtx.lock();

				if (m_frames_to_encode.empty())
				{
					m_mtx.unlock();

					if (m_flush)
					{
						m_flush = false;

						if (!m_paused)
						{
							// We only stop the thread after a flush if we are not paused
							break;
						}
					}

					// We only actually pause after we process all frames
					const u64 sleeptime = m_paused ? 10000 : 1;
					thread_ctrl::wait_for(sleeptime);
					continue;
				}

				frame_data = std::move(m_frames_to_encode.front());
				m_frames_to_encode.pop_front();
				m_mtx.unlock();

				media_log.trace("video_encoder: adding new frame. timestamp=%d", frame_data.timestamp_ms);
			}

			// Calculate presentation timestamp.
			const s64 pts = get_pts(frame_data.timestamp_ms);

			// We need to skip this frame if it has the same timestamp.
			if (pts <= last_pts)
			{
				media_log.notice("video_encoder: skipping frame. last_pts=%d, pts=%d", last_pts, pts);
				continue;
			}

			if (int err = av_frame_make_writable(av.frame); err < 0)
			{
				media_log.error("video_encoder: av_frame_make_writable failed. Error: %d='%s'", err, av_error_to_string(err));
				has_error = true;
				break;
			}

			u8* in_data[4]{};
			int in_line[4]{};

			const AVPixelFormat in_format = static_cast<AVPixelFormat>(frame_data.av_pixel_format);

			if (int ret = av_image_fill_linesizes(in_line, in_format, frame_data.width); ret < 0)
			{
				fmt::throw_exception("video_encoder: av_image_fill_linesizes failed (ret=0x%x): %s", ret, utils::av_error_to_string(ret));
			}

			if (int ret = av_image_fill_pointers(in_data, in_format, frame_data.height, frame_data.data.data(), in_line); ret < 0)
			{
				fmt::throw_exception("video_encoder: av_image_fill_pointers failed (ret=0x%x): %s", ret, utils::av_error_to_string(ret));
			}

			// Update the context in case the frame format has changed
			av.sws = sws_getCachedContext(av.sws, frame_data.width, frame_data.height, in_format,
				av.context->width, av.context->height, out_format, SWS_BICUBIC, nullptr, nullptr, nullptr);
			if (!av.sws)
			{
				media_log.error("video_encoder: sws_getCachedContext failed");
				has_error = true;
				break;
			}

			if (int err = sws_scale(av.sws, in_data, in_line, 0, frame_data.height, av.frame->data, av.frame->linesize); err < 0)
			{
				media_log.error("video_encoder: sws_scale failed. Error: %d='%s'", err, av_error_to_string(err));
				has_error = true;
				break;
			}

			av.frame->pts = pts;

			if (int err = avcodec_send_frame(av.context, av.frame); err < 0)
			{
				media_log.error("video_encoder: avcodec_send_frame failed. Error: %d='%s'", err, av_error_to_string(err));
				has_error = true;
				break;
			}

			flush();

			last_pts = av.frame->pts;
			m_last_pts = last_pts;
		}

		// Enter draining mode with a null frame, then mux the remaining packets
		if (int err = avcodec_send_frame(av.context, nullptr); err != 0)
		{
			media_log.error("video_encoder: final avcodec_send_frame failed. Error: %d='%s'", err, av_error_to_string(err));
		}

		flush();

		if (int err = av_write_trailer(av.format); err != 0)
		{
			media_log.error("video_encoder: av_write_trailer failed. Error: %d='%s'", err, av_error_to_string(err));
		}

		if (int err = avio_close(av.format->pb); err != 0)
		{
			media_log.error("video_encoder: avio_close failed. Error: %d='%s'", err, av_error_to_string(err));
		}
	});
}
}

View File

@ -7,6 +7,7 @@
#include <thread>
#include "Utilities/StrUtil.h"
#include "Utilities/Thread.h"
#include "util/video_provider.h"
#include "Emu/Cell/Modules/cellMusic.h"
namespace utils
@ -76,4 +77,58 @@ namespace utils
music_selection_context m_context{};
std::unique_ptr<named_thread<std::function<void()>>> m_thread;
};
// image_sink implementation that encodes queued frames into a video file
// with FFmpeg (implementation in media_utils.cpp). Runs its own worker thread.
class video_encoder : public utils::image_sink
{
public:
	video_encoder();
	~video_encoder();

	// Layout of the output frames handed to the encoder.
	struct frame_format
	{
		s32 av_pixel_format = 0; // NOTE: Make sure this is a valid AVPixelFormat
		u32 width = 0;
		u32 height = 0;
		u32 pitch = 0;
	};

	// Output file path of the current/last recording.
	std::string path() const;
	// Presentation timestamp of the last encoded frame.
	s64 last_pts() const;

	// Configuration setters; call before encode() starts the worker thread.
	void set_path(const std::string& path);
	void set_framerate(u32 framerate);
	void set_video_bitrate(u32 bitrate);
	void set_output_format(frame_format format);
	void set_video_codec(s32 codec_id);
	void set_max_b_frames(s32 max_b_frames);
	void set_gop_size(s32 gop_size);
	void set_sample_rate(u32 sample_rate);
	void set_audio_bitrate(u32 bitrate);
	void set_audio_codec(s32 codec_id);

	// Queue a frame for encoding (moves out of 'frame').
	void add_frame(std::vector<u8>& frame, const u32 width, const u32 height, s32 pixel_format, usz timestamp_ms) override;
	// Pause the encoder thread, optionally flushing queued frames first.
	void pause(bool flush = true);
	// Stop the encoder thread, optionally flushing queued frames first.
	void stop(bool flush = true) override;
	// Start (or resume) the encoder thread.
	void encode();

private:
	std::string m_path;
	s64 m_last_pts = 0;

	// Thread control
	std::unique_ptr<named_thread<std::function<void()>>> m_thread;
	atomic_t<bool> m_running = false;
	atomic_t<bool> m_paused = false;

	// Video parameters
	u32 m_video_bitrate_bps = 0;
	s32 m_video_codec_id = 12; // AV_CODEC_ID_MPEG4;
	s32 m_max_b_frames = 2;
	s32 m_gop_size = 12;
	frame_format m_out_format{};

	// Audio parameters (audio encoding is still TODO in encode())
	u32 m_sample_rate = 48000;
	u32 m_audio_bitrate_bps = 96000;
	s32 m_audio_codec_id = 86018; // AV_CODEC_ID_AAC
};
}

View File

@ -0,0 +1,137 @@
#include "stdafx.h"
#include "video_provider.h"
#include "Emu/RSX/Overlays/overlay_message.h"
extern "C"
{
#include <libavutil/pixfmt.h>
}
LOG_CHANNEL(media_log, "Media");
atomic_t<recording_mode> g_recording_mode = recording_mode::stopped;
// Formatter specialization so recording_mode can appear in log messages.
template <>
void fmt_class_string<recording_mode>::format(std::string& out, u64 arg)
{
	format_enum(out, arg, [](recording_mode value)
	{
		switch (value)
		{
		case recording_mode::stopped: return "stopped";
		case recording_mode::rpcs3: return "rpcs3";
		case recording_mode::cell: return "cell";
		}

		return unknown;
	});
}
namespace utils
{
// Make sure recording is flagged as stopped once the provider goes away.
video_provider::~video_provider()
{
	g_recording_mode = recording_mode::stopped;
}

// Install (or clear, by passing a null sink) the active image sink.
// A cell-initiated sink takes precedence and cannot be replaced by another
// type while active. Returns false if the request was rejected.
bool video_provider::set_image_sink(std::shared_ptr<image_sink> sink, recording_mode type)
{
	media_log.notice("video_provider: setting new image sink. sink=%d, type=%s", !!sink, type);

	if (type == recording_mode::stopped)
	{
		// Prevent misuse. type is supposed to be a valid state.
		media_log.error("video_provider: cannot set image sink with type %s", type);
		return false;
	}

	std::lock_guard lock(m_mutex);

	if (m_image_sink)
	{
		// cell has preference
		if (m_type == recording_mode::cell && m_type != type)
		{
			media_log.warning("video_provider: cannot set image sink with type %s if type %s is active", type, m_type);
			return false;
		}

		// Stop the old sink before it is replaced (or removed)
		if (m_type != type || m_image_sink != sink)
		{
			media_log.warning("video_provider: stopping current image sink of type %s", m_type);
			m_image_sink->stop();
		}
	}

	// A null sink means recording is stopped
	m_type = sink ? type : recording_mode::stopped;
	m_image_sink = sink;

	if (m_type == recording_mode::stopped)
	{
		m_active = false;
	}

	return true;
}
// Total time spent paused; it is subtracted from frame timestamps so the
// recording does not contain the paused period.
void video_provider::set_pause_time(usz pause_time_ms)
{
	std::lock_guard lock(m_mutex);
	m_pause_time_ms = pause_time_ms;
}

// True if enough time has elapsed that the sink would accept a new frame
// (i.e. the next pts would be larger than the last one handed over).
bool video_provider::can_consume_frame()
{
	std::lock_guard lock(m_mutex);

	if (!m_image_sink)
		return false;

	// NOTE(review): this subtraction is unsigned; if m_pause_time_ms ever
	// exceeds the elapsed time it wraps around — confirm callers guarantee
	// pause time <= elapsed time.
	const usz timestamp_ms = std::chrono::duration_cast<std::chrono::milliseconds>(steady_clock::now() - m_encoder_start).count() - m_pause_time_ms;
	const s64 pts = m_image_sink->get_pts(timestamp_ms);
	return pts > m_last_pts_incoming;
}
// Hand one presented frame to the active image sink.
// 'data' may be moved from by the sink. is_bgra selects between BGRA and
// RGBA pixel layout for the AVPixelFormat passed on.
void video_provider::present_frame(std::vector<u8>& data, const u32 width, const u32 height, bool is_bgra)
{
	std::lock_guard lock(m_mutex);

	// Abort recording if the sink is gone or reported an encoding error
	if (!m_image_sink || m_image_sink->has_error)
	{
		g_recording_mode = recording_mode::stopped;
		rsx::overlays::queue_message(localized_string_id::RECORDING_ABORTED);
	}

	if (g_recording_mode == recording_mode::stopped)
	{
		m_active = false;
		return;
	}

	// First frame after (re)activation: reset frame counter and pts tracking
	if (!m_active.exchange(true))
	{
		m_current_encoder_frame = 0;
		m_last_pts_incoming = -1;
	}

	if (m_current_encoder_frame == 0)
	{
		m_encoder_start = steady_clock::now();
	}

	// Calculate presentation timestamp.
	const usz timestamp_ms = std::chrono::duration_cast<std::chrono::milliseconds>(steady_clock::now() - m_encoder_start).count() - m_pause_time_ms;
	const s64 pts = m_image_sink->get_pts(timestamp_ms);

	// We can just skip this frame if it has the same timestamp.
	if (pts <= m_last_pts_incoming)
	{
		return;
	}

	m_last_pts_incoming = pts;
	m_current_encoder_frame++;
	m_image_sink->add_frame(data, width, height, is_bgra ? AVPixelFormat::AV_PIX_FMT_BGRA : AVPixelFormat::AV_PIX_FMT_RGBA, timestamp_ms);
}
}

View File

@ -0,0 +1,36 @@
#pragma once
#include "image_sink.h"
// Who initiated the current recording:
// stopped = no recording, rpcs3 = emulator-initiated,
// cell = guest-initiated (takes precedence, see video_provider.cpp).
enum class recording_mode
{
	stopped = 0,
	rpcs3,
	cell
};
namespace utils
{
// Distributes presented frames to the active image sink (e.g. a video
// encoder). At most one sink is active at a time; cell-initiated recordings
// take precedence over rpcs3-initiated ones.
class video_provider
{
public:
	video_provider() = default;
	~video_provider();

	// Install (or clear) the active sink; returns false if rejected.
	bool set_image_sink(std::shared_ptr<image_sink> sink, recording_mode type);
	// Total pause time, subtracted from frame timestamps.
	void set_pause_time(usz pause_time_ms);
	// True if the sink would accept a new frame right now.
	bool can_consume_frame();
	// Hand a frame to the sink ('data' may be moved from).
	void present_frame(std::vector<u8>& data, const u32 width, const u32 height, bool is_bgra);

private:
	recording_mode m_type = recording_mode::stopped; // origin of the current sink
	std::shared_ptr<image_sink> m_image_sink;
	shared_mutex m_mutex{}; // protects all state below
	atomic_t<bool> m_active{false};
	atomic_t<usz> m_current_encoder_frame{0};
	steady_clock::time_point m_encoder_start{};
	s64 m_last_pts_incoming = -1; // last pts handed to the sink
	usz m_pause_time_ms = 0;
};
} // namespace utils