
DSP/HLE: Implement Source processing

Commit b242bdf945 (parent c1f0044a4b)
Author: MerryMage
Date:   2016-04-25 08:54:57 +01:00

7 changed files with 496 additions and 5 deletions

src/audio_core/CMakeLists.txt

@@ -4,6 +4,7 @@ set(SRCS
     hle/dsp.cpp
     hle/filter.cpp
     hle/pipe.cpp
+    hle/source.cpp
     interpolate.cpp
     sink_details.cpp
 )
@@ -15,6 +16,7 @@ set(HEADERS
     hle/dsp.h
     hle/filter.h
     hle/pipe.h
+    hle/source.h
     interpolate.h
     null_sink.h
     sink.h

src/audio_core/hle/common.h

@@ -27,7 +27,7 @@ using QuadFrame32 = std::array<std::array<s32, 4>, samples_per_frame>;
  */
 template<typename FrameT, typename FilterT>
 void FilterFrame(FrameT& frame, FilterT& filter) {
-    std::transform(frame.begin(), frame.end(), frame.begin(), [&filter](const typename FrameT::value_type& sample) {
+    std::transform(frame.begin(), frame.end(), frame.begin(), [&filter](const auto& sample) {
         return filter.ProcessSample(sample);
     });
 }
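
For context, here is a minimal usage sketch (not part of this commit) of why the generic lambda suffices: any filter type with a ProcessSample() overload for the frame's sample type works, for both StereoFrame16 and QuadFrame32. GainFilter and HalveFrame below are hypothetical names, and StereoFrame16 is assumed to be std::array<std::array<s16, 2>, samples_per_frame> as used elsewhere in audio_core.

    #include <array>
    #include "audio_core/hle/common.h"

    // Hypothetical filter: halves each stereo sample. Any type exposing a
    // matching ProcessSample() satisfies FilterFrame's FilterT parameter.
    struct GainFilter {
        std::array<s16, 2> ProcessSample(const std::array<s16, 2>& sample) const {
            return {static_cast<s16>(sample[0] / 2), static_cast<s16>(sample[1] / 2)};
        }
    };

    void HalveFrame(DSP::HLE::StereoFrame16& frame) {
        GainFilter filter;
        DSP::HLE::FilterFrame(frame, filter); // applies ProcessSample to every sample in place
    }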

src/audio_core/hle/dsp.cpp

@@ -2,10 +2,12 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

+#include <array>
 #include <memory>

 #include "audio_core/hle/dsp.h"
 #include "audio_core/hle/pipe.h"
+#include "audio_core/hle/source.h"
 #include "audio_core/sink.h"

 namespace DSP {
@@ -38,16 +40,38 @@ static SharedMemory& WriteRegion() {
     return g_regions[1 - CurrentRegionIndex()];
 }

+static std::array<Source, num_sources> sources = {
+    Source(0),  Source(1),  Source(2),  Source(3),  Source(4),  Source(5),
+    Source(6),  Source(7),  Source(8),  Source(9),  Source(10), Source(11),
+    Source(12), Source(13), Source(14), Source(15), Source(16), Source(17),
+    Source(18), Source(19), Source(20), Source(21), Source(22), Source(23)
+};
+
 static std::unique_ptr<AudioCore::Sink> sink;

 void Init() {
     DSP::HLE::ResetPipes();
+
+    for (auto& source : sources) {
+        source.Reset();
+    }
 }

 void Shutdown() {
 }

 bool Tick() {
+    SharedMemory& read = ReadRegion();
+    SharedMemory& write = WriteRegion();
+
+    std::array<QuadFrame32, 3> intermediate_mixes = {};
+
+    for (size_t i = 0; i < num_sources; i++) {
+        write.source_statuses.status[i] = sources[i].Tick(read.source_configurations.config[i], read.adpcm_coefficients.coeff[i]);
+        for (size_t mix = 0; mix < 3; mix++) {
+            sources[i].MixInto(intermediate_mixes[mix], mix);
+        }
+    }
+
     return true;
 }
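
Note that in this commit the three intermediate mixes are computed but not yet consumed; the sink declared above is still untouched by Tick(). Purely as an illustration of where such a mix could eventually head (this is not the commit's code, and the real mixer stages and gains are not modelled), a QuadFrame32 might be folded down to interleaved stereo for a Sink roughly like this, with DownmixToStereo being a hypothetical helper:

    #include <algorithm>
    #include <vector>

    // Illustrative only: sum front and back channels of each quad sample and
    // clamp the s32 accumulator into s16 range before interleaving L/R.
    static std::vector<s16> DownmixToStereo(const QuadFrame32& quad) {
        std::vector<s16> out;
        out.reserve(quad.size() * 2);
        for (const auto& sample : quad) {
            const s32 left = sample[0] + sample[2];
            const s32 right = sample[1] + sample[3];
            out.push_back(static_cast<s16>(std::max(-32768, std::min(32767, left))));
            out.push_back(static_cast<s16>(std::max(-32768, std::min(32767, right))));
        }
        return out;
    }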

src/audio_core/hle/dsp.h

@@ -169,9 +169,9 @@ struct SourceConfiguration {
         float_le rate_multiplier;

         enum class InterpolationMode : u8 {
-            None = 0,
+            Polyphase = 0,
             Linear = 1,
-            Polyphase = 2
+            None = 2
         };

         InterpolationMode interpolation_mode;
@@ -318,10 +318,10 @@ ASSERT_DSP_STRUCT(SourceConfiguration::Configuration::Buffer, 20);
 struct SourceStatus {
     struct Status {
         u8 is_enabled;               ///< Is this channel enabled? (Doesn't have to be playing anything.)
-        u8 previous_buffer_id_dirty; ///< Non-zero when previous_buffer_id changes
+        u8 current_buffer_id_dirty;  ///< Non-zero when current_buffer_id changes
         u16_le sync;                 ///< Is set by the DSP to the value of SourceConfiguration::sync
         u32_dsp buffer_position;     ///< Number of samples into the current buffer
-        u16_le previous_buffer_id;   ///< Updated when a buffer finishes playing
+        u16_le current_buffer_id;    ///< Updated when a buffer finishes playing
         INSERT_PADDING_DSPWORDS(1);
     };
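
To make the rename concrete, a hedged sketch of the application-side pattern these fields serve (not from the codebase; ReadSourceStatus() and SubmitNextBuffer() are hypothetical stand-ins for a title's own audio code):

    // Hypothetical app-side polling: the DSP raises current_buffer_id_dirty when
    // current_buffer_id changes, which a title can use to keep its buffer queue
    // topped up and its audio in step with video.
    void PollSource(int source_index) {
        const SourceStatus::Status status = ReadSourceStatus(source_index); // hypothetical shared-memory read
        if (status.current_buffer_id_dirty != 0) {
            SubmitNextBuffer(source_index, status.current_buffer_id); // hypothetical
        }
    }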

src/audio_core/hle/filter.h

@@ -16,6 +16,7 @@ namespace HLE {
 /// Preprocessing filters. There is an independent set of filters for each Source.
 class SourceFilters final {
+public:
     SourceFilters() { Reset(); }

     /// Reset internal state.

src/audio_core/hle/source.cpp (new file, 320 lines)

@@ -0,0 +1,320 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <algorithm>
#include <array>

#include "audio_core/codec.h"
#include "audio_core/hle/common.h"
#include "audio_core/hle/source.h"
#include "audio_core/interpolate.h"

#include "common/assert.h"
#include "common/logging/log.h"

#include "core/memory.h"

namespace DSP {
namespace HLE {

SourceStatus::Status Source::Tick(SourceConfiguration::Configuration& config, const s16_le (&adpcm_coeffs)[16]) {
    ParseConfig(config, adpcm_coeffs);

    if (state.enabled) {
        GenerateFrame();
    }

    return GetCurrentStatus();
}

void Source::MixInto(QuadFrame32& dest, size_t intermediate_mix_id) const {
    if (!state.enabled)
        return;

    const std::array<float, 4>& gains = state.gain.at(intermediate_mix_id);
    for (size_t samplei = 0; samplei < samples_per_frame; samplei++) {
        // Conversion from stereo (current_frame) to quadraphonic (dest) occurs here.
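        // For example, with gains = {0.7f, 0.7f, 0.3f, 0.3f} a stereo sample (L, R)
        // would contribute 0.7*L and 0.7*R to the front pair (indices 0 and 1) and
        // 0.3*L and 0.3*R to the back pair (indices 2 and 3). (Illustrative values.)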
        dest[samplei][0] += static_cast<s32>(gains[0] * current_frame[samplei][0]);
        dest[samplei][1] += static_cast<s32>(gains[1] * current_frame[samplei][1]);
        dest[samplei][2] += static_cast<s32>(gains[2] * current_frame[samplei][0]);
        dest[samplei][3] += static_cast<s32>(gains[3] * current_frame[samplei][1]);
    }
}

void Source::Reset() {
    current_frame.fill({});
    state = {};
}

void Source::ParseConfig(SourceConfiguration::Configuration& config, const s16_le (&adpcm_coeffs)[16]) {
    if (!config.dirty_raw) {
        return;
    }

    if (config.reset_flag) {
        config.reset_flag.Assign(0);
        Reset();
        LOG_TRACE(Audio_DSP, "source_id=%zu reset", source_id);
    }

    if (config.partial_reset_flag) {
        config.partial_reset_flag.Assign(0);
        state.input_queue = std::priority_queue<Buffer, std::vector<Buffer>, BufferOrder>{};
        LOG_TRACE(Audio_DSP, "source_id=%zu partial_reset", source_id);
    }

    if (config.enable_dirty) {
        config.enable_dirty.Assign(0);
        state.enabled = config.enable != 0;
        LOG_TRACE(Audio_DSP, "source_id=%zu enable=%d", source_id, state.enabled);
    }

    if (config.sync_dirty) {
        config.sync_dirty.Assign(0);
        state.sync = config.sync;
        LOG_TRACE(Audio_DSP, "source_id=%zu sync=%u", source_id, state.sync);
    }

    if (config.rate_multiplier_dirty) {
        config.rate_multiplier_dirty.Assign(0);
        state.rate_multiplier = config.rate_multiplier;
        LOG_TRACE(Audio_DSP, "source_id=%zu rate=%f", source_id, state.rate_multiplier);

        if (state.rate_multiplier <= 0) {
            LOG_ERROR(Audio_DSP, "Was given an invalid rate multiplier: source_id=%zu rate=%f", source_id, state.rate_multiplier);
            state.rate_multiplier = 1.0f;
            // Note: Actual firmware starts producing garbage if this occurs.
        }
    }

    if (config.adpcm_coefficients_dirty) {
        config.adpcm_coefficients_dirty.Assign(0);
        std::transform(adpcm_coeffs, adpcm_coeffs + state.adpcm_coeffs.size(), state.adpcm_coeffs.begin(),
                       [](const auto& coeff) { return static_cast<s16>(coeff); });
        LOG_TRACE(Audio_DSP, "source_id=%zu adpcm update", source_id);
    }

    if (config.gain_0_dirty) {
        config.gain_0_dirty.Assign(0);
        std::transform(config.gain[0], config.gain[0] + state.gain[0].size(), state.gain[0].begin(),
                       [](const auto& coeff) { return static_cast<float>(coeff); });
        LOG_TRACE(Audio_DSP, "source_id=%zu gain 0 update", source_id);
    }

    if (config.gain_1_dirty) {
        config.gain_1_dirty.Assign(0);
        std::transform(config.gain[1], config.gain[1] + state.gain[1].size(), state.gain[1].begin(),
                       [](const auto& coeff) { return static_cast<float>(coeff); });
        LOG_TRACE(Audio_DSP, "source_id=%zu gain 1 update", source_id);
    }

    if (config.gain_2_dirty) {
        config.gain_2_dirty.Assign(0);
        std::transform(config.gain[2], config.gain[2] + state.gain[2].size(), state.gain[2].begin(),
                       [](const auto& coeff) { return static_cast<float>(coeff); });
        LOG_TRACE(Audio_DSP, "source_id=%zu gain 2 update", source_id);
    }

    if (config.filters_enabled_dirty) {
        config.filters_enabled_dirty.Assign(0);
        state.filters.Enable(config.simple_filter_enabled.ToBool(), config.biquad_filter_enabled.ToBool());
        LOG_TRACE(Audio_DSP, "source_id=%zu enable_simple=%hu enable_biquad=%hu",
                  source_id, config.simple_filter_enabled.Value(), config.biquad_filter_enabled.Value());
    }

    if (config.simple_filter_dirty) {
        config.simple_filter_dirty.Assign(0);
        state.filters.Configure(config.simple_filter);
        LOG_TRACE(Audio_DSP, "source_id=%zu simple filter update", source_id);
    }

    if (config.biquad_filter_dirty) {
        config.biquad_filter_dirty.Assign(0);
        state.filters.Configure(config.biquad_filter);
        LOG_TRACE(Audio_DSP, "source_id=%zu biquad filter update", source_id);
    }

    if (config.interpolation_dirty) {
        config.interpolation_dirty.Assign(0);
        state.interpolation_mode = config.interpolation_mode;
        LOG_TRACE(Audio_DSP, "source_id=%zu interpolation_mode=%zu", source_id, static_cast<size_t>(state.interpolation_mode));
    }

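    // Note: format and mono_or_stereo are also latched when a new embedded buffer
    // arrives (embedded_buffer_dirty), so the Buffer enqueued below records the
    // format that was submitted alongside it.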
    if (config.format_dirty || config.embedded_buffer_dirty) {
        config.format_dirty.Assign(0);
        state.format = config.format;
        LOG_TRACE(Audio_DSP, "source_id=%zu format=%zu", source_id, static_cast<size_t>(state.format));
    }

    if (config.mono_or_stereo_dirty || config.embedded_buffer_dirty) {
        config.mono_or_stereo_dirty.Assign(0);
        state.mono_or_stereo = config.mono_or_stereo;
        LOG_TRACE(Audio_DSP, "source_id=%zu mono_or_stereo=%zu", source_id, static_cast<size_t>(state.mono_or_stereo));
    }

    if (config.embedded_buffer_dirty) {
        config.embedded_buffer_dirty.Assign(0);
        state.input_queue.emplace(Buffer{
            config.physical_address,
            config.length,
            static_cast<u8>(config.adpcm_ps),
            { config.adpcm_yn[0], config.adpcm_yn[1] },
            config.adpcm_dirty.ToBool(),
            config.is_looping.ToBool(),
            config.buffer_id,
            state.mono_or_stereo,
            state.format,
            false
        });
        LOG_TRACE(Audio_DSP, "enqueuing embedded addr=0x%08x len=%u id=%hu", config.physical_address, config.length, config.buffer_id);
    }

    if (config.buffer_queue_dirty) {
        config.buffer_queue_dirty.Assign(0);
        for (size_t i = 0; i < 4; i++) {
            if (config.buffers_dirty & (1 << i)) {
                const auto& b = config.buffers[i];
                state.input_queue.emplace(Buffer{
                    b.physical_address,
                    b.length,
                    static_cast<u8>(b.adpcm_ps),
                    { b.adpcm_yn[0], b.adpcm_yn[1] },
                    b.adpcm_dirty != 0,
                    b.is_looping != 0,
                    b.buffer_id,
                    state.mono_or_stereo,
                    state.format,
                    true
                });
                LOG_TRACE(Audio_DSP, "enqueuing queued %zu addr=0x%08x len=%u id=%hu", i, b.physical_address, b.length, b.buffer_id);
            }
        }
        config.buffers_dirty = 0;
    }

    if (config.dirty_raw) {
        LOG_DEBUG(Audio_DSP, "source_id=%zu remaining_dirty=%x", source_id, config.dirty_raw);
    }

    config.dirty_raw = 0;
}

void Source::GenerateFrame() {
    current_frame.fill({});

    if (state.current_buffer.empty() && !DequeueBuffer()) {
        state.enabled = false;
        state.buffer_update = true;
        state.current_buffer_id = 0;
        return;
    }

    size_t frame_position = 0;

    state.current_sample_number = state.next_sample_number;
    while (frame_position < current_frame.size()) {
        if (state.current_buffer.empty() && !DequeueBuffer()) {
            break;
        }

        const size_t size_to_copy = std::min(state.current_buffer.size(), current_frame.size() - frame_position);

        std::copy(state.current_buffer.begin(), state.current_buffer.begin() + size_to_copy, current_frame.begin() + frame_position);
        state.current_buffer.erase(state.current_buffer.begin(), state.current_buffer.begin() + size_to_copy);

        frame_position += size_to_copy;
        state.next_sample_number += static_cast<u32>(size_to_copy);
    }

    state.filters.ProcessFrame(current_frame);
}

bool Source::DequeueBuffer() {
    ASSERT_MSG(state.current_buffer.empty(), "Shouldn't dequeue; we still have data in current_buffer");

    if (state.input_queue.empty())
        return false;

    const Buffer buf = state.input_queue.top();
    state.input_queue.pop();

    if (buf.adpcm_dirty) {
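        // The buffer carries fresh ADPCM predictor history (yn1/yn2); restore it so
        // Codec::DecodeADPCM continues from the intended decoder state.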
        state.adpcm_state.yn1 = buf.adpcm_yn[0];
        state.adpcm_state.yn2 = buf.adpcm_yn[1];
    }

    if (buf.is_looping) {
        LOG_ERROR(Audio_DSP, "Looped buffers are unimplemented at the moment");
    }

    const u8* const memory = Memory::GetPhysicalPointer(buf.physical_address);
    if (memory) {
        const unsigned num_channels = buf.mono_or_stereo == MonoOrStereo::Stereo ? 2 : 1;
        switch (buf.format) {
        case Format::PCM8:
            state.current_buffer = Codec::DecodePCM8(num_channels, memory, buf.length);
            break;
        case Format::PCM16:
            state.current_buffer = Codec::DecodePCM16(num_channels, memory, buf.length);
            break;
        case Format::ADPCM:
            DEBUG_ASSERT(num_channels == 1);
            state.current_buffer = Codec::DecodeADPCM(memory, buf.length, state.adpcm_coeffs, state.adpcm_state);
            break;
        default:
            UNIMPLEMENTED();
            break;
        }
    } else {
        LOG_WARNING(Audio_DSP, "source_id=%zu buffer_id=%hu length=%u: Invalid physical address 0x%08X",
                    source_id, buf.buffer_id, buf.length, buf.physical_address);
        state.current_buffer.clear();
        return true;
    }

    switch (state.interpolation_mode) {
    case InterpolationMode::None:
        state.current_buffer = AudioInterp::None(state.interp_state, state.current_buffer, state.rate_multiplier);
        break;
    case InterpolationMode::Linear:
        state.current_buffer = AudioInterp::Linear(state.interp_state, state.current_buffer, state.rate_multiplier);
        break;
    case InterpolationMode::Polyphase:
        // TODO(merry): Implement polyphase interpolation
        state.current_buffer = AudioInterp::Linear(state.interp_state, state.current_buffer, state.rate_multiplier);
        break;
    default:
        UNIMPLEMENTED();
        break;
    }

    state.current_sample_number = 0;
    state.next_sample_number = 0;
    state.current_buffer_id = buf.buffer_id;
    state.buffer_update = buf.from_queue;

    LOG_TRACE(Audio_DSP, "source_id=%zu buffer_id=%hu from_queue=%s current_buffer.size()=%zu",
              source_id, buf.buffer_id, buf.from_queue ? "true" : "false", state.current_buffer.size());

    return true;
}

SourceStatus::Status Source::GetCurrentStatus() {
    SourceStatus::Status ret;

    // Applications depend on the correct emulation of
    // current_buffer_id_dirty and current_buffer_id to synchronise
    // audio with video.
    ret.is_enabled = state.enabled;
    ret.current_buffer_id_dirty = state.buffer_update ? 1 : 0;
    state.buffer_update = false;
    ret.current_buffer_id = state.current_buffer_id;
    ret.buffer_position = state.current_sample_number;
    ret.sync = state.sync;

    return ret;
}

} // namespace HLE
} // namespace DSP

src/audio_core/hle/source.h (new file, 144 lines)

@@ -0,0 +1,144 @@
// Copyright 2016 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <array>
#include <queue>
#include <vector>

#include "audio_core/codec.h"
#include "audio_core/hle/common.h"
#include "audio_core/hle/dsp.h"
#include "audio_core/hle/filter.h"
#include "audio_core/interpolate.h"

#include "common/common_types.h"

namespace DSP {
namespace HLE {

/**
 * This module performs:
 * - Buffer management
 * - Decoding of buffers
 * - Buffer resampling and interpolation
 * - Per-source filtering (SimpleFilter, BiquadFilter)
 * - Per-source gain
 * - Other per-source processing
 */
class Source final {
public:
    explicit Source(size_t source_id_) : source_id(source_id_) {
        Reset();
    }

    /// Resets internal state.
    void Reset();

    /**
     * This is called once every audio frame. This performs per-source processing every frame.
     * @param config The new configuration we've got for this Source from the application.
     * @param adpcm_coeffs ADPCM coefficients to use if config tells us to use them (may contain invalid values otherwise).
     * @return The current status of this Source. This is given back to the emulated application via SharedMemory.
     */
    SourceStatus::Status Tick(SourceConfiguration::Configuration& config, const s16_le (&adpcm_coeffs)[16]);

    /**
     * Mix this source's output into dest, using the gains for the `intermediate_mix_id`-th intermediate mixer.
     * @param dest The QuadFrame32 to mix into.
     * @param intermediate_mix_id The id of the intermediate mix whose gains we are using.
     */
    void MixInto(QuadFrame32& dest, size_t intermediate_mix_id) const;

private:
    const size_t source_id;
    StereoFrame16 current_frame;

    using Format = SourceConfiguration::Configuration::Format;
    using InterpolationMode = SourceConfiguration::Configuration::InterpolationMode;
    using MonoOrStereo = SourceConfiguration::Configuration::MonoOrStereo;

    /// Internal representation of a buffer for our buffer queue
    struct Buffer {
        PAddr physical_address;
        u32 length;
        u8 adpcm_ps;
        std::array<u16, 2> adpcm_yn;
        bool adpcm_dirty;
        bool is_looping;
        u16 buffer_id;

        MonoOrStereo mono_or_stereo;
        Format format;

        bool from_queue;
    };

    struct BufferOrder {
        bool operator() (const Buffer& a, const Buffer& b) const {
            // Lower buffer_id comes first.
            return a.buffer_id > b.buffer_id;
        }
    };
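
    // Note: std::priority_queue keeps the element that compares greatest on top, so
    // ordering with operator> turns this queue into a min-heap: top() is always the
    // pending buffer with the smallest buffer_id, i.e. the next buffer to play.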

    struct {
        // State variables
        bool enabled = false;
        u16 sync = 0;

        // Mixing
        std::array<std::array<float, 4>, 3> gain = {};

        // Buffer queue
        std::priority_queue<Buffer, std::vector<Buffer>, BufferOrder> input_queue;
        MonoOrStereo mono_or_stereo = MonoOrStereo::Mono;
        Format format = Format::ADPCM;

        // Current buffer
        u32 current_sample_number = 0;
        u32 next_sample_number = 0;
        std::vector<std::array<s16, 2>> current_buffer;

        // buffer_id state
        bool buffer_update = false;
        u32 current_buffer_id = 0;

        // Decoding state
        std::array<s16, 16> adpcm_coeffs = {};
        Codec::ADPCMState adpcm_state = {};

        // Resampling state
        float rate_multiplier = 1.0;
        InterpolationMode interpolation_mode = InterpolationMode::Polyphase;
        AudioInterp::State interp_state = {};

        // Filter state
        SourceFilters filters;
    } state;

    // Internal functions

    /// INTERNAL: Update our internal state based on the current config.
    void ParseConfig(SourceConfiguration::Configuration& config, const s16_le (&adpcm_coeffs)[16]);
    /// INTERNAL: Generate the current audio output for this frame based on our internal state.
    void GenerateFrame();
    /// INTERNAL: Dequeues a buffer and does preprocessing on it (decoding, resampling). Puts it into current_buffer.
    bool DequeueBuffer();
    /// INTERNAL: Generates a SourceStatus::Status based on our internal state.
    SourceStatus::Status GetCurrentStatus();
};

} // namespace HLE
} // namespace DSP