mirror of https://github.com/mgba-emu/mgba.git

Core: Switch from blip to mAudioResampler

This commit is contained in:
parent 89866aff95
commit f51cb153d1
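The change replaces the two per-channel blip_t resamplers with a single interleaved stereo mAudioBuffer exposed through core->getAudioBuffer(), with rate conversion moved into mAudioResampler on the frontend side. A minimal consumer-side sketch using only calls that appear in this diff; the helper name drainAudio and its signature are illustrative, not part of the commit:

```c
#include <mgba/core/core.h>
#include <mgba-util/audio-buffer.h>

// Drain up to maxFrames interleaved stereo frames from the core's buffer.
// In this API a sample count refers to frames (one value per channel).
static int drainAudio(struct mCore* core, int16_t* out, size_t maxFrames) {
	struct mAudioBuffer* buffer = core->getAudioBuffer(core);
	size_t available = mAudioBufferAvailable(buffer);
	if (available > maxFrames) {
		available = maxFrames;
	}
	return mAudioBufferRead(buffer, out, available);
}
```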
@@ -32,6 +32,7 @@ enum mCoreChecksumType {
 	mCHECKSUM_CRC32,
 };
 
+struct mAudioBuffer;
 struct mCoreConfig;
 struct mCoreSync;
 struct mDebuggerSymbols;
@@ -79,7 +80,7 @@ struct mCore {
 	void (*putPixels)(struct mCore*, const void* buffer, size_t stride);
 
 	unsigned (*audioSampleRate)(const struct mCore*);
-	struct blip_t* (*getAudioChannel)(struct mCore*, int ch);
+	struct mAudioBuffer* (*getAudioBuffer)(struct mCore*);
 	void (*setAudioBufferSize)(struct mCore*, size_t samples);
 	size_t (*getAudioBufferSize)(struct mCore*);
 
@@ -16,7 +16,7 @@ CXX_GUARD_START
 struct mCore;
 struct mStateExtdataItem;
 
-struct blip_t;
+struct mAudioBuffer;
 
 enum mCoreFeature {
 	mCORE_FEATURE_OPENGL = 1,
@@ -41,7 +41,7 @@ struct mAVStream {
 	void (*audioRateChanged)(struct mAVStream*, unsigned rate);
 	void (*postVideoFrame)(struct mAVStream*, const color_t* buffer, size_t stride);
 	void (*postAudioFrame)(struct mAVStream*, int16_t left, int16_t right);
-	void (*postAudioBuffer)(struct mAVStream*, struct blip_t* left, struct blip_t* right);
+	void (*postAudioBuffer)(struct mAVStream*, struct mAudioBuffer*);
 };
 
 struct mStereoSample {
@@ -22,6 +22,7 @@ struct mCoreSync {
 	bool audioWait;
 	Condition audioRequiredCond;
 	Mutex audioBufferMutex;
+	size_t audioHighWater;
 
 	float fpsTarget;
 };
@@ -32,8 +33,8 @@ bool mCoreSyncWaitFrameStart(struct mCoreSync* sync);
 void mCoreSyncWaitFrameEnd(struct mCoreSync* sync);
 void mCoreSyncSetVideoSync(struct mCoreSync* sync, bool wait);
 
-struct blip_t;
-bool mCoreSyncProduceAudio(struct mCoreSync* sync, const struct blip_t*, size_t samples);
+struct mAudioBuffer;
+bool mCoreSyncProduceAudio(struct mCoreSync* sync, const struct mAudioBuffer*);
 void mCoreSyncLockAudio(struct mCoreSync* sync);
 void mCoreSyncUnlockAudio(struct mCoreSync* sync);
 void mCoreSyncConsumeAudio(struct mCoreSync* sync);
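The new sync contract replaces the old per-call sample count with the audioHighWater field above: the emulation thread blocks once the shared buffer holds at least that many samples, and the frontend sets the watermark and consumes. A sketch of both sides, assuming the usage seen in thread.c and the SDL/Qt frontends later in this diff; names other than the mCoreSync/mAudioBuffer calls are illustrative:

```c
#include <mgba/core/core.h>
#include <mgba/core/sync.h>
#include <mgba-util/audio-buffer.h>

// Producer side (emulation thread): stall until the frontend drains the buffer.
static void produceAudio(struct mCoreSync* sync, struct mCore* core) {
	mCoreSyncLockAudio(sync);
	// Waits while audioWait is set and at least audioHighWater samples are
	// already buffered; releases the audio lock before returning.
	mCoreSyncProduceAudio(sync, core->getAudioBuffer(core));
}

// Consumer side (audio callback): set the watermark, read, wake the producer.
static void consumeAudio(struct mCoreSync* sync, struct mCore* core, int16_t* out, size_t frames) {
	mCoreSyncLockAudio(sync);
	sync->audioHighWater = frames * 2; // illustrative headroom
	mAudioBufferRead(core->getAudioBuffer(core), out, frames);
	mCoreSyncConsumeAudio(sync); // signals audioRequiredCond, as in the Qt/SDL callbacks
}
```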
@ -12,6 +12,7 @@ CXX_GUARD_START
|
|||
|
||||
#include <mgba/core/interface.h>
|
||||
#include <mgba/core/timing.h>
|
||||
#include <mgba-util/audio-buffer.h>
|
||||
|
||||
#define GB_MAX_SAMPLES 32
|
||||
|
||||
|
@ -166,14 +167,9 @@ struct GBAudio {
|
|||
struct GBAudioWaveChannel ch3;
|
||||
struct GBAudioNoiseChannel ch4;
|
||||
|
||||
struct blip_t* left;
|
||||
struct blip_t* right;
|
||||
int16_t lastLeft;
|
||||
int16_t lastRight;
|
||||
struct mAudioBuffer buffer;
|
||||
int32_t capLeft;
|
||||
int32_t capRight;
|
||||
int clock;
|
||||
int32_t clockRate;
|
||||
|
||||
uint8_t volumeRight;
|
||||
uint8_t volumeLeft;
|
||||
|
|
|
@ -63,10 +63,6 @@ struct GBAAudio {
|
|||
struct GBAAudioFIFO chA;
|
||||
struct GBAAudioFIFO chB;
|
||||
|
||||
int16_t lastLeft;
|
||||
int16_t lastRight;
|
||||
int clock;
|
||||
|
||||
uint8_t volume;
|
||||
bool volumeChA;
|
||||
bool volumeChB;
|
||||
|
|
|
@@ -5,7 +5,7 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include <mgba/core/sync.h>
 
-#include <mgba/core/blip_buf.h>
+#include <mgba-util/audio-buffer.h>
 
 static void _changeVideoSync(struct mCoreSync* sync, bool wait) {
 	// Make sure the video thread can process events while the GBA thread is paused
@@ -79,17 +79,17 @@ void mCoreSyncSetVideoSync(struct mCoreSync* sync, bool wait) {
 	_changeVideoSync(sync, wait);
 }
 
-bool mCoreSyncProduceAudio(struct mCoreSync* sync, const struct blip_t* buf, size_t samples) {
+bool mCoreSyncProduceAudio(struct mCoreSync* sync, const struct mAudioBuffer* buf) {
 	if (!sync) {
 		return true;
 	}
 
-	size_t produced = blip_samples_avail(buf);
+	size_t produced = mAudioBufferAvailable(buf);
 	size_t producedNew = produced;
-	while (sync->audioWait && producedNew >= samples) {
+	while (sync->audioWait && sync->audioHighWater && producedNew >= sync->audioHighWater) {
 		ConditionWait(&sync->audioRequiredCond, &sync->audioBufferMutex);
 		produced = producedNew;
-		producedNew = blip_samples_avail(buf);
+		producedNew = mAudioBufferAvailable(buf);
 	}
 	MutexUnlock(&sync->audioBufferMutex);
 	return producedNew != produced;
@@ -5,7 +5,6 @@
  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
 #include <mgba/core/thread.h>
 
-#include <mgba/core/blip_buf.h>
 #include <mgba/core/core.h>
 #ifdef ENABLE_SCRIPTING
 #include <mgba/script/context.h>
@@ -368,7 +367,7 @@ static THREAD_ENTRY _mCoreThreadRun(void* context) {
 			if (impl->sync.audioWait) {
 				MutexUnlock(&impl->stateMutex);
 				mCoreSyncLockAudio(&impl->sync);
-				mCoreSyncProduceAudio(&impl->sync, core->getAudioChannel(core, 0), core->getAudioBufferSize(core));
+				mCoreSyncProduceAudio(&impl->sync, core->getAudioBuffer(core));
 				MutexLock(&impl->stateMutex);
 			}
 		}
@@ -498,6 +497,7 @@ bool mCoreThreadStart(struct mCoreThread* threadContext) {
 	threadContext->impl->sync.audioWait = threadContext->core->opts.audioSync;
 	threadContext->impl->sync.videoFrameWait = threadContext->core->opts.videoSync;
 	threadContext->impl->sync.fpsTarget = threadContext->core->opts.fpsTarget;
+	threadContext->impl->sync.audioHighWater = 512;
 
 	MutexLock(&threadContext->impl->stateMutex);
 	ThreadCreate(&threadContext->impl->thread, _mCoreThreadRun, threadContext);
@ -5,7 +5,6 @@
|
|||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#include <mgba/internal/gb/audio.h>
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/interface.h>
|
||||
#include <mgba/core/sync.h>
|
||||
#include <mgba/internal/gb/gb.h>
|
||||
|
@ -15,15 +14,10 @@
|
|||
#include <mgba/internal/gba/audio.h>
|
||||
#endif
|
||||
|
||||
#ifdef __3DS__
|
||||
#define blip_add_delta blip_add_delta_fast
|
||||
#endif
|
||||
|
||||
#define AUDIO_BUFFER_SAMPLES 0x4000
|
||||
#define FRAME_CYCLES (DMG_SM83_FREQUENCY >> 9)
|
||||
|
||||
const uint32_t DMG_SM83_FREQUENCY = 0x400000;
|
||||
static const int CLOCKS_PER_BLIP_FRAME = 0x1000;
|
||||
static const unsigned BLIP_BUFFER_SIZE = 0x4000;
|
||||
static const int SAMPLE_INTERVAL = 32;
|
||||
static const int FILTER = 65368;
|
||||
const int GB_AUDIO_VOLUME_MAX = 0x100;
|
||||
|
@ -57,12 +51,7 @@ static const int _squareChannelDuty[4][8] = {
|
|||
|
||||
void GBAudioInit(struct GBAudio* audio, size_t samples, uint8_t* nr52, enum GBAudioStyle style) {
|
||||
audio->samples = samples;
|
||||
audio->left = blip_new(BLIP_BUFFER_SIZE);
|
||||
audio->right = blip_new(BLIP_BUFFER_SIZE);
|
||||
audio->clockRate = DMG_SM83_FREQUENCY;
|
||||
// Guess too large; we hang producing extra samples if we guess too low
|
||||
blip_set_rates(audio->left, DMG_SM83_FREQUENCY, 96000);
|
||||
blip_set_rates(audio->right, DMG_SM83_FREQUENCY, 96000);
|
||||
mAudioBufferInit(&audio->buffer, AUDIO_BUFFER_SAMPLES, 2);
|
||||
audio->forceDisableCh[0] = false;
|
||||
audio->forceDisableCh[1] = false;
|
||||
audio->forceDisableCh[2] = false;
|
||||
|
@ -86,8 +75,7 @@ void GBAudioInit(struct GBAudio* audio, size_t samples, uint8_t* nr52, enum GBAu
|
|||
}
|
||||
|
||||
void GBAudioDeinit(struct GBAudio* audio) {
|
||||
blip_delete(audio->left);
|
||||
blip_delete(audio->right);
|
||||
mAudioBufferDeinit(&audio->buffer);
|
||||
}
|
||||
|
||||
void GBAudioReset(struct GBAudio* audio) {
|
||||
|
@ -123,11 +111,9 @@ void GBAudioReset(struct GBAudio* audio) {
|
|||
audio->sampleInterval = SAMPLE_INTERVAL * GB_MAX_SAMPLES;
|
||||
audio->lastSample = 0;
|
||||
audio->sampleIndex = 0;
|
||||
audio->lastLeft = 0;
|
||||
audio->lastRight = 0;
|
||||
audio->capLeft = 0;
|
||||
audio->capRight = 0;
|
||||
audio->clock = 0;
|
||||
mAudioBufferClear(&audio->buffer);
|
||||
audio->playingCh1 = false;
|
||||
audio->playingCh2 = false;
|
||||
audio->playingCh3 = false;
|
||||
|
@ -140,14 +126,8 @@ void GBAudioReset(struct GBAudio* audio) {
|
|||
}
|
||||
|
||||
void GBAudioResizeBuffer(struct GBAudio* audio, size_t samples) {
|
||||
if (samples > BLIP_BUFFER_SIZE / 2) {
|
||||
samples = BLIP_BUFFER_SIZE / 2;
|
||||
}
|
||||
mCoreSyncLockAudio(audio->p->sync);
|
||||
audio->samples = samples;
|
||||
blip_clear(audio->left);
|
||||
blip_clear(audio->right);
|
||||
audio->clock = 0;
|
||||
mCoreSyncConsumeAudio(audio->p->sync);
|
||||
}
|
||||
|
||||
|
@ -845,34 +825,25 @@ static void _sample(struct mTiming* timing, void* user, uint32_t cyclesLate) {
|
|||
unsigned produced;
|
||||
int i;
|
||||
for (i = 0; i < GB_MAX_SAMPLES; ++i) {
|
||||
int16_t sampleLeft = audio->currentSamples[i].left;
|
||||
int16_t sampleRight = audio->currentSamples[i].right;
|
||||
if ((size_t) blip_samples_avail(audio->left) < audio->samples) {
|
||||
blip_add_delta(audio->left, audio->clock, sampleLeft - audio->lastLeft);
|
||||
blip_add_delta(audio->right, audio->clock, sampleRight - audio->lastRight);
|
||||
audio->lastLeft = sampleLeft;
|
||||
audio->lastRight = sampleRight;
|
||||
audio->clock += SAMPLE_INTERVAL;
|
||||
if (audio->clock >= CLOCKS_PER_BLIP_FRAME) {
|
||||
blip_end_frame(audio->left, CLOCKS_PER_BLIP_FRAME);
|
||||
blip_end_frame(audio->right, CLOCKS_PER_BLIP_FRAME);
|
||||
audio->clock -= CLOCKS_PER_BLIP_FRAME;
|
||||
}
|
||||
}
|
||||
int16_t sample[2] = {
|
||||
audio->currentSamples[i].left,
|
||||
audio->currentSamples[i].right
|
||||
};
|
||||
mAudioBufferWrite(&audio->buffer, sample, 1);
|
||||
if (audio->p->stream && audio->p->stream->postAudioFrame) {
|
||||
audio->p->stream->postAudioFrame(audio->p->stream, sampleLeft, sampleRight);
|
||||
audio->p->stream->postAudioFrame(audio->p->stream, sample[0], sample[1]);
|
||||
}
|
||||
}
|
||||
|
||||
produced = blip_samples_avail(audio->left);
|
||||
produced = mAudioBufferAvailable(&audio->buffer);
|
||||
bool wait = produced >= audio->samples;
|
||||
if (!mCoreSyncProduceAudio(audio->p->sync, audio->left, audio->samples)) {
|
||||
if (!mCoreSyncProduceAudio(audio->p->sync, &audio->buffer)) {
|
||||
// Interrupted
|
||||
audio->p->earlyExit = true;
|
||||
}
|
||||
|
||||
if (wait && audio->p->stream && audio->p->stream->postAudioBuffer) {
|
||||
audio->p->stream->postAudioBuffer(audio->p->stream, audio->left, audio->right);
|
||||
audio->p->stream->postAudioBuffer(audio->p->stream, &audio->buffer);
|
||||
}
|
||||
mTimingSchedule(timing, &audio->sampleEvent, audio->sampleInterval * audio->timingFactor - cyclesLate);
|
||||
}
|
||||
|
|
|
@@ -428,16 +428,9 @@ static void _GBCorePutPixels(struct mCore* core, const void* buffer, size_t stride) {
 	gbcore->renderer.d.putPixels(&gbcore->renderer.d, stride, buffer);
 }
 
-static struct blip_t* _GBCoreGetAudioChannel(struct mCore* core, int ch) {
+static struct mAudioBuffer* _GBCoreGetAudioBuffer(struct mCore* core) {
 	struct GB* gb = core->board;
-	switch (ch) {
-	case 0:
-		return gb->audio.left;
-	case 1:
-		return gb->audio.right;
-	default:
-		return NULL;
-	}
+	return &gb->audio.buffer;
 }
 
 static void _GBCoreSetAudioBufferSize(struct mCore* core, size_t samples) {
@@ -1308,7 +1301,7 @@ struct mCore* GBCoreCreate(void) {
 	core->getPixels = _GBCoreGetPixels;
 	core->putPixels = _GBCorePutPixels;
 	core->audioSampleRate = _GBCoreAudioSampleRate;
-	core->getAudioChannel = _GBCoreGetAudioChannel;
+	core->getAudioBuffer = _GBCoreGetAudioBuffer;
 	core->setAudioBufferSize = _GBCoreSetAudioBufferSize;
 	core->getAudioBufferSize = _GBCoreGetAudioBufferSize;
 	core->setAVStream = _GBCoreSetAVStream;
@ -6,7 +6,6 @@
|
|||
#include <mgba/internal/gba/audio.h>
|
||||
|
||||
#include <mgba/internal/arm/macros.h>
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/sync.h>
|
||||
#include <mgba/internal/gba/dma.h>
|
||||
#include <mgba/internal/gba/gba.h>
|
||||
|
@ -16,17 +15,12 @@
|
|||
|
||||
#define MP2K_LOCK_MAX 8
|
||||
|
||||
#ifdef __3DS__
|
||||
#define blip_add_delta blip_add_delta_fast
|
||||
#endif
|
||||
|
||||
mLOG_DEFINE_CATEGORY(GBA_AUDIO, "GBA Audio", "gba.audio");
|
||||
|
||||
const unsigned GBA_AUDIO_SAMPLES = 2048;
|
||||
const int GBA_AUDIO_VOLUME_MAX = 0x100;
|
||||
|
||||
static const int SAMPLE_INTERVAL = GBA_ARM7TDMI_FREQUENCY / 0x4000;
|
||||
static const int CLOCKS_PER_FRAME = 0x800;
|
||||
|
||||
static int _applyBias(struct GBAAudio* audio, int sample);
|
||||
static void _sample(struct mTiming* timing, void* user, uint32_t cyclesLate);
|
||||
|
@ -41,14 +35,10 @@ void GBAAudioInit(struct GBAAudio* audio, size_t samples) {
|
|||
#ifdef __BIG_ENDIAN__
|
||||
++nr52;
|
||||
#endif
|
||||
GBAudioInit(&audio->psg, 0, nr52, GB_AUDIO_GBA);
|
||||
GBAudioInit(&audio->psg, samples, nr52, GB_AUDIO_GBA);
|
||||
audio->psg.timing = &audio->p->timing;
|
||||
audio->psg.clockRate = GBA_ARM7TDMI_FREQUENCY;
|
||||
audio->psg.frameEvent.context = audio;
|
||||
audio->samples = samples;
|
||||
// Guess too large; we hang producing extra samples if we guess too low
|
||||
blip_set_rates(audio->psg.left, GBA_ARM7TDMI_FREQUENCY, 96000);
|
||||
blip_set_rates(audio->psg.right, GBA_ARM7TDMI_FREQUENCY, 96000);
|
||||
|
||||
audio->forceDisableChA = false;
|
||||
audio->forceDisableChB = false;
|
||||
|
@ -93,10 +83,6 @@ void GBAAudioReset(struct GBAAudio* audio) {
|
|||
audio->enable = false;
|
||||
audio->sampleInterval = GBA_ARM7TDMI_FREQUENCY / 0x8000;
|
||||
audio->psg.sampleInterval = audio->sampleInterval;
|
||||
|
||||
blip_clear(audio->psg.left);
|
||||
blip_clear(audio->psg.right);
|
||||
audio->clock = 0;
|
||||
}
|
||||
|
||||
void GBAAudioDeinit(struct GBAAudio* audio) {
|
||||
|
@ -104,14 +90,9 @@ void GBAAudioDeinit(struct GBAAudio* audio) {
|
|||
}
|
||||
|
||||
void GBAAudioResizeBuffer(struct GBAAudio* audio, size_t samples) {
|
||||
if (samples > 0x2000) {
|
||||
samples = 0x2000;
|
||||
}
|
||||
mCoreSyncLockAudio(audio->p->sync);
|
||||
audio->samples = samples;
|
||||
blip_clear(audio->psg.left);
|
||||
blip_clear(audio->psg.right);
|
||||
audio->clock = 0;
|
||||
audio->psg.samples = samples;
|
||||
mCoreSyncConsumeAudio(audio->p->sync);
|
||||
}
|
||||
|
||||
|
@ -414,34 +395,24 @@ static void _sample(struct mTiming* timing, void* user, uint32_t cyclesLate) {
|
|||
unsigned produced;
|
||||
int i;
|
||||
for (i = 0; i < samples; ++i) {
|
||||
int16_t sampleLeft = audio->currentSamples[i].left;
|
||||
int16_t sampleRight = audio->currentSamples[i].right;
|
||||
if ((size_t) blip_samples_avail(audio->psg.left) < audio->samples) {
|
||||
blip_add_delta(audio->psg.left, audio->clock, sampleLeft - audio->lastLeft);
|
||||
blip_add_delta(audio->psg.right, audio->clock, sampleRight - audio->lastRight);
|
||||
audio->lastLeft = sampleLeft;
|
||||
audio->lastRight = sampleRight;
|
||||
audio->clock += audio->sampleInterval;
|
||||
if (audio->clock >= CLOCKS_PER_FRAME) {
|
||||
blip_end_frame(audio->psg.left, CLOCKS_PER_FRAME);
|
||||
blip_end_frame(audio->psg.right, CLOCKS_PER_FRAME);
|
||||
audio->clock -= CLOCKS_PER_FRAME;
|
||||
}
|
||||
}
|
||||
|
||||
int16_t sample[2] = {
|
||||
audio->currentSamples[i].left,
|
||||
audio->currentSamples[i].right
|
||||
};
|
||||
mAudioBufferWrite(&audio->psg.buffer, sample, 1);
|
||||
if (audio->p->stream && audio->p->stream->postAudioFrame) {
|
||||
audio->p->stream->postAudioFrame(audio->p->stream, sampleLeft, sampleRight);
|
||||
audio->p->stream->postAudioFrame(audio->p->stream, sample[0], sample[1]);
|
||||
}
|
||||
}
|
||||
produced = blip_samples_avail(audio->psg.left);
|
||||
produced = mAudioBufferAvailable(&audio->psg.buffer);
|
||||
bool wait = produced >= audio->samples;
|
||||
if (!mCoreSyncProduceAudio(audio->p->sync, audio->psg.left, audio->samples)) {
|
||||
if (!mCoreSyncProduceAudio(audio->p->sync, &audio->psg.buffer)) {
|
||||
// Interrupted
|
||||
audio->p->earlyExit = true;
|
||||
}
|
||||
|
||||
if (wait && audio->p->stream && audio->p->stream->postAudioBuffer) {
|
||||
audio->p->stream->postAudioBuffer(audio->p->stream, audio->psg.left, audio->psg.right);
|
||||
audio->p->stream->postAudioBuffer(audio->p->stream, &audio->psg.buffer);
|
||||
}
|
||||
|
||||
mTimingSchedule(timing, &audio->sampleEvent, SAMPLE_INTERVAL - cyclesLate);
|
||||
|
|
|
@ -558,16 +558,9 @@ static unsigned _GBACoreAudioSampleRate(const struct mCore* core) {
|
|||
return 65536;
|
||||
}
|
||||
|
||||
static struct blip_t* _GBACoreGetAudioChannel(struct mCore* core, int ch) {
|
||||
static struct mAudioBuffer* _GBACoreGetAudioBuffer(struct mCore* core) {
|
||||
struct GBA* gba = core->board;
|
||||
switch (ch) {
|
||||
case 0:
|
||||
return gba->audio.psg.left;
|
||||
case 1:
|
||||
return gba->audio.psg.right;
|
||||
default:
|
||||
return NULL;
|
||||
}
|
||||
return &gba->audio.psg.buffer;
|
||||
}
|
||||
|
||||
static void _GBACoreSetAudioBufferSize(struct mCore* core, size_t samples) {
|
||||
|
@ -1505,7 +1498,7 @@ struct mCore* GBACoreCreate(void) {
|
|||
core->getPixels = _GBACoreGetPixels;
|
||||
core->putPixels = _GBACorePutPixels;
|
||||
core->audioSampleRate = _GBACoreAudioSampleRate;
|
||||
core->getAudioChannel = _GBACoreGetAudioChannel;
|
||||
core->getAudioBuffer = _GBACoreGetAudioBuffer;
|
||||
core->setAudioBufferSize = _GBACoreSetAudioBufferSize;
|
||||
core->getAudioBufferSize = _GBACoreGetAudioBufferSize;
|
||||
core->addCoreCallbacks = _GBACoreAddCoreCallbacks;
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/core.h>
|
||||
#include <mgba/core/serialize.h>
|
||||
#ifdef M_CORE_GBA
|
||||
|
@ -61,7 +60,7 @@ static enum DarkenMode {
|
|||
|
||||
#define _3DS_INPUT 0x3344534B
|
||||
|
||||
#define AUDIO_SAMPLES 384
|
||||
#define AUDIO_SAMPLES 1280
|
||||
#define AUDIO_SAMPLE_BUFFER (AUDIO_SAMPLES * 16)
|
||||
#define DSP_BUFFERS 4
|
||||
|
||||
|
@ -190,7 +189,7 @@ static void _map3DSKey(struct mInputMap* map, int ctrKey, int key) {
|
|||
mInputBindKey(map, _3DS_INPUT, __builtin_ctz(ctrKey), key);
|
||||
}
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right);
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer* buffer);
|
||||
|
||||
static void _drawStart(void) {
|
||||
if (frameStarted) {
|
||||
|
@ -343,12 +342,13 @@ static void _gameLoaded(struct mGUIRunner* runner) {
|
|||
}
|
||||
osSetSpeedupEnable(true);
|
||||
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 0), runner->core->frequency(runner->core), 32768);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 1), runner->core->frequency(runner->core), 32768);
|
||||
if (hasSound != NO_SOUND) {
|
||||
audioPos = 0;
|
||||
}
|
||||
if (hasSound == DSP_SUPPORTED) {
|
||||
unsigned sampleRate = runner->core->audioSampleRate(runner->core);
|
||||
double fauxClock = mCoreCalculateFramerateRatio(runner->core, 16756991. / 280095.);
|
||||
ndspChnSetRate(0, sampleRate * fauxClock);
|
||||
memset(audioLeft, 0, AUDIO_SAMPLE_BUFFER * 2 * sizeof(int16_t));
|
||||
}
|
||||
unsigned mode;
|
||||
|
@ -607,8 +607,7 @@ static void _drawFrame(struct mGUIRunner* runner, bool faded) {
|
|||
GX_TRANSFER_OUT_TILED(1) | GX_TRANSFER_FLIP_VERT(1));
|
||||
|
||||
if (hasSound == NO_SOUND) {
|
||||
blip_clear(runner->core->getAudioChannel(runner->core, 0));
|
||||
blip_clear(runner->core->getAudioChannel(runner->core, 1));
|
||||
mAudioBufferClear(runner->core->getAudioBuffer(runner->core));
|
||||
}
|
||||
|
||||
_drawTex(runner->core, faded, interframeBlending);
|
||||
|
@ -775,15 +774,14 @@ static void _requestImage(struct mImageSource* source, const void** buffer, size
|
|||
CAMU_SetReceiving(&imageSource->handles[0], imageSource->buffer, PORT_CAM1, imageSource->bufferSize, imageSource->transferSize);
|
||||
}
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right) {
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer* buffer) {
|
||||
UNUSED(stream);
|
||||
if (hasSound == DSP_SUPPORTED) {
|
||||
int startId = bufferId;
|
||||
while (dspBuffer[bufferId].status == NDSP_WBUF_QUEUED || dspBuffer[bufferId].status == NDSP_WBUF_PLAYING) {
|
||||
bufferId = (bufferId + 1) & (DSP_BUFFERS - 1);
|
||||
if (bufferId == startId) {
|
||||
blip_clear(left);
|
||||
blip_clear(right);
|
||||
mAudioBufferClear(buffer);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -791,8 +789,7 @@ static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* rig
|
|||
memset(&dspBuffer[bufferId], 0, sizeof(dspBuffer[bufferId]));
|
||||
dspBuffer[bufferId].data_pcm16 = tmpBuf;
|
||||
dspBuffer[bufferId].nsamples = AUDIO_SAMPLES;
|
||||
blip_read_samples(left, dspBuffer[bufferId].data_pcm16, AUDIO_SAMPLES, true);
|
||||
blip_read_samples(right, dspBuffer[bufferId].data_pcm16 + 1, AUDIO_SAMPLES, true);
|
||||
mAudioBufferRead(buffer, dspBuffer[bufferId].data_pcm16, AUDIO_SAMPLES);
|
||||
DSP_FlushDataCache(dspBuffer[bufferId].data_pcm16, AUDIO_SAMPLES * 2 * sizeof(int16_t));
|
||||
ndspChnWaveBufAdd(0, &dspBuffer[bufferId]);
|
||||
}
|
||||
|
@ -857,7 +854,6 @@ int main(int argc, char* argv[]) {
|
|||
ndspChnReset(0);
|
||||
ndspChnSetFormat(0, NDSP_FORMAT_STEREO_PCM16);
|
||||
ndspChnSetInterp(0, NDSP_INTERP_NONE);
|
||||
ndspChnSetRate(0, 32822);
|
||||
ndspChnWaveBufClear(0);
|
||||
audioLeft = linearMemAlign(AUDIO_SAMPLES * DSP_BUFFERS * 2 * sizeof(int16_t), 0x80);
|
||||
memset(dspBuffer, 0, sizeof(dspBuffer));
|
||||
|
|
|
@ -7,7 +7,6 @@
|
|||
|
||||
#include <mgba-util/common.h>
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/cheats.h>
|
||||
#include <mgba/core/core.h>
|
||||
#include <mgba/core/log.h>
|
||||
|
@ -30,7 +29,6 @@
|
|||
#include "libretro_core_options.h"
|
||||
|
||||
#define GB_SAMPLES 512
|
||||
#define SAMPLE_RATE 32768
|
||||
/* An alpha factor of 1/180 is *somewhat* equivalent
|
||||
* to calculating the average for the last 180
|
||||
* frames, or 3 seconds of runtime... */
|
||||
|
@ -54,7 +52,7 @@ static retro_set_sensor_state_t sensorStateCallback;
|
|||
|
||||
static void GBARetroLog(struct mLogger* logger, int category, enum mLogLevel level, const char* format, va_list args);
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream*, blip_t* left, blip_t* right);
|
||||
static void _postAudioBuffer(struct mAVStream*, struct mAudioBuffer*);
|
||||
static void _setRumble(struct mRumble* rumble, int enable);
|
||||
static uint8_t _readLux(struct GBALuminanceSource* lux);
|
||||
static void _updateLux(struct GBALuminanceSource* lux);
|
||||
|
@ -424,7 +422,7 @@ void retro_get_system_av_info(struct retro_system_av_info* info) {
|
|||
|
||||
info->geometry.aspect_ratio = width / (double) height;
|
||||
info->timing.fps = core->frequency(core) / (float) core->frameCycles(core);
|
||||
info->timing.sample_rate = SAMPLE_RATE;
|
||||
info->timing.sample_rate = core->audioSampleRate(core);
|
||||
}
|
||||
|
||||
void retro_init(void) {
|
||||
|
@ -613,9 +611,8 @@ void retro_run(void) {
|
|||
|
||||
#ifdef M_CORE_GBA
|
||||
if (core->platform(core) == mPLATFORM_GBA) {
|
||||
blip_t *audioChannelLeft = core->getAudioChannel(core, 0);
|
||||
blip_t *audioChannelRight = core->getAudioChannel(core, 1);
|
||||
int samplesAvail = blip_samples_avail(audioChannelLeft);
|
||||
struct mAudioBuffer *buffer = core->getAudioBuffer(core);
|
||||
int samplesAvail = mAudioBufferAvailable(buffer);
|
||||
if (samplesAvail > 0) {
|
||||
/* Update 'running average' of number of
|
||||
* samples per frame.
|
||||
|
@ -632,8 +629,7 @@ void retro_run(void) {
|
|||
audioSampleBufferSize = (samplesToRead * 2);
|
||||
audioSampleBuffer = realloc(audioSampleBuffer, audioSampleBufferSize * sizeof(int16_t));
|
||||
}
|
||||
int produced = blip_read_samples(audioChannelLeft, audioSampleBuffer, samplesToRead, true);
|
||||
blip_read_samples(audioChannelRight, audioSampleBuffer + 1, samplesToRead, true);
|
||||
int produced = mAudioBufferRead(buffer, audioSampleBuffer, samplesToRead);
|
||||
if (produced > 0) {
|
||||
if (audioLowPassEnabled) {
|
||||
_audioLowPassFilter(audioSampleBuffer, produced);
|
||||
|
@ -884,9 +880,9 @@ bool retro_load_game(const struct retro_game_info* game) {
|
|||
* to nominal number of samples per frame.
|
||||
* Buffer will be resized as required in
|
||||
* retro_run(). */
|
||||
size_t audioSamplesPerFrame = (size_t)((float) SAMPLE_RATE * (float) core->frameCycles(core) /
|
||||
size_t audioSamplesPerFrame = (size_t)((float) core->audioSampleRate(core) * (float) core->frameCycles(core) /
|
||||
(float)core->frequency(core));
|
||||
audioSampleBufferSize = audioSamplesPerFrame * 2;
|
||||
audioSampleBufferSize = ceil(audioSamplesPerFrame) * 2;
|
||||
audioSampleBuffer = malloc(audioSampleBufferSize * sizeof(int16_t));
|
||||
audioSamplesPerFrameAvg = (float) audioSamplesPerFrame;
|
||||
/* Internal audio buffer size should be
|
||||
|
@ -918,9 +914,6 @@ bool retro_load_game(const struct retro_game_info* game) {
|
|||
core->setAudioBufferSize(core, GB_SAMPLES);
|
||||
}
|
||||
|
||||
blip_set_rates(core->getAudioChannel(core, 0), core->frequency(core), SAMPLE_RATE);
|
||||
blip_set_rates(core->getAudioChannel(core, 1), core->frequency(core), SAMPLE_RATE);
|
||||
|
||||
core->setPeripheral(core, mPERIPH_RUMBLE, &rumble);
|
||||
core->setPeripheral(core, mPERIPH_ROTATION, &rotation);
|
||||
|
||||
|
@ -1238,10 +1231,9 @@ void GBARetroLog(struct mLogger* logger, int category, enum mLogLevel level, con
|
|||
}
|
||||
|
||||
/* Used only for GB/GBC content */
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right) {
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer* buffer) {
|
||||
UNUSED(stream);
|
||||
int produced = blip_read_samples(left, audioSampleBuffer, GB_SAMPLES, true);
|
||||
blip_read_samples(right, audioSampleBuffer + 1, GB_SAMPLES, true);
|
||||
int produced = mAudioBufferRead(buffer, audioSampleBuffer, GB_SAMPLES);
|
||||
if (produced > 0) {
|
||||
if (audioLowPassEnabled) {
|
||||
_audioLowPassFilter(audioSampleBuffer, produced);
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#include "psp2-context.h"
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/core.h>
|
||||
|
||||
#ifdef M_CORE_GBA
|
||||
|
@ -18,9 +17,10 @@
|
|||
#include "feature/gui/gui-runner.h"
|
||||
#include <mgba/internal/gba/input.h>
|
||||
|
||||
#include <mgba-util/memory.h>
|
||||
#include <mgba-util/audio-resampler.h>
|
||||
#include <mgba-util/circle-buffer.h>
|
||||
#include <mgba-util/math.h>
|
||||
#include <mgba-util/memory.h>
|
||||
#include <mgba-util/threading.h>
|
||||
#include <mgba-util/vfs.h>
|
||||
#include <mgba-util/platform/psp2/sce-vfs.h>
|
||||
|
@ -84,14 +84,21 @@ bool frameLimiter = true;
|
|||
extern const uint8_t _binary_backdrop_png_start[];
|
||||
static vita2d_texture* backdrop = 0;
|
||||
|
||||
#define BUFFERS 16
|
||||
#define PSP2_SAMPLES 512
|
||||
#define PSP2_AUDIO_BUFFER_SIZE (PSP2_SAMPLES * 16)
|
||||
#define PSP2_AUDIO_BUFFER_SIZE (PSP2_SAMPLES * BUFFERS)
|
||||
|
||||
struct mPSP2AudioBuffer {
|
||||
int16_t samples[PSP2_SAMPLES * 2] __attribute__((__aligned__(64)));
|
||||
bool full;
|
||||
};
|
||||
|
||||
static struct mPSP2AudioContext {
|
||||
struct mStereoSample buffer[PSP2_AUDIO_BUFFER_SIZE];
|
||||
size_t writeOffset;
|
||||
size_t readOffset;
|
||||
size_t samples;
|
||||
struct mPSP2AudioBuffer outputBuffers[BUFFERS];
|
||||
int currentAudioBuffer;
|
||||
int nextAudioBuffer;
|
||||
struct mAudioBuffer buffer;
|
||||
struct mAudioResampler resampler;
|
||||
Mutex mutex;
|
||||
Condition cond;
|
||||
bool running;
|
||||
|
@ -103,29 +110,26 @@ void mPSP2MapKey(struct mInputMap* map, int pspKey, int key) {
|
|||
|
||||
static THREAD_ENTRY _audioThread(void* context) {
|
||||
struct mPSP2AudioContext* audio = (struct mPSP2AudioContext*) context;
|
||||
uint32_t zeroBuffer[PSP2_SAMPLES] = {0};
|
||||
void* buffer = zeroBuffer;
|
||||
const int16_t zeroBuffer[PSP2_SAMPLES * 2] __attribute__((__aligned__(64))) = {0};
|
||||
const void* buffer = zeroBuffer;
|
||||
int audioPort = sceAudioOutOpenPort(SCE_AUDIO_OUT_PORT_TYPE_MAIN, PSP2_SAMPLES, 48000, SCE_AUDIO_OUT_MODE_STEREO);
|
||||
struct mPSP2AudioBuffer* outputBuffer = NULL;
|
||||
while (audio->running) {
|
||||
MutexLock(&audio->mutex);
|
||||
if (buffer != zeroBuffer) {
|
||||
if (outputBuffer) {
|
||||
// Can only happen in successive iterations
|
||||
audio->samples -= PSP2_SAMPLES;
|
||||
outputBuffer->full = false;
|
||||
ConditionWake(&audio->cond);
|
||||
}
|
||||
if (audio->samples >= PSP2_SAMPLES) {
|
||||
buffer = &audio->buffer[audio->readOffset];
|
||||
audio->readOffset += PSP2_SAMPLES;
|
||||
if (audio->readOffset >= PSP2_AUDIO_BUFFER_SIZE) {
|
||||
audio->readOffset = 0;
|
||||
}
|
||||
// Don't mark samples as read until the next loop iteration to prevent
|
||||
// writing to the buffer while being read (see above)
|
||||
outputBuffer = &audio->outputBuffers[audio->currentAudioBuffer];
|
||||
if (outputBuffer->full) {
|
||||
buffer = outputBuffer->samples;
|
||||
audio->currentAudioBuffer = (audio->currentAudioBuffer + 1) % BUFFERS;
|
||||
} else {
|
||||
buffer = zeroBuffer;
|
||||
outputBuffer = NULL;
|
||||
}
|
||||
MutexUnlock(&audio->mutex);
|
||||
|
||||
sceAudioOutOutput(audioPort, buffer);
|
||||
}
|
||||
sceAudioOutReleasePort(audioPort);
|
||||
|
@ -243,25 +247,21 @@ static void _requestImage(struct mImageSource* source, const void** buffer, size
|
|||
sceCameraRead(imageSource->cam - 1, &read);
|
||||
}
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right) {
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer* buf) {
|
||||
UNUSED(stream);
|
||||
MutexLock(&audioContext.mutex);
|
||||
while (audioContext.samples + PSP2_SAMPLES >= PSP2_AUDIO_BUFFER_SIZE) {
|
||||
if (!frameLimiter) {
|
||||
blip_clear(left);
|
||||
blip_clear(right);
|
||||
MutexUnlock(&audioContext.mutex);
|
||||
return;
|
||||
mAudioResamplerProcess(&audioContext.resampler);
|
||||
while (mAudioBufferAvailable(&audioContext.buffer) >= PSP2_SAMPLES) {
|
||||
struct mPSP2AudioBuffer* buffer = &audioContext.outputBuffers[audioContext.nextAudioBuffer];
|
||||
while (buffer->full) {
|
||||
if (!frameLimiter) {
|
||||
break;
|
||||
}
|
||||
ConditionWait(&audioContext.cond, &audioContext.mutex);
|
||||
}
|
||||
ConditionWait(&audioContext.cond, &audioContext.mutex);
|
||||
}
|
||||
struct mStereoSample* samples = &audioContext.buffer[audioContext.writeOffset];
|
||||
blip_read_samples(left, &samples[0].left, PSP2_SAMPLES, true);
|
||||
blip_read_samples(right, &samples[0].right, PSP2_SAMPLES, true);
|
||||
audioContext.samples += PSP2_SAMPLES;
|
||||
audioContext.writeOffset += PSP2_SAMPLES;
|
||||
if (audioContext.writeOffset >= PSP2_AUDIO_BUFFER_SIZE) {
|
||||
audioContext.writeOffset = 0;
|
||||
mAudioBufferRead(&audioContext.buffer, buffer->samples, PSP2_SAMPLES);
|
||||
buffer->full = true;
|
||||
audioContext.nextAudioBuffer = (audioContext.nextAudioBuffer + 1) % BUFFERS;
|
||||
}
|
||||
MutexUnlock(&audioContext.mutex);
|
||||
}
|
||||
|
@ -294,7 +294,11 @@ void mPSP2SetFrameLimiter(struct mGUIRunner* runner, bool limit) {
|
|||
UNUSED(runner);
|
||||
if (!frameLimiter && limit) {
|
||||
MutexLock(&audioContext.mutex);
|
||||
while (audioContext.samples) {
|
||||
while (true) {
|
||||
struct mPSP2AudioBuffer* buffer = &audioContext.outputBuffers[audioContext.currentAudioBuffer];
|
||||
if (!buffer->full) {
|
||||
break;
|
||||
}
|
||||
ConditionWait(&audioContext.cond, &audioContext.mutex);
|
||||
}
|
||||
MutexUnlock(&audioContext.mutex);
|
||||
|
@ -334,6 +338,9 @@ void mPSP2Setup(struct mGUIRunner* runner) {
|
|||
|
||||
runner->core->setVideoBuffer(runner->core, vita2d_texture_get_datap(tex[currentTex]), 256);
|
||||
runner->core->setAudioBufferSize(runner->core, PSP2_SAMPLES);
|
||||
mAudioBufferInit(&audioContext.buffer, PSP2_AUDIO_BUFFER_SIZE, 2);
|
||||
mAudioResamplerInit(&audioContext.resampler, mINTERPOLATOR_COSINE);
|
||||
mAudioResamplerSetDestination(&audioContext.resampler, &audioContext.buffer, 48000);
|
||||
|
||||
rotation.d.sample = _sampleRotation;
|
||||
rotation.d.readTiltX = _readTiltX;
|
||||
|
@ -374,12 +381,6 @@ void mPSP2Setup(struct mGUIRunner* runner) {
|
|||
}
|
||||
|
||||
void mPSP2LoadROM(struct mGUIRunner* runner) {
|
||||
float rate = 60.0f / 1.001f;
|
||||
sceDisplayGetRefreshRate(&rate);
|
||||
double ratio = mCoreCalculateFramerateRatio(runner->core, rate);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 0), runner->core->frequency(runner->core), 48000 * ratio);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 1), runner->core->frequency(runner->core), 48000 * ratio);
|
||||
|
||||
switch (runner->core->platform(runner->core)) {
|
||||
#ifdef M_CORE_GBA
|
||||
case mPLATFORM_GBA:
|
||||
|
@ -415,10 +416,17 @@ void mPSP2LoadROM(struct mGUIRunner* runner) {
|
|||
|
||||
MutexInit(&audioContext.mutex);
|
||||
ConditionInit(&audioContext.cond);
|
||||
memset(audioContext.buffer, 0, sizeof(audioContext.buffer));
|
||||
audioContext.readOffset = 0;
|
||||
audioContext.writeOffset = 0;
|
||||
mAudioBufferClear(&audioContext.buffer);
|
||||
audioContext.nextAudioBuffer = 0;
|
||||
audioContext.currentAudioBuffer = 0;
|
||||
audioContext.running = true;
|
||||
|
||||
float rate = 60.0f / 1.001f;
|
||||
sceDisplayGetRefreshRate(&rate);
|
||||
double ratio = mCoreCalculateFramerateRatio(runner->core, rate);
|
||||
unsigned sampleRate = runner->core->audioSampleRate(runner->core);
|
||||
mAudioBufferClear(&audioContext.buffer);
|
||||
mAudioResamplerSetSource(&audioContext.resampler, runner->core->getAudioBuffer(runner->core), sampleRate / ratio, true);
|
||||
ThreadCreate(&audioThread, _audioThread, &audioContext);
|
||||
}
|
||||
|
||||
|
@ -483,6 +491,8 @@ void mPSP2Unpaused(struct mGUIRunner* runner) {
|
|||
void mPSP2Teardown(struct mGUIRunner* runner) {
|
||||
UNUSED(runner);
|
||||
mCircleBufferDeinit(&rumble.history);
|
||||
mAudioResamplerDeinit(&audioContext.resampler);
|
||||
mAudioBufferDeinit(&audioContext.buffer);
|
||||
vita2d_free_texture(tex[0]);
|
||||
vita2d_free_texture(tex[1]);
|
||||
vita2d_free_texture(screenshot);
|
||||
|
|
|
@ -36,7 +36,6 @@ void free(void*);
|
|||
|
||||
#include <mgba/flags.h>
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/cache-set.h>
|
||||
#include <mgba/core/core.h>
|
||||
#include <mgba/core/map-cache.h>
|
||||
|
|
|
@ -21,7 +21,6 @@ ffi.set_source("mgba._pylib", """
|
|||
#define MGBA_EXPORT
|
||||
#include <mgba/flags.h>
|
||||
#define OPAQUE_THREADING
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/cache-set.h>
|
||||
#include <mgba-util/common.h>
|
||||
#include <mgba/core/core.h>
|
||||
|
|
|
@ -1,57 +0,0 @@
|
|||
# Copyright (c) 2013-2018 Jeffrey Pfau
|
||||
#
|
||||
# This Source Code Form is subject to the terms of the Mozilla Public
|
||||
# License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
from ._pylib import ffi, lib # pylint: disable=no-name-in-module
|
||||
|
||||
|
||||
class Buffer(object):
|
||||
def __init__(self, native, internal_rate):
|
||||
self._native = native
|
||||
self._internal_rate = internal_rate
|
||||
|
||||
@property
|
||||
def available(self):
|
||||
return lib.blip_samples_avail(self._native)
|
||||
|
||||
def set_rate(self, rate):
|
||||
lib.blip_set_rates(self._native, self._internal_rate, rate)
|
||||
|
||||
def read(self, samples):
|
||||
buffer = ffi.new("short[%i]" % samples)
|
||||
count = self.read_into(buffer, samples, 1, 0)
|
||||
return buffer[:count]
|
||||
|
||||
def read_into(self, buffer, samples, channels=1, interleave=0):
|
||||
return lib.blip_read_samples(self._native, ffi.addressof(buffer, interleave), samples, channels == 2)
|
||||
|
||||
def clear(self):
|
||||
lib.blip_clear(self._native)
|
||||
|
||||
|
||||
class StereoBuffer(object):
|
||||
def __init__(self, left, right):
|
||||
self._left = left
|
||||
self._right = right
|
||||
|
||||
@property
|
||||
def available(self):
|
||||
return min(self._left.available, self._right.available)
|
||||
|
||||
def set_rate(self, rate):
|
||||
self._left.set_rate(rate)
|
||||
self._right.set_rate(rate)
|
||||
|
||||
def read(self, samples):
|
||||
buffer = ffi.new("short[%i]" % (2 * samples))
|
||||
count = self.read_into(buffer, samples)
|
||||
return buffer[0:2 * count]
|
||||
|
||||
def read_into(self, buffer, samples):
|
||||
samples = self._left.read_into(buffer, samples, 2, 0)
|
||||
return self._right.read_into(buffer, samples, 2, 1)
|
||||
|
||||
def clear(self):
|
||||
self._left.clear()
|
||||
self._right.clear()
|
|
@ -250,14 +250,6 @@ class Core(object):
|
|||
def audio_buffer_size(self):
|
||||
return self._core.getAudioBufferSize(self._core)
|
||||
|
||||
@protected
|
||||
def get_audio_channels(self):
|
||||
return audio.StereoBuffer(self.get_audio_channel(0), self.get_audio_channel(1));
|
||||
|
||||
@protected
|
||||
def get_audio_channel(self, channel):
|
||||
return audio.Buffer(self._core.getAudioChannel(self._core, channel), self.frequency)
|
||||
|
||||
@protected
|
||||
def reset(self):
|
||||
self._core.reset(self._core)
|
||||
|
|
|
@ -7,11 +7,12 @@
|
|||
|
||||
#include "LogController.h"
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/core.h>
|
||||
#include <mgba/core/thread.h>
|
||||
#include <mgba/internal/gba/audio.h>
|
||||
|
||||
#include <QDebug>
|
||||
|
||||
using namespace QGBA;
|
||||
|
||||
AudioDevice::AudioDevice(QObject* parent)
|
||||
|
@ -19,6 +20,13 @@ AudioDevice::AudioDevice(QObject* parent)
|
|||
, m_context(nullptr)
|
||||
{
|
||||
setOpenMode(ReadOnly);
|
||||
mAudioBufferInit(&m_buffer, 0x4000, 2);
|
||||
mAudioResamplerInit(&m_resampler, mINTERPOLATOR_SINC);
|
||||
}
|
||||
|
||||
AudioDevice::~AudioDevice() {
|
||||
mAudioResamplerDeinit(&m_resampler);
|
||||
mAudioBufferDeinit(&m_buffer);
|
||||
}
|
||||
|
||||
void AudioDevice::setFormat(const QAudioFormat& format) {
|
||||
|
@ -26,15 +34,18 @@ void AudioDevice::setFormat(const QAudioFormat& format) {
|
|||
LOG(QT, INFO) << tr("Can't set format of context-less audio device");
|
||||
return;
|
||||
}
|
||||
double fauxClock = mCoreCalculateFramerateRatio(m_context->core, m_context->impl->sync.fpsTarget);
|
||||
mCoreSyncLockAudio(&m_context->impl->sync);
|
||||
blip_set_rates(m_context->core->getAudioChannel(m_context->core, 0),
|
||||
m_context->core->frequency(m_context->core), format.sampleRate() * fauxClock);
|
||||
blip_set_rates(m_context->core->getAudioChannel(m_context->core, 1),
|
||||
m_context->core->frequency(m_context->core), format.sampleRate() * fauxClock);
|
||||
mCore* core = m_context->core;
|
||||
mAudioResamplerSetSource(&m_resampler, core->getAudioBuffer(core), core->audioSampleRate(core), true);
|
||||
m_format = format;
|
||||
adjustResampler();
|
||||
mCoreSyncUnlockAudio(&m_context->impl->sync);
|
||||
}
|
||||
|
||||
void AudioDevice::setBufferSamples(int samples) {
|
||||
m_samples = samples;
|
||||
}
|
||||
|
||||
void AudioDevice::setInput(mCoreThread* input) {
|
||||
m_context = input;
|
||||
}
|
||||
|
@ -45,15 +56,25 @@ qint64 AudioDevice::readData(char* data, qint64 maxSize) {
|
|||
return 0;
|
||||
}
|
||||
|
||||
maxSize /= sizeof(mStereoSample);
|
||||
if (!maxSize) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
mCoreSyncLockAudio(&m_context->impl->sync);
|
||||
int available = std::min<qint64>({
|
||||
blip_samples_avail(m_context->core->getAudioChannel(m_context->core, 0)),
|
||||
maxSize,
|
||||
mAudioResamplerProcess(&m_resampler);
|
||||
if (mAudioBufferAvailable(&m_buffer) < 128) {
|
||||
mCoreSyncConsumeAudio(&m_context->impl->sync);
|
||||
// Audio is running slow...let's wait a tiny bit for more to come in
|
||||
QThread::usleep(100);
|
||||
mCoreSyncLockAudio(&m_context->impl->sync);
|
||||
mAudioResamplerProcess(&m_resampler);
|
||||
}
|
||||
quint64 available = std::min<quint64>({
|
||||
mAudioBufferAvailable(&m_buffer),
|
||||
static_cast<quint64>(maxSize / sizeof(mStereoSample)),
|
||||
std::numeric_limits<int>::max()
|
||||
});
|
||||
blip_read_samples(m_context->core->getAudioChannel(m_context->core, 0), &reinterpret_cast<mStereoSample*>(data)->left, available, true);
|
||||
blip_read_samples(m_context->core->getAudioChannel(m_context->core, 1), &reinterpret_cast<mStereoSample*>(data)->right, available, true);
|
||||
mAudioBufferRead(&m_buffer, reinterpret_cast<int16_t*>(data), available);
|
||||
mCoreSyncConsumeAudio(&m_context->impl->sync);
|
||||
return available * sizeof(mStereoSample);
|
||||
}
|
||||
|
@ -64,15 +85,33 @@ qint64 AudioDevice::writeData(const char*, qint64) {
|
|||
}
|
||||
|
||||
bool AudioDevice::atEnd() const {
|
||||
return !bytesAvailable();
|
||||
return false;
|
||||
}
|
||||
|
||||
qint64 AudioDevice::bytesAvailable() const {
|
||||
if (!m_context->core) {
|
||||
return true;
|
||||
}
|
||||
int available = mAudioBufferAvailable(&m_buffer);
|
||||
return available * sizeof(mStereoSample);
|
||||
}
|
||||
|
||||
qint64 AudioDevice::bytesAvailable() {
|
||||
if (!m_context->core) {
|
||||
return true;
|
||||
}
|
||||
mCoreSyncLockAudio(&m_context->impl->sync);
|
||||
int available = blip_samples_avail(m_context->core->getAudioChannel(m_context->core, 0));
|
||||
adjustResampler();
|
||||
mAudioResamplerProcess(&m_resampler);
|
||||
int available = mAudioBufferAvailable(&m_buffer);
|
||||
mCoreSyncUnlockAudio(&m_context->impl->sync);
|
||||
return available * sizeof(mStereoSample);
|
||||
}
|
||||
|
||||
void AudioDevice::adjustResampler() {
|
||||
mCore* core = m_context->core;
|
||||
double fauxClock = mCoreCalculateFramerateRatio(m_context->core, m_context->impl->sync.fpsTarget);
|
||||
mAudioResamplerSetDestination(&m_resampler, &m_buffer, m_format.sampleRate() * fauxClock);
|
||||
m_context->impl->sync.audioHighWater = m_samples + m_resampler.highWaterMark + m_resampler.lowWaterMark;
|
||||
m_context->impl->sync.audioHighWater *= core->audioSampleRate(core) / (m_format.sampleRate() * fauxClock);
|
||||
}
|
||||
|
|
|
@ -8,6 +8,9 @@
|
|||
#include <QAudioFormat>
|
||||
#include <QIODevice>
|
||||
|
||||
#include <mgba-util/audio-buffer.h>
|
||||
#include <mgba-util/audio-resampler.h>
|
||||
|
||||
struct mCoreThread;
|
||||
|
||||
namespace QGBA {
|
||||
|
@ -17,18 +20,28 @@ Q_OBJECT
|
|||
|
||||
public:
|
||||
AudioDevice(QObject* parent = nullptr);
|
||||
virtual ~AudioDevice();
|
||||
|
||||
void setInput(mCoreThread* input);
|
||||
void setFormat(const QAudioFormat& format);
|
||||
void setBufferSamples(int samples);
|
||||
bool atEnd() const override;
|
||||
qint64 bytesAvailable() const override;
|
||||
qint64 bytesAvailable();
|
||||
bool isSequential() const override { return true; }
|
||||
|
||||
protected:
|
||||
virtual qint64 readData(char* data, qint64 maxSize) override;
|
||||
virtual qint64 writeData(const char* data, qint64 maxSize) override;
|
||||
|
||||
private:
|
||||
size_t m_samples = 512;
|
||||
QAudioFormat m_format;
|
||||
mCoreThread* m_context;
|
||||
mAudioBuffer m_buffer;
|
||||
mAudioResampler m_resampler;
|
||||
|
||||
void adjustResampler();
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -83,6 +83,7 @@ bool AudioProcessorQt::start() {
|
|||
if (state != QAudio::IdleState) {
|
||||
return;
|
||||
}
|
||||
recheckUnderflow();
|
||||
m_recheckTimer.start();
|
||||
});
|
||||
#endif
|
||||
|
@ -91,6 +92,7 @@ bool AudioProcessorQt::start() {
|
|||
if (m_audioOutput->state() == QAudio::SuspendedState) {
|
||||
m_audioOutput->resume();
|
||||
} else {
|
||||
m_device->setBufferSamples(m_samples);
|
||||
m_device->setInput(input());
|
||||
m_device->setFormat(m_audioOutput->format());
|
||||
m_audioOutput->start(m_device.get());
|
||||
|
@ -107,12 +109,17 @@ void AudioProcessorQt::pause() {
|
|||
}
|
||||
}
|
||||
|
||||
void AudioProcessorQt::setBufferSamples(int) {
|
||||
void AudioProcessorQt::setBufferSamples(int samples) {
|
||||
m_samples = samples;
|
||||
if (m_device) {
|
||||
m_device->setBufferSamples(samples);
|
||||
}
|
||||
}
|
||||
|
||||
void AudioProcessorQt::inputParametersChanged() {
|
||||
if (m_device) {
|
||||
m_device->setFormat(m_audioOutput->format());
|
||||
m_device->setBufferSamples(m_samples);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -138,7 +145,7 @@ void AudioProcessorQt::recheckUnderflow() {
|
|||
m_recheckTimer.stop();
|
||||
return;
|
||||
}
|
||||
if (!m_device->atEnd()) {
|
||||
if (m_device->bytesAvailable()) {
|
||||
start();
|
||||
m_recheckTimer.stop();
|
||||
}
|
||||
|
|
|
@ -51,6 +51,7 @@ private:
|
|||
std::unique_ptr<QAudioOutput> m_audioOutput;
|
||||
#endif
|
||||
std::unique_ptr<AudioDevice> m_device;
|
||||
size_t m_samples = 1024;
|
||||
unsigned m_sampleRate = 44100;
|
||||
};
|
||||
|
||||
|
|
|
@ -7,12 +7,6 @@
|
|||
|
||||
#include <mgba/core/core.h>
|
||||
#include <mgba/core/thread.h>
|
||||
#include <mgba/internal/gba/audio.h>
|
||||
#include <mgba/internal/gba/gba.h>
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
|
||||
#define BUFFER_SIZE (GBA_AUDIO_SAMPLES >> 2)
|
||||
|
||||
mLOG_DEFINE_CATEGORY(SDL_AUDIO, "SDL Audio", "platform.sdl.audio");
|
||||
|
||||
|
@ -47,6 +41,10 @@ bool mSDLInitAudio(struct mSDLAudio* context, struct mCoreThread* threadContext)
|
|||
}
|
||||
context->core = 0;
|
||||
|
||||
mAudioBufferInit(&context->buffer, context->samples, context->obtainedSpec.channels);
|
||||
mAudioResamplerInit(&context->resampler, mINTERPOLATOR_SINC);
|
||||
mAudioResamplerSetDestination(&context->resampler, &context->buffer, context->obtainedSpec.freq);
|
||||
|
||||
if (threadContext) {
|
||||
context->core = threadContext->core;
|
||||
context->sync = &threadContext->impl->sync;
|
||||
|
@ -70,6 +68,8 @@ void mSDLDeinitAudio(struct mSDLAudio* context) {
|
|||
SDL_PauseAudio(1);
|
||||
SDL_CloseAudio();
|
||||
#endif
|
||||
mAudioBufferDeinit(&context->buffer);
|
||||
mAudioResamplerDeinit(&context->resampler);
|
||||
SDL_QuitSubSystem(SDL_INIT_AUDIO);
|
||||
}
|
||||
|
||||
|
@ -97,13 +97,11 @@ static void _mSDLAudioCallback(void* context, Uint8* data, int len) {
|
|||
memset(data, 0, len);
|
||||
return;
|
||||
}
|
||||
blip_t* left = NULL;
|
||||
blip_t* right = NULL;
|
||||
int32_t clockRate = 1;
|
||||
struct mAudioBuffer* buffer = NULL;
|
||||
unsigned sampleRate = 32768;
|
||||
if (audioContext->core) {
|
||||
clockRate = audioContext->core->frequency(audioContext->core);
|
||||
left = audioContext->core->getAudioChannel(audioContext->core, 0);
|
||||
right = audioContext->core->getAudioChannel(audioContext->core, 1);
|
||||
buffer = audioContext->core->getAudioBuffer(audioContext->core);
|
||||
sampleRate = audioContext->core->audioSampleRate(audioContext->core);
|
||||
}
|
||||
double fauxClock = 1;
|
||||
if (audioContext->sync) {
|
||||
|
@ -111,18 +109,13 @@ static void _mSDLAudioCallback(void* context, Uint8* data, int len) {
|
|||
fauxClock = mCoreCalculateFramerateRatio(audioContext->core, audioContext->sync->fpsTarget);
|
||||
}
|
||||
mCoreSyncLockAudio(audioContext->sync);
|
||||
audioContext->sync->audioHighWater = audioContext->samples + audioContext->resampler.highWaterMark + audioContext->resampler.lowWaterMark;
|
||||
audioContext->sync->audioHighWater *= sampleRate / (fauxClock * audioContext->obtainedSpec.freq);
|
||||
}
|
||||
blip_set_rates(left, clockRate, audioContext->obtainedSpec.freq * fauxClock);
|
||||
blip_set_rates(right, clockRate, audioContext->obtainedSpec.freq * fauxClock);
|
||||
mAudioResamplerSetSource(&audioContext->resampler, buffer, sampleRate / fauxClock, true);
|
||||
mAudioResamplerProcess(&audioContext->resampler);
|
||||
len /= 2 * audioContext->obtainedSpec.channels;
|
||||
int available = blip_samples_avail(left);
|
||||
if (available > len) {
|
||||
available = len;
|
||||
}
|
||||
blip_read_samples(left, (short*) data, available, audioContext->obtainedSpec.channels == 2);
|
||||
if (audioContext->obtainedSpec.channels == 2) {
|
||||
blip_read_samples(right, ((short*) data) + 1, available, 1);
|
||||
}
|
||||
int available = mAudioBufferRead(&audioContext->buffer, (int16_t*) data, len);
|
||||
|
||||
if (audioContext->sync) {
|
||||
mCoreSyncConsumeAudio(audioContext->sync);
|
||||
|
|
|
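Several frontends in this diff (SDL, Qt, PSP2, Wii) wire the new resampler the same way: an mAudioBuffer as the destination, an mAudioResampler whose source is the core's buffer, and an mAudioResamplerProcess() call before each read. A condensed sketch of that pattern; the 0x4000 capacity, the 48000 Hz output rate, and the setup/pull/teardown function names are illustrative:

```c
#include <mgba/core/core.h>
#include <mgba-util/audio-buffer.h>
#include <mgba-util/audio-resampler.h>

static struct mAudioBuffer outputBuffer;
static struct mAudioResampler resampler;

// One-time setup: resample from the core's native rate to the device rate.
static void setupAudio(struct mCore* core) {
	mAudioBufferInit(&outputBuffer, 0x4000, 2);
	mAudioResamplerInit(&resampler, mINTERPOLATOR_SINC);
	mAudioResamplerSetDestination(&resampler, &outputBuffer, 48000);
	mAudioResamplerSetSource(&resampler, core->getAudioBuffer(core), core->audioSampleRate(core), true);
}

// Per callback: convert whatever the core has produced, then read it out.
static int pullAudio(int16_t* out, size_t frames) {
	mAudioResamplerProcess(&resampler);
	return mAudioBufferRead(&outputBuffer, out, frames);
}

// Teardown mirrors setup.
static void teardownAudio(void) {
	mAudioResamplerDeinit(&resampler);
	mAudioBufferDeinit(&outputBuffer);
}
```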
@ -11,6 +11,8 @@
|
|||
CXX_GUARD_START
|
||||
|
||||
#include <mgba/core/log.h>
|
||||
#include <mgba-util/audio-buffer.h>
|
||||
#include <mgba-util/audio-resampler.h>
|
||||
|
||||
#include <SDL.h>
|
||||
// Altivec sometimes defines this
|
||||
|
@ -30,6 +32,8 @@ struct mSDLAudio {
|
|||
unsigned sampleRate;
|
||||
|
||||
// State
|
||||
struct mAudioBuffer buffer;
|
||||
struct mAudioResampler resampler;
|
||||
SDL_AudioSpec desiredSpec;
|
||||
SDL_AudioSpec obtainedSpec;
|
||||
#if SDL_VERSION_ATLEAST(2, 0, 0)
|
||||
|
|
|
@ -4,7 +4,6 @@
|
|||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#include "feature/gui/gui-runner.h"
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/core.h>
|
||||
#include <mgba/internal/gb/video.h>
|
||||
#include <mgba/internal/gba/audio.h>
|
||||
|
@ -305,9 +304,6 @@ static void _setup(struct mGUIRunner* runner) {
|
|||
|
||||
u32 samplerate = runner->core->audioSampleRate(runner->core);
|
||||
double ratio = mCoreCalculateFramerateRatio(runner->core, 60.0);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 0), runner->core->frequency(runner->core), samplerate);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 1), runner->core->frequency(runner->core), samplerate);
|
||||
|
||||
audrvVoiceInit(&audrenDriver, 0, 2, PcmFormat_Int16, samplerate / ratio);
|
||||
audrvVoiceSetDestinationMix(&audrenDriver, 0, AUDREN_FINAL_MIX_ID);
|
||||
audrvVoiceSetMixFactor(&audrenDriver, 0, 1.0f, 0, 0);
|
||||
|
@ -576,7 +572,7 @@ static bool _running(struct mGUIRunner* runner) {
|
|||
return appletMainLoop();
|
||||
}
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right) {
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer* buffer) {
|
||||
UNUSED(stream);
|
||||
int i;
|
||||
while (true) {
|
||||
|
@ -590,15 +586,13 @@ static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* rig
|
|||
break;
|
||||
}
|
||||
if (!frameLimiter) {
|
||||
blip_clear(left);
|
||||
blip_clear(right);
|
||||
mAudioBufferClear(buffer);
|
||||
return;
|
||||
}
|
||||
audrenWaitFrame();
|
||||
}
|
||||
struct mStereoSample* samples = audioBuffer[i];
|
||||
blip_read_samples(left, &samples[0].left, SAMPLES, true);
|
||||
blip_read_samples(right, &samples[0].right, SAMPLES, true);
|
||||
mAudioBufferRead(buffer, (int16_t*) samples, SAMPLES);
|
||||
armDCacheFlush(samples, SAMPLES * sizeof(struct mStereoSample));
|
||||
audrvVoiceAddWaveBuf(&audrenDriver, 0, &audrvBuffer[i]);
|
||||
|
||||
|
|
|
@ -3,7 +3,6 @@
|
|||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/cheats.h>
|
||||
#include <mgba/core/config.h>
|
||||
#include <mgba/core/core.h>
|
||||
|
@ -153,9 +152,6 @@ int main(int argc, char** argv) {
|
|||
savestate = 0;
|
||||
}
|
||||
|
||||
blip_set_rates(core->getAudioChannel(core, 0), core->frequency(core), 0x8000);
|
||||
blip_set_rates(core->getAudioChannel(core, 1), core->frequency(core), 0x8000);
|
||||
|
||||
_fuzzRunloop(core, fuzzOpts.frames);
|
||||
|
||||
if (hasDebugger) {
|
||||
|
@ -188,8 +184,7 @@ static void _fuzzRunloop(struct mCore* core, int frames) {
|
|||
do {
|
||||
core->runFrame(core);
|
||||
--frames;
|
||||
blip_clear(core->getAudioChannel(core, 0));
|
||||
blip_clear(core->getAudioChannel(core, 1));
|
||||
mAudioBufferClear(core->getAudioBuffer(core));
|
||||
} while (frames > 0 && !_dispatchExiting);
|
||||
}
|
||||
|
||||
|
|
|
@ -3,7 +3,6 @@
|
|||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/cheats.h>
|
||||
#include <mgba/core/config.h>
|
||||
#include <mgba/core/core.h>
|
||||
|
|
|
@ -14,13 +14,13 @@
|
|||
|
||||
#include <mgba-util/common.h>
|
||||
|
||||
#include <mgba/core/blip_buf.h>
|
||||
#include <mgba/core/core.h>
|
||||
#include "feature/gui/gui-runner.h"
|
||||
#include <mgba/internal/gb/video.h>
|
||||
#include <mgba/internal/gba/audio.h>
|
||||
#include <mgba/internal/gba/gba.h>
|
||||
#include <mgba/internal/gba/input.h>
|
||||
#include <mgba-util/audio-resampler.h>
|
||||
#include <mgba-util/gui.h>
|
||||
#include <mgba-util/gui/file-select.h>
|
||||
#include <mgba-util/gui/font.h>
|
||||
|
@ -76,7 +76,7 @@ static enum VideoMode {
|
|||
|
||||
static void _retraceCallback(u32 count);
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right);
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer*);
|
||||
static void _audioDMA(void);
|
||||
static void _setRumble(struct mRumble* rumble, int enable);
|
||||
static void _sampleRotation(struct mRotationSource* source);
|
||||
|
@ -141,12 +141,14 @@ static void* framebuffer[2] = { 0, 0 };
|
|||
static int whichFb = 0;
|
||||
|
||||
static struct AudioBuffer {
|
||||
struct mStereoSample samples[SAMPLES] __attribute__((__aligned__(32)));
|
||||
volatile size_t size;
|
||||
} audioBuffer[BUFFERS] = {0};
|
||||
int16_t samples[SAMPLES * 2] __attribute__((__aligned__(32)));
|
||||
volatile bool full;
|
||||
} audioBuffers[BUFFERS] = {0};
|
||||
static struct mAudioBuffer audioBuffer;
|
||||
static volatile int currentAudioBuffer = 0;
|
||||
static volatile int nextAudioBuffer = 0;
|
||||
static double audioSampleRate = 60.0 / 1.001;
|
||||
static struct mAudioResampler resampler;
|
||||
|
||||
static struct GUIFont* font;
|
||||
|
||||
|
@ -246,11 +248,6 @@ static void reconfigureScreen(struct mGUIRunner* runner) {
|
|||
if (runner) {
|
||||
runner->params.width = vmode->fbWidth * guiScale * wAdjust;
|
||||
runner->params.height = vmode->efbHeight * guiScale * hAdjust;
|
||||
if (runner->core) {
|
||||
double ratio = mCoreCalculateFramerateRatio(runner->core, audioSampleRate);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 0), runner->core->frequency(runner->core), 48000 * ratio);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 1), runner->core->frequency(runner->core), 48000 * ratio);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -269,7 +266,10 @@ int main(int argc, char* argv[]) {
|
|||
AUDIO_SetDSPSampleRate(AI_SAMPLERATE_48KHZ);
|
||||
AUDIO_RegisterDMACallback(_audioDMA);
|
||||
|
||||
memset(audioBuffer, 0, sizeof(audioBuffer));
|
||||
memset(audioBuffers, 0, sizeof(audioBuffers));
|
||||
mAudioBufferInit(&audioBuffer, SAMPLES * BUFFERS, 2);
|
||||
mAudioResamplerInit(&resampler, mINTERPOLATOR_COSINE);
|
||||
mAudioResamplerSetDestination(&resampler, &audioBuffer, 48000);
|
||||
#ifdef FIXED_ROM_BUFFER
|
||||
romBufferSize = GBA_SIZE_ROM0;
|
||||
romBuffer = SYS_GetArena2Lo();
|
||||
|
@ -663,6 +663,9 @@ int main(int argc, char* argv[]) {
|
|||
VIDEO_WaitVSync();
|
||||
mGUIDeinit(&runner);
|
||||
|
||||
mAudioResamplerDeinit(&resampler);
|
||||
mAudioBufferDeinit(&audioBuffer);
|
||||
|
||||
free(fifo);
|
||||
free(texmem);
|
||||
free(rescaleTexmem);
|
||||
|
@ -678,41 +681,38 @@ int main(int argc, char* argv[]) {
|
|||
}
|
||||
|
||||
static void _audioDMA(void) {
|
||||
struct AudioBuffer* buffer = &audioBuffer[currentAudioBuffer];
|
||||
if (buffer->size != SAMPLES) {
|
||||
struct AudioBuffer* buffer = &audioBuffers[currentAudioBuffer];
|
||||
if (!buffer->full) {
|
||||
printf("Recv %i %i%s", currentAudioBuffer, buffer->full, buffer->full ? "" : "!");
|
||||
return;
|
||||
}
|
||||
DCFlushRange(buffer->samples, SAMPLES * sizeof(struct mStereoSample));
|
||||
AUDIO_InitDMA((u32) buffer->samples, SAMPLES * sizeof(struct mStereoSample));
|
||||
buffer->size = 0;
|
||||
buffer->full = false;
|
||||
currentAudioBuffer = (currentAudioBuffer + 1) % BUFFERS;
|
||||
}
|
||||
|
||||
static void _postAudioBuffer(struct mAVStream* stream, blip_t* left, blip_t* right) {
|
||||
static void _postAudioBuffer(struct mAVStream* stream, struct mAudioBuffer* buf) {
|
||||
UNUSED(stream);
|
||||
|
||||
UNUSED(buf);
|
||||
mAudioResamplerProcess(&resampler);
|
||||
u32 level = 0;
|
||||
bool gotAudio = false;
|
||||
_CPU_ISR_Disable(level);
|
||||
struct AudioBuffer* buffer = &audioBuffer[nextAudioBuffer];
|
||||
int available = blip_samples_avail(left);
|
||||
if (available + buffer->size > SAMPLES) {
|
||||
available = SAMPLES - buffer->size;
|
||||
}
|
||||
if (available > 0) {
|
||||
// These appear to be reversed for AUDIO_InitDMA
|
||||
blip_read_samples(left, &buffer->samples[buffer->size].right, available, true);
|
||||
blip_read_samples(right, &buffer->samples[buffer->size].left, available, true);
|
||||
buffer->size += available;
|
||||
}
|
||||
if (buffer->size == SAMPLES) {
|
||||
int next = (nextAudioBuffer + 1) % BUFFERS;
|
||||
if ((currentAudioBuffer + BUFFERS - next) % BUFFERS != 1) {
|
||||
nextAudioBuffer = next;
|
||||
}
|
||||
if (!AUDIO_GetDMAEnableFlag()) {
|
||||
_audioDMA();
|
||||
AUDIO_StartDMA();
|
||||
while (mAudioBufferAvailable(&audioBuffer) >= SAMPLES) {
|
||||
struct AudioBuffer* buffer = &audioBuffers[nextAudioBuffer];
|
||||
if (buffer->full) {
|
||||
printf("Send %i %i%s", nextAudioBuffer, buffer->full, buffer->full ? "!!" : "");
|
||||
break;
|
||||
}
|
||||
mAudioBufferRead(&audioBuffer, buffer->samples, SAMPLES);
|
||||
buffer->full = true;
|
||||
nextAudioBuffer = (nextAudioBuffer + 1) % BUFFERS;
|
||||
gotAudio = true;
|
||||
}
|
||||
if (gotAudio && !AUDIO_GetDMAEnableFlag()) {
|
||||
_audioDMA();
|
||||
AUDIO_StartDMA();
|
||||
}
|
||||
_CPU_ISR_Restore(level);
|
||||
}
|
||||
|
@ -1416,15 +1416,11 @@ void _setup(struct mGUIRunner* runner) {
|
|||
|
||||
nextAudioBuffer = 0;
|
||||
currentAudioBuffer = 0;
|
||||
int i;
|
||||
for (i = 0; i < BUFFERS; ++i) {
|
||||
audioBuffer[i].size = 0;
|
||||
}
|
||||
memset(audioBuffers, 0, sizeof(audioBuffers));
|
||||
runner->core->setAudioBufferSize(runner->core, SAMPLES);
|
||||
|
||||
double ratio = mCoreCalculateFramerateRatio(runner->core, audioSampleRate);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 0), runner->core->frequency(runner->core), 48000 * ratio);
|
||||
blip_set_rates(runner->core->getAudioChannel(runner->core, 1), runner->core->frequency(runner->core), 48000 * ratio);
|
||||
mAudioResamplerSetSource(&resampler, runner->core->getAudioBuffer(runner->core), runner->core->audioSampleRate(runner->core) / ratio, true);
|
||||
|
||||
frameLimiter = true;
|
||||
}
|
||||
|
@ -1433,6 +1429,7 @@ void _gameUnloaded(struct mGUIRunner* runner) {
|
|||
UNUSED(runner);
|
||||
AUDIO_StopDMA();
|
||||
frameLimiter = true;
|
||||
mAudioBufferClear(&audioBuffer);
|
||||
VIDEO_SetBlack(true);
|
||||
VIDEO_Flush();
|
||||
VIDEO_WaitVSync();
|
||||