mirror of https://github.com/mgba-emu/mgba.git
GBA Audio: Adjust PSG sampling rate with SOUNDBIAS
This commit is contained in:
parent cbbaa42641
commit cbbcf7478e

CHANGES | 1 +
CHANGES
@@ -28,6 +28,7 @@ Emulation fixes:
 - GBA: Improve timing when not booting from BIOS
 - GBA: Fix expected entry point for multiboot ELFs (fixes mgba.io/i/2450)
 - GBA: Fix booting multiboot ROMs with no JOY entrypoint
+- GBA Audio: Adjust PSG sampling rate with SOUNDBIAS
 - GBA BIOS: Work around IRQ handling hiccup in Mario & Luigi (fixes mgba.io/i/1059)
 - GBA BIOS: Initial HLE timing estimation of UnLz77 functions (fixes mgba.io/i/2141)
 - GBA DMA: Fix DMA source direction bits being cleared (fixes mgba.io/i/2410)
@@ -674,7 +674,6 @@ void GBAudioUpdateFrame(struct GBAudio* audio) {
 }
 
 void GBAudioSamplePSG(struct GBAudio* audio, int16_t* left, int16_t* right) {
-	GBAudioRun(audio, mTimingCurrentTime(audio->timing), 0xF);
 	int dcOffset = audio->style == GB_AUDIO_GBA ? 0 : -0x8;
 	int sampleLeft = dcOffset;
 	int sampleRight = dcOffset;
@@ -731,6 +730,7 @@ static void _sample(struct mTiming* timing, void* user, uint32_t cyclesLate) {
 	struct GBAudio* audio = user;
 	int16_t sampleLeft = 0;
 	int16_t sampleRight = 0;
+	GBAudioRun(audio, mTimingCurrentTime(audio->timing), 0xF);
 	GBAudioSamplePSG(audio, &sampleLeft, &sampleRight);
 	sampleLeft = (sampleLeft * audio->masterVolume * 6) >> 7;
 	sampleRight = (sampleRight * audio->masterVolume * 6) >> 7;
@@ -25,6 +25,7 @@ mLOG_DEFINE_CATEGORY(GBA_AUDIO, "GBA Audio", "gba.audio");
 const unsigned GBA_AUDIO_SAMPLES = 2048;
 const int GBA_AUDIO_VOLUME_MAX = 0x100;
 
+static const int SAMPLE_INTERVAL = GBA_ARM7TDMI_FREQUENCY / 0x8000;
 static const int CLOCKS_PER_FRAME = 0x800;
 
 static int _applyBias(struct GBAAudio* audio, int sample);
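For reference, assuming GBA_ARM7TDMI_FREQUENCY is the 2^24 Hz (~16.78 MHz) ARM7TDMI clock, SAMPLE_INTERVAL works out to 0x1000000 / 0x8000 = 512 cycles, the period of the base 32768 Hz output rate. The later hunks keep scheduling the sample event at this fixed interval while the PSG itself is resampled at the finer SOUNDBIAS-selected interval inside the handler.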
@@ -218,6 +219,7 @@ void GBAAudioWriteSOUNDCNT_X(struct GBAAudio* audio, uint16_t value) {
 
 void GBAAudioWriteSOUNDBIAS(struct GBAAudio* audio, uint16_t value) {
 	audio->soundbias = value;
+	audio->sampleInterval = 0x200 >> GBARegisterSOUNDBIASGetResolution(value);
 }
 
 void GBAAudioWriteWaveRAM(struct GBAAudio* audio, int address, uint32_t value) {
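The shift above maps the SOUNDBIAS resolution field (bits 14-15) onto one of four PSG sample intervals. The sketch below is a standalone illustration of that mapping, not mGBA code; CPU_FREQUENCY and soundbiasResolution are stand-ins for GBA_ARM7TDMI_FREQUENCY and GBARegisterSOUNDBIASGetResolution.

#include <stdint.h>
#include <stdio.h>

/* Standalone sketch: 2^24 Hz ARM7TDMI clock; the SOUNDBIAS "sampling cycle" field lives in bits 14-15. */
#define CPU_FREQUENCY 0x1000000

static unsigned soundbiasResolution(uint16_t soundbias) {
	return (soundbias >> 14) & 3;
}

int main(void) {
	unsigned i;
	for (i = 0; i < 4; ++i) {
		uint16_t soundbias = (uint16_t) (i << 14); /* only the resolution bits matter here */
		int interval = 0x200 >> soundbiasResolution(soundbias); /* CPU cycles between PSG samples */
		printf("resolution %u: %d cycles -> %d Hz\n", i, interval, CPU_FREQUENCY / interval);
	}
	return 0;
}

Resolution 0 gives the stock 32768 Hz (512 cycles); resolution 3 gives 262144 Hz (64 cycles), the finest rate the new sampling loop has to accommodate.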
@@ -318,59 +320,74 @@ static int _applyBias(struct GBAAudio* audio, int sample) {
 
 static void _sample(struct mTiming* timing, void* user, uint32_t cyclesLate) {
 	struct GBAAudio* audio = user;
-	int16_t sampleLeft = 0;
-	int16_t sampleRight = 0;
-	int psgShift = 4 - audio->volume;
-	GBAudioSamplePSG(&audio->psg, &sampleLeft, &sampleRight);
-	sampleLeft >>= psgShift;
-	sampleRight >>= psgShift;
+	int16_t samplesLeft[8];
+	int16_t samplesRight[8];
+	int32_t timestamp = mTimingCurrentTime(&audio->p->timing) - cyclesLate - SAMPLE_INTERVAL;
+	int sample;
+	for (sample = 0; sample * audio->sampleInterval < (int32_t) SAMPLE_INTERVAL; ++sample) {
+		int16_t sampleLeft = 0;
+		int16_t sampleRight = 0;
+		int psgShift = 4 - audio->volume;
+		GBAudioRun(&audio->psg, timestamp + (sample + 1) * audio->sampleInterval, 0xF);
+		GBAudioSamplePSG(&audio->psg, &sampleLeft, &sampleRight);
+		sampleLeft >>= psgShift;
+		sampleRight >>= psgShift;
 
 		if (audio->mixer) {
 			audio->mixer->step(audio->mixer);
 		}
 		if (!audio->externalMixing) {
 			if (!audio->forceDisableChA) {
 				if (audio->chALeft) {
 					sampleLeft += (audio->chA.sample << 2) >> !audio->volumeChA;
+				}
+
+				if (audio->chARight) {
+					sampleRight += (audio->chA.sample << 2) >> !audio->volumeChA;
+				}
 			}
 
-			if (audio->chARight) {
-				sampleRight += (audio->chA.sample << 2) >> !audio->volumeChA;
+			if (!audio->forceDisableChB) {
+				if (audio->chBLeft) {
+					sampleLeft += (audio->chB.sample << 2) >> !audio->volumeChB;
+				}
+
+				if (audio->chBRight) {
+					sampleRight += (audio->chB.sample << 2) >> !audio->volumeChB;
+				}
 			}
 		}
 
-		if (!audio->forceDisableChB) {
-			if (audio->chBLeft) {
-				sampleLeft += (audio->chB.sample << 2) >> !audio->volumeChB;
-			}
-
-			if (audio->chBRight) {
-				sampleRight += (audio->chB.sample << 2) >> !audio->volumeChB;
-			}
-		}
+		sampleLeft = _applyBias(audio, sampleLeft);
+		sampleRight = _applyBias(audio, sampleRight);
+		samplesLeft[sample] = sampleLeft;
+		samplesRight[sample] = sampleRight;
 	}
 
-	sampleLeft = _applyBias(audio, sampleLeft);
-	sampleRight = _applyBias(audio, sampleRight);
-
 	mCoreSyncLockAudio(audio->p->sync);
 	unsigned produced;
-	if ((size_t) blip_samples_avail(audio->psg.left) < audio->samples) {
-		blip_add_delta(audio->psg.left, audio->clock, sampleLeft - audio->lastLeft);
-		blip_add_delta(audio->psg.right, audio->clock, sampleRight - audio->lastRight);
-		audio->lastLeft = sampleLeft;
-		audio->lastRight = sampleRight;
-		audio->clock += audio->sampleInterval;
-		if (audio->clock >= CLOCKS_PER_FRAME) {
-			blip_end_frame(audio->psg.left, CLOCKS_PER_FRAME);
-			blip_end_frame(audio->psg.right, CLOCKS_PER_FRAME);
-			audio->clock -= CLOCKS_PER_FRAME;
+	int i;
+	for (i = 0; i < sample; ++i) {
+		int16_t sampleLeft = samplesLeft[i];
+		int16_t sampleRight = samplesRight[i];
+		if ((size_t) blip_samples_avail(audio->psg.left) < audio->samples) {
+			blip_add_delta(audio->psg.left, audio->clock, sampleLeft - audio->lastLeft);
+			blip_add_delta(audio->psg.right, audio->clock, sampleRight - audio->lastRight);
+			audio->lastLeft = sampleLeft;
+			audio->lastRight = sampleRight;
+			audio->clock += audio->sampleInterval;
+			if (audio->clock >= CLOCKS_PER_FRAME) {
+				blip_end_frame(audio->psg.left, CLOCKS_PER_FRAME);
+				blip_end_frame(audio->psg.right, CLOCKS_PER_FRAME);
+				audio->clock -= CLOCKS_PER_FRAME;
+			}
 		}
 	}
-	produced = blip_samples_avail(audio->psg.left);
+	// TODO: Post all frames
 	if (audio->p->stream && audio->p->stream->postAudioFrame) {
-		audio->p->stream->postAudioFrame(audio->p->stream, sampleLeft, sampleRight);
+		audio->p->stream->postAudioFrame(audio->p->stream, samplesLeft[sample - 1], samplesRight[sample - 1]);
	}
+	produced = blip_samples_avail(audio->psg.left);
 	bool wait = produced >= audio->samples;
 	if (!mCoreSyncProduceAudio(audio->p->sync, audio->psg.left, audio->samples)) {
 		// Interrupted
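Spelling out the loop bound above: each sample event still spans SAMPLE_INTERVAL = 512 cycles, and the PSG is run and sampled every audio->sampleInterval cycles within that window, so the batch size is 512 / (0x200 >> resolution), i.e. 1, 2, 4, or at most 512 / 64 = 8 samples, which is why samplesLeft and samplesRight hold 8 entries. Note that the stream callback still posts only the last sample of each batch (samplesLeft[sample - 1]), which the new // TODO: Post all frames comment acknowledges.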
@@ -381,7 +398,7 @@ static void _sample(struct mTiming* timing, void* user, uint32_t cyclesLate) {
 		audio->p->stream->postAudioBuffer(audio->p->stream, audio->psg.left, audio->psg.right);
 	}
 
-	mTimingSchedule(timing, &audio->sampleEvent, audio->sampleInterval - cyclesLate);
+	mTimingSchedule(timing, &audio->sampleEvent, SAMPLE_INTERVAL - cyclesLate);
 }
 
 void GBAAudioSerialize(const struct GBAAudio* audio, struct GBASerializedState* state) {