some small cleanup of audio output code

nitsuja 2009-10-22 08:59:13 +00:00
parent c5e9f51523
commit a3fd06019e
2 changed files with 100 additions and 132 deletions

View File

@@ -217,85 +217,63 @@ private:
 class NitsujaSynchronizer : public ISynchronizingAudioBuffer
 {
 private:
-    template<typename T>
-    struct ssampT
+    struct ssamp
     {
-        T l, r;
-        enum { TMAX = (1 << ((sizeof(T) * 8) - 1)) - 1 };
-        ssampT() {}
-        ssampT(T ll, T rr) : l(ll), r(rr) {}
-        template<typename T2>
-        ssampT(ssampT<T2> s) : l(s.l), r(s.r) {}
-        ssampT operator+(const ssampT& rhs)
-        {
-            s32 l2 = l+rhs.l;
-            s32 r2 = r+rhs.r;
-            if(l2 > TMAX) l2 = TMAX;
-            if(l2 < -TMAX) l2 = -TMAX;
-            if(r2 > TMAX) r2 = TMAX;
-            if(r2 < -TMAX) r2 = -TMAX;
-            return ssampT(l2, r2);
-        }
-        ssampT operator/(int rhs)
-        {
-            return ssampT(l/rhs,r/rhs);
-        }
-        ssampT operator*(int rhs)
-        {
-            s32 l2 = l*rhs;
-            s32 r2 = r*rhs;
-            if(l2 > TMAX) l2 = TMAX;
-            if(l2 < -TMAX) l2 = -TMAX;
-            if(r2 > TMAX) r2 = TMAX;
-            if(r2 < -TMAX) r2 = -TMAX;
-            return ssampT(l2, r2);
-        }
-        ssampT muldiv (int num, int den)
-        {
-            num = std::max<T>(0,num);
-            return ssampT(((s32)l * num) / den, ((s32)r * num) / den);
-        }
-        ssampT faded (ssampT rhs, int cur, int start, int end)
-        {
-            if(cur <= start)
-                return *this;
-            if(cur >= end)
-                return rhs;
-            //float ang = 3.14159f * (float)(cur - start) / (float)(end - start);
-            //float amt = (1-cosf(ang))*0.5f;
-            //cur = start + (int)(amt * (end - start));
-            int inNum = cur - start;
-            int outNum = end - cur;
-            int denom = end - start;
-            int lrv = ((int)l * outNum + (int)rhs.l * inNum) / denom;
-            int rrv = ((int)r * outNum + (int)rhs.r * inNum) / denom;
-            return ssampT<T>(lrv,rrv);
-        }
+        s16 l, r;
+        ssamp() {}
+        ssamp(s16 ll, s16 rr) : l(ll), r(rr) {}
     };
-    typedef ssampT<s16> ssamp;
 
     std::vector<ssamp> sampleQueue;
 
-    // reflects x about y if x exceeds y.
-    FORCEINLINE int reflectAbout(int x, int y)
+    // returns values going between 0 and y-1 in a saw wave pattern, based on x
+    static FORCEINLINE int pingpong(int x, int y)
     {
-        //return (x)+(x)/(y)*(2*((y)-(x))-1);
-        return (x<y) ? x : (2*y-x-1);
+        x %= 2*y;
+        if(x >= y)
+            x = 2*y - x - 1;
+        return x;
+
+        // in case we want to switch to odd buffer sizes for more sharpness
+        //x %= 2*(y-1);
+        //if(x >= y)
+        //    x = 2*(y-1) - x;
+        //return x;
     }
 
-    void emit_samples(s16* outbuf, ssamp* samplebuf, int samples)
+    static FORCEINLINE ssamp crossfade (ssamp lhs, ssamp rhs, int cur, int start, int end)
     {
-        for(int i=0;i<samples;i++) {
-            *outbuf++ = samplebuf[i].l;
-            *outbuf++ = samplebuf[i].r;
-        }
+        if(cur <= start)
+            return lhs;
+        if(cur >= end)
+            return rhs;
+
+        // in case we want sine wave interpolation instead of linear here
+        //float ang = 3.14159f * (float)(cur - start) / (float)(end - start);
+        //cur = start + (int)((1-cosf(ang))*0.5f * (end - start));
+
+        int inNum = cur - start;
+        int outNum = end - cur;
+        int denom = end - start;
+        int lrv = ((int)lhs.l * outNum + (int)rhs.l * inNum) / denom;
+        int rrv = ((int)lhs.r * outNum + (int)rhs.r * inNum) / denom;
+        return ssamp(lrv,rrv);
     }
+
+    static FORCEINLINE void emit_sample(s16*& outbuf, ssamp sample)
+    {
+        *outbuf++ = sample.l;
+        *outbuf++ = sample.r;
+    }
+
+    static FORCEINLINE void emit_samples(s16*& outbuf, const ssamp* samplebuf, int samples)
+    {
+        for(int i=0;i<samples;i++)
+            emit_sample(outbuf,samplebuf[i]);
+    }
 
 public:
     NitsujaSynchronizer()
     {}
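For reference, pingpong() folds an ever-increasing output index back and forth across the queued buffer, and crossfade() is a plain linear blend of two stereo samples. A minimal standalone sketch of the same two formulas (names here are illustrative stand-ins, not part of the commit):

#include <cassert>

// same saw/triangle mapping as pingpong() above: 0,1,...,y-1,y-1,...,1,0,0,1,...
static int pingpong_demo(int x, int y)
{
    x %= 2*y;
    return (x < y) ? x : (2*y - x - 1);
}

int main()
{
    // with y = 4 the sequence for x = 0..9 is 0 1 2 3 3 2 1 0 0 1
    const int expected[10] = { 0, 1, 2, 3, 3, 2, 1, 0, 0, 1 };
    for (int x = 0; x < 10; x++)
        assert(pingpong_demo(x, 4) == expected[x]);

    // linear crossfade: halfway between start and end the two inputs get
    // equal weight, so (100*5 + 200*5) / 10 == 150
    int lhs = 100, rhs = 200, cur = 5, start = 0, end = 10;
    int blended = (lhs*(end - cur) + rhs*(cur - start)) / (end - start);
    assert(blended == 150);
    return 0;
}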
@@ -313,7 +291,8 @@ public:
 {
     int audiosize = samples_requested;
     int queued = sampleQueue.size();
-    // truncate input and output sizes to 8 because I am too lazy to deal with odd numbers
+    // truncate input and output sizes to multiples of 8 because I am too lazy to deal with odd numbers
     audiosize &= ~7;
     queued &= ~7;
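The `&= ~7` mask simply rounds a non-negative count down to the nearest multiple of 8. A quick sanity check, independent of the emulator code:

#include <cassert>

int main()
{
    int sizes[] = { 0, 7, 8, 9, 15, 16, 735 };
    for (int n : sizes)
        assert((n & ~7) == (n / 8) * 8);   // e.g. 735 & ~7 == 728
    return 0;
}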
@@ -324,8 +303,6 @@ public:
     if(queued > 900 || audiosize > queued * 2)
     {
         // not normal speed. we have to resample it somehow in this case.
-        static std::vector<ssamp> outsamples;
-        outsamples.clear();
         if(audiosize <= queued)
         {
             // fast forward speed
@@ -333,8 +310,8 @@ public:
             for(int i = 0; i < audiosize; i++)
             {
                 int j = i + queued - audiosize;
-                ssamp outsamp = sampleQueue[i].faded(sampleQueue[j], i,0,audiosize);
-                outsamples.push_back(ssamp(outsamp));
+                ssamp outsamp = crossfade(sampleQueue[i],sampleQueue[j], i,0,audiosize);
+                emit_sample(buf,outsamp);
             }
         }
         else
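In the fast-forward branch each output index i blends the i-th queued sample with the sample queued - audiosize positions later, with the weight sliding toward the later one, so the output starts at the head of the queue and ends on its tail. A tiny sketch of just that index mapping (the sizes are made up for illustration):

#include <cstdio>

int main()
{
    // hypothetical sizes: 16 samples queued, only 8 requested
    int queued = 16, audiosize = 8;
    for (int i = 0; i < audiosize; i++)
    {
        int j = i + queued - audiosize;
        // weight of queue[j] grows from 0/8 at i=0 to 7/8 at i=7
        printf("out[%d] = mix(queue[%d], queue[%d], %d/%d)\n",
               i, i, j, i, audiosize);
    }
    return 0;
}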
@@ -362,7 +339,9 @@ public:
             //
             // yes, this means we are spending some stretches of time playing the sound backwards,
             // but the stretches are short enough that this doesn't sound weird.
-            // apparently this also sounds less "echoey" or "robotic" than only playing it forwards.
+            // this lets us avoid most crackling problems due to the endpoints matching up.
+            // TODO: it might help to calculate the approximate fundamental frequency
+            // and reduce either buffer size such that the reflections line up with it.
             int midpointX = audiosize >> 1;
             int midpointY = queued >> 1;
@@ -377,7 +356,7 @@ public:
             int midpointXOffset = queued/2;
             while(true)
             {
-                int a = abs(reflectAbout((midpointX - midpointXOffset) % (queued*2), queued) - midpointY) - midpointXOffset;
+                int a = abs(pingpong(midpointX - midpointXOffset, queued) - midpointY) - midpointXOffset;
                 if(((a > 0) != (prevA > 0) || (a < 0) != (prevA < 0)) && prevA != 999999)
                 {
                     if((a + prevA)&1) // there's some sort of off-by-one problem with this search since we're moving diagonally...
@@ -389,19 +368,20 @@ public:
                 if(midpointXOffset < 0)
                 {
                     midpointXOffset = 0;
-                    break; // failed somehow? let's just omit the "B" stretch in this case.
+                    break; // failed to find it. the two sides probably meet exactly in the center.
                 }
             }
             int leftMidpointX = midpointX - midpointXOffset;
             int rightMidpointX = midpointX + midpointXOffset;
-            int leftMidpointY = reflectAbout((leftMidpointX) % (queued*2), queued);
-            int rightMidpointY = (queued-1) - reflectAbout((((int)audiosize-1 - rightMidpointX + queued*2) % (queued*2)), queued);
+            int leftMidpointY = pingpong(leftMidpointX, queued);
+            int rightMidpointY = (queued-1) - pingpong((int)audiosize-1 - rightMidpointX + queued*2, queued);
             // output the left almost-half of the sound (section "A")
             for(int x = 0; x < leftMidpointX; x++)
             {
-                int i = reflectAbout(x % (queued*2), queued);
-                outsamples.push_back(sampleQueue[i]);
+                int i = pingpong(x, queued);
+                emit_sample(buf,sampleQueue[i]);
             }
             // output the middle stretch (section "B")
@@ -409,27 +389,18 @@ public:
             int dyMidLeft = (leftMidpointY < midpointY) ? 1 : -1;
             int dyMidRight = (rightMidpointY > midpointY) ? 1 : -1;
             for(int x = leftMidpointX; x < midpointX; x++, y+=dyMidLeft)
-                outsamples.push_back(sampleQueue[y]);
+                emit_sample(buf,sampleQueue[y]);
             for(int x = midpointX; x < rightMidpointX; x++, y+=dyMidRight)
-                outsamples.push_back(sampleQueue[y]);
+                emit_sample(buf,sampleQueue[y]);
             // output the end of the queued sound (section "C")
             for(int x = rightMidpointX; x < audiosize; x++)
             {
-                int i = (queued-1) - reflectAbout((((int)audiosize-1 - x + queued*2) % (queued*2)), queued);
-                outsamples.push_back(sampleQueue[i]);
+                int i = (queued-1) - pingpong((int)audiosize-1 - x + queued*2, queued);
+                emit_sample(buf,sampleQueue[i]);
             }
-            assert(outsamples.back().l == sampleQueue[queued-1].l);
         } //end else
 
-        // if the user SPU mixed some channels, mix them in with our output now
-#ifdef HYBRID_SPU
-        SPU_MixAudio<2>(SPU_user,audiosize);
-        for(int i = 0; i < audiosize; i++)
-            outsamples[i] = outsamples[i] + *(ssamp*)(&SPU_user->outbuf[i*2]);
-#endif
-        emit_samples(buf,&outsamples[0],audiosize);
         sampleQueue.erase(sampleQueue.begin(), sampleQueue.begin() + queued);
         return audiosize;
     }
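The stretched sections keep both endpoints pinned: section "A" begins on the first queued sample and section "C" lands exactly on the last one, so playback stays continuous from one call to the next (this is what the assert removed by this commit used to verify). A small self-contained check of those two endpoints, with sizes made up for illustration:

#include <cassert>

static int pingpong_demo(int x, int y)
{
    x %= 2*y;
    return (x < y) ? x : (2*y - x - 1);
}

int main()
{
    // hypothetical slow-motion frame: more output requested than samples queued
    int queued = 16, audiosize = 40;

    // section "A" starts on the first queued sample...
    assert(pingpong_demo(0, queued) == 0);

    // ...and the section "C" index for the last output sample is the last queued sample
    int x = audiosize - 1;
    int i = (queued - 1) - pingpong_demo(audiosize - 1 - x + queued*2, queued);
    assert(i == queued - 1);
    return 0;
}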
@@ -446,22 +417,12 @@ public:
     if(audiosize >= queued)
     {
-#ifdef HYBRID_SPU
-        SPU_MixAudio<2>(SPU_user,queued);
-        for(int i = 0; i < queued; i++)
-            sampleQueue[i] = sampleQueue[i] + *(ssamp*)(&SPU_user->outbuf[i*2]);
-#endif
         emit_samples(buf,&sampleQueue[0],queued);
         sampleQueue.erase(sampleQueue.begin(), sampleQueue.begin() + queued);
         return queued;
     }
     else
     {
-#ifdef HYBRID_SPU
-        SPU_MixAudio<2>(SPU_user,audiosize);
-        for(int i = 0; i < audiosize; i++)
-            sampleQueue[i] = sampleQueue[i] + *(ssamp*)(&SPU_user->outbuf[i*2]);
-#endif
         emit_samples(buf,&sampleQueue[0],audiosize);
         sampleQueue.erase(sampleQueue.begin(), sampleQueue.begin()+audiosize);
         return audiosize;

View File

@@ -62,7 +62,7 @@ LPDIRECTSOUNDBUFFER lpDSB, lpDSB2;
 extern WINCLASS *MainWindow;
-static s16 *stereodata16;
+static s16 *stereodata16=0;
 static u32 soundoffset=0;
 static u32 soundbufsize;
 static LONG soundvolume;
@@ -95,14 +95,14 @@ int SNDDXInit(int buffersize)
     HRESULT ret;
     char tempstr[512];
 
-    if ((ret = DirectSoundCreate8(NULL, &lpDS8, NULL)) != DS_OK)
+    if (FAILED(ret = DirectSoundCreate8(NULL, &lpDS8, NULL)))
     {
         sprintf(tempstr, "DirectSound8Create error: %s - %s", DXGetErrorString8(ret), DXGetErrorDescription8(ret));
         MessageBox (NULL, tempstr, "Error", MB_OK | MB_ICONINFORMATION);
         return -1;
     }
 
-    if ((ret = lpDS8->SetCooperativeLevel(MainWindow->getHWnd(), DSSCL_PRIORITY)) != DS_OK)
+    if (FAILED(ret = lpDS8->SetCooperativeLevel(MainWindow->getHWnd(), DSSCL_PRIORITY)))
     {
         sprintf(tempstr, "IDirectSound8_SetCooperativeLevel error: %s - %s", DXGetErrorString8(ret), DXGetErrorDescription8(ret));
         MessageBox (NULL, tempstr, "Error", MB_OK | MB_ICONINFORMATION);
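Switching from `!= DS_OK` to `FAILED()` is the usual COM idiom: an HRESULT can carry success codes other than DS_OK (S_FALSE, or DirectSound's DS_NO_VIRTUALIZATION from SetCooperativeLevel, for instance), and only a negative value actually means failure. Roughly, the macro behaves like this sketch (the _demo names are stand-ins, not the real Windows definitions):

#include <cassert>
#include <cstdint>

typedef int32_t HRESULT_demo;              // stand-in for the 32-bit Windows HRESULT

// FAILED() treats any negative HRESULT as an error; everything >= 0 is success
static bool failed_demo(HRESULT_demo hr) { return hr < 0; }

int main()
{
    const HRESULT_demo S_OK_demo    = 0;
    const HRESULT_demo S_FALSE_demo = 1;                         // success, but not 0
    const HRESULT_demo E_FAIL_demo  = (HRESULT_demo)0x80004005u; // high bit set -> failure

    assert(!failed_demo(S_OK_demo));
    assert(!failed_demo(S_FALSE_demo));    // a "!= DS_OK" style check would wrongly reject this
    assert(failed_demo(E_FAIL_demo));
    return 0;
}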
@@ -115,7 +115,7 @@ int SNDDXInit(int buffersize)
     dsbdesc.dwBufferBytes = 0;
     dsbdesc.lpwfxFormat = NULL;
 
-    if ((ret = lpDS8->CreateSoundBuffer(&dsbdesc, &lpDSB, NULL)) != DS_OK)
+    if (FAILED(ret = lpDS8->CreateSoundBuffer(&dsbdesc, &lpDSB, NULL)))
     {
         sprintf(tempstr, "Error when creating primary sound buffer: %s - %s", DXGetErrorString8(ret), DXGetErrorDescription8(ret));
         MessageBox (NULL, tempstr, "Error", MB_OK | MB_ICONINFORMATION);
@@ -132,7 +132,7 @@ int SNDDXInit(int buffersize)
     wfx.nBlockAlign = (wfx.wBitsPerSample / 8) * wfx.nChannels;
     wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign;
 
-    if ((ret = lpDSB->SetFormat(&wfx)) != DS_OK)
+    if (FAILED(ret = lpDSB->SetFormat(&wfx)))
     {
         sprintf(tempstr, "IDirectSoundBuffer8_SetFormat error: %s - %s", DXGetErrorString8(ret), DXGetErrorDescription8(ret));
         MessageBox (NULL, tempstr, "Error", MB_OK | MB_ICONINFORMATION);
@@ -147,7 +147,7 @@ int SNDDXInit(int buffersize)
     dsbdesc.dwBufferBytes = soundbufsize;
     dsbdesc.lpwfxFormat = &wfx;
 
-    if ((ret = lpDS8->CreateSoundBuffer(&dsbdesc, &lpDSB2, NULL)) != DS_OK)
+    if (FAILED(ret = lpDS8->CreateSoundBuffer(&dsbdesc, &lpDSB2, NULL)))
     {
         if (ret == DSERR_CONTROLUNAVAIL ||
             ret == DSERR_INVALIDCALL ||
@@ -159,7 +159,7 @@ int SNDDXInit(int buffersize)
             DSBCAPS_CTRLVOLUME | DSBCAPS_GETCURRENTPOSITION2 |
             DSBCAPS_LOCSOFTWARE;
 
-        if ((ret = lpDS8->CreateSoundBuffer(&dsbdesc, &lpDSB2, NULL)) != DS_OK)
+        if (FAILED(ret = lpDS8->CreateSoundBuffer(&dsbdesc, &lpDSB2, NULL)))
         {
             sprintf(tempstr, "Error when creating secondary sound buffer: %s - %s", DXGetErrorString8(ret), DXGetErrorDescription8(ret));
             MessageBox (NULL, tempstr, "Error", MB_OK | MB_ICONINFORMATION);
@@ -226,6 +226,7 @@ void SNDDXDeInit()
     }
 
     delete stereodata16;
+    stereodata16=0;
 }
 
//////////////////////////////////////////////////////////////////////////////
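Initializing stereodata16 to 0 at file scope and re-zeroing it after delete makes repeated or out-of-order init/deinit cycles harmless, because deleting a null pointer is a no-op. The pattern in isolation (bufdemo is just an illustrative stand-in, not part of the commit):

#include <cstdint>

static int16_t *bufdemo = 0;    // same idea as stereodata16 = 0 at file scope

static void deinit_demo()
{
    delete[] bufdemo;   // deleting a null pointer is a well-defined no-op
    bufdemo = 0;        // so a second deinit_demo() call is harmless
}

int main()
{
    bufdemo = new int16_t[1024];
    deinit_demo();
    deinit_demo();      // safe: the pointer was reset to 0 after the first call
    return 0;
}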
@@ -235,7 +236,6 @@ void SNDDXUpdateAudio(s16 *buffer, u32 num_samples)
     LPVOID buffer1;
     LPVOID buffer2;
     DWORD buffer1_size, buffer2_size;
-    DWORD status;
     int samplecounter;
 
     {
@@ -245,12 +245,14 @@ void SNDDXUpdateAudio(s16 *buffer, u32 num_samples)
     bool silence = (samplecounter<-44100*15/60); //behind by more than a quarter second -> silence
 
-    IDirectSoundBuffer8_GetStatus(lpDSB2, &status);
-
-    if (status & DSBSTATUS_BUFFERLOST)
-        return; // fix me
-
-    IDirectSoundBuffer8_Lock(lpDSB2, soundoffset, num_samples * sizeof(s16) * 2, &buffer1, &buffer1_size, &buffer2, &buffer2_size, 0);
+    HRESULT hr = lpDSB2->Lock(soundoffset, num_samples * sizeof(s16) * 2,
+                              &buffer1, &buffer1_size, &buffer2, &buffer2_size, 0);
+    if(FAILED(hr))
+    {
+        if(hr == DSBSTATUS_BUFFERLOST)
+            lpDSB2->Restore();
+        return;
+    }
 
     if(silence) {
         memset(buffer1, 0, buffer1_size);
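The silence cutoff above is 44100*15/60 = 11025 samples, i.e. a quarter of a second at 44.1 kHz. (As an aside, IDirectSoundBuffer8::Lock normally reports a lost buffer through the DSERR_BUFFERLOST return code; DSBSTATUS_BUFFERLOST is the GetStatus() flag, so the Restore() branch above may never actually be taken as written.) A quick check of the threshold arithmetic:

#include <cassert>

int main()
{
    // the silence threshold used above: 44100*15/60 samples at 44.1 kHz
    int threshold = 44100 * 15 / 60;
    assert(threshold == 11025);        // a quarter of a second's worth of samples
    assert(threshold * 4 == 44100);
    return 0;
}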
@@ -260,37 +262,42 @@ void SNDDXUpdateAudio(s16 *buffer, u32 num_samples)
     else
     {
         memcpy(buffer1, buffer, buffer1_size);
-        if (buffer2)
+        if(buffer2)
             memcpy(buffer2, ((u8 *)buffer)+buffer1_size, buffer2_size);
     }
 
     soundoffset += buffer1_size + buffer2_size;
     soundoffset %= soundbufsize;
 
-    IDirectSoundBuffer8_Unlock(lpDSB2, buffer1, buffer1_size, buffer2, buffer2_size);
+    lpDSB2->Unlock(buffer1, buffer1_size, buffer2, buffer2_size);
 }
 
//////////////////////////////////////////////////////////////////////////////
 
+static inline u32 circularDist(u32 from, u32 to, u32 size)
+{
+    if(size == 0)
+        return 0;
+    s32 diff = (s32)(to - from);
+    while(diff < 0)
+        diff += size;
+    return (u32)diff;
+}
+
 u32 SNDDXGetAudioSpace()
 {
-    //return 735;
     DWORD playcursor, writecursor;
-    u32 freespace=0;
-
-    if (lpDSB2->GetCurrentPosition(&playcursor, &writecursor) != DS_OK)
+    if(FAILED(lpDSB2->GetCurrentPosition(&playcursor, &writecursor)))
         return 0;
 
-    if (soundoffset > playcursor)
-        freespace = soundbufsize - soundoffset + playcursor;
-    else
-        freespace = playcursor - soundoffset;
+    u32 curToWrite = circularDist(soundoffset, writecursor, soundbufsize);
+    u32 curToPlay = circularDist(soundoffset, playcursor, soundbufsize);
 
-    // if (freespace > 512)
-    return (freespace / 2 / 2);
-    // else
-    //     return 0;
+    if(curToWrite < curToPlay)
+        return 0; // in-between the two cursors. we shouldn't write anything during this time.
+
+    return curToPlay / (sizeof(s16) * 2);
 }
 
//////////////////////////////////////////////////////////////////////////////
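circularDist() measures how far ahead `to` is of `from` on the ring buffer, wrapping at `size`, and SNDDXGetAudioSpace() then converts the byte distance from the write offset to the play cursor into stereo 16-bit frames. A standalone check of that arithmetic with made-up cursor positions:

#include <cassert>
#include <cstdint>

// same wrap-around distance as circularDist() above
static uint32_t circular_dist_demo(uint32_t from, uint32_t to, uint32_t size)
{
    if (size == 0)
        return 0;
    int32_t diff = (int32_t)(to - from);
    while (diff < 0)
        diff += size;
    return (uint32_t)diff;
}

int main()
{
    const uint32_t bufsize = 1024;   // hypothetical ring buffer size in bytes

    // no wrap: play cursor is ahead of our write offset
    assert(circular_dist_demo(100, 612, bufsize) == 512);

    // wrap: the distance passes through the end of the buffer
    assert(circular_dist_demo(900, 132, bufsize) == 256);

    // 512 bytes of space = 512 / (sizeof(int16_t) * 2) = 128 stereo sample frames
    assert(512 / (sizeof(int16_t) * 2) == 128);
    return 0;
}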