mirror of https://github.com/PCSX2/pcsx2.git
commit e57a75ac55
@@ -190,7 +190,7 @@ public:
 // (I think it's unsigned int vs signed int)
 #include <wx/filefn.h>
 #define HAVE_MODE_T
 #endif

 #include <stdexcept>
 #include <cstring>    // string.h under c++
@@ -200,6 +200,7 @@ public:
 #include <list>
 #include <algorithm>
 #include <memory>
+#include <atomic>

 #include "Pcsx2Defs.h"

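The pattern this commit applies throughout is mechanical: every volatile-qualified flag or counter shared between threads becomes a std::atomic<T>, because volatile guarantees neither atomicity nor inter-thread ordering in C++. A minimal sketch of the before/after shape, using an invented flag rather than any real PCSX2 member:

    #include <atomic>
    #include <thread>

    static std::atomic<bool> s_quit(false);  // was: volatile bool s_quit;

    void worker()
    {
        // load() and store() default to memory_order_seq_cst, so this loop is
        // a well-defined cross-thread read, unlike the old volatile spin.
        while (!s_quit.load())
            std::this_thread::yield();
    }

    int main()
    {
        std::thread t(worker);
        s_quit.store(true);  // a plain 's_quit = true;' means the same thing
        t.join();
    }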
@@ -111,7 +111,7 @@ protected:
 class BaseDeletableObject : public virtual IDeletableObject
 {
 protected:
-    volatile vol_t m_IsBeingDeleted;
+    std::atomic<bool> m_IsBeingDeleted;

 public:
     BaseDeletableObject();
@@ -103,8 +103,8 @@ namespace Threading
     MutexRecursive m_mtx_start; // used to lock the Start() code from starting simultaneous threads accidentally.
     Mutex m_mtx_ThreadName;

-    volatile vol_t m_detached; // a boolean value which indicates if the m_thread handle is valid
-    volatile s32 m_running; // set true by Start(), and set false by Cancel(), Block(), etc.
+    std::atomic<bool> m_detached; // a boolean value which indicates if the m_thread handle is valid
+    std::atomic<bool> m_running; // set true by Start(), and set false by Cancel(), Block(), etc.

     // exception handle, set non-NULL if the thread terminated with an exception
     // Use RethrowException() to re-throw the exception using its original exception type.
@@ -243,8 +243,8 @@ namespace Threading
 class BaseTaskThread : public pxThread
 {
 protected:
-    volatile bool m_Done;
-    volatile bool m_TaskPending;
+    std::atomic<bool> m_Done;
+    std::atomic<bool> m_TaskPending;
     Semaphore m_post_TaskComplete;
     Mutex m_lock_TaskComplete;

@@ -27,25 +27,23 @@ class ScopedPtrMT
 {
     DeclareNoncopyableObject(ScopedPtrMT);

-    typedef T* TPtr;
-
 protected:
-    volatile TPtr m_ptr;
+    std::atomic<T*> m_ptr;
     Threading::Mutex m_mtx;

 public:
     typedef T element_type;

-    wxEXPLICIT ScopedPtrMT(T * ptr = NULL)
+    wxEXPLICIT ScopedPtrMT(T * ptr = nullptr)
     {
         m_ptr = ptr;
     }

     ~ScopedPtrMT() throw() { _Delete_unlocked(); }

-    ScopedPtrMT& Reassign(T * ptr = NULL)
+    ScopedPtrMT& Reassign(T * ptr = nullptr)
     {
-        TPtr doh = (TPtr)Threading::AtomicExchangePointer( m_ptr, ptr );
+        T* doh = m_ptr.exchange(ptr);
         if ( ptr != doh ) delete doh;
         return *this;
     }
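Reassign() above shows the shape of the pointer conversions: the old AtomicExchangePointer macro becomes std::atomic<T*>::exchange(), which publishes the new pointer and hands back the displaced one in a single atomic step. The same idiom as a standalone sketch (the free function is invented for illustration):

    #include <atomic>

    template <typename T>
    void reassign(std::atomic<T*>& slot, T* replacement)
    {
        // exchange() swaps in the new pointer and returns the old one
        // atomically, so the displaced object is deleted exactly once.
        T* old = slot.exchange(replacement);
        if (old != replacement)
            delete old;
    }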
@@ -55,19 +53,17 @@ public:
         ScopedLock lock( m_mtx );
         _Delete_unlocked();
     }

     // Removes the pointer from scoped management, but does not delete!
-    // (ScopedPtr will be NULL after this method)
+    // (ScopedPtr will be nullptr after this method)
     T *DetachPtr()
     {
         ScopedLock lock( m_mtx );

-        T *ptr = m_ptr;
-        m_ptr = NULL;
-        return ptr;
+        return m_ptr.exchange(nullptr);
     }

-    // Returns the managed pointer. Can return NULL as a valid result if the ScopedPtrMT
+    // Returns the managed pointer. Can return nullptr as a valid result if the ScopedPtrMT
     // has no object in management.
     T* GetPtr() const
     {
@@ -77,6 +73,7 @@ public:
     void SwapPtr(ScopedPtrMT& other)
     {
         ScopedLock lock( m_mtx );
+        m_ptr.exchange(other.m_ptr.exchange(m_ptr.load()));
         T * const tmp = other.m_ptr;
         other.m_ptr = m_ptr;
         m_ptr = tmp;
@@ -91,7 +88,7 @@ public:

     bool operator!() const throw()
     {
-        return m_ptr == NULL;
+        return m_ptr.load() == nullptr;
     }

     // Equality
@@ -106,7 +103,7 @@ public:
         return !operator==(pT);
     }

-    // Convenient assignment operator. ScopedPtrMT = NULL will issue an automatic deletion
+    // Convenient assignment operator. ScopedPtrMT = nullptr will issue an automatic deletion
     // of the managed pointer.
     ScopedPtrMT& operator=( T* src )
     {
@@ -120,16 +117,16 @@ public:
     }

     // Dereference operator, returns a handle to the managed pointer.
-    // Generates a debug assertion if the object is NULL!
+    // Generates a debug assertion if the object is nullptr!
     T& operator*() const
     {
-        pxAssert(m_ptr != NULL);
+        pxAssert(m_ptr != nullptr);
         return *m_ptr;
     }

     T* operator->() const
     {
-        pxAssert(m_ptr != NULL);
+        pxAssert(m_ptr != nullptr);
         return m_ptr;
     }
 #endif
@@ -137,7 +134,6 @@ public:
 protected:
     void _Delete_unlocked() throw()
     {
-        delete m_ptr;
-        m_ptr = NULL;
+        delete m_ptr.exchange(nullptr);
     }
 };
@@ -158,7 +158,7 @@ namespace Threading

     // For use in spin/wait loops.
     extern void SpinWait();

     // Use prior to committing data to another thread
     extern void StoreFence();

@@ -181,22 +181,6 @@ namespace Threading
     extern s32 AtomicRead( volatile s32& Target );
     extern u32 AtomicExchange( volatile u32& Target, u32 value );
     extern s32 AtomicExchange( volatile s32& Target, s32 value );
-    extern u32 AtomicExchangeAdd( volatile u32& Target, u32 value );
-    extern s32 AtomicExchangeAdd( volatile s32& Target, s32 value );
-    extern s32 AtomicExchangeSub( volatile s32& Target, s32 value );
-    extern u32 AtomicIncrement( volatile u32& Target );
-    extern s32 AtomicIncrement( volatile s32& Target );
-    extern u32 AtomicDecrement( volatile u32& Target );
-    extern s32 AtomicDecrement( volatile s32& Target );
-
-    extern bool AtomicBitTestAndReset( volatile u32& bitset, u8 bit );
-    extern bool AtomicBitTestAndReset( volatile s32& bitset, u8 bit );
-
-    extern void* _AtomicExchangePointer( volatile uptr& target, uptr value );
-    extern void* _AtomicCompareExchangePointer( volatile uptr& target, uptr value, uptr comparand );
-
-#define AtomicExchangePointer( dest, src ) _AtomicExchangePointer( (uptr&)dest, (uptr)src )
-#define AtomicCompareExchangePointer( dest, comp, src ) _AtomicExchangePointer( (uptr&)dest, (uptr)comp, (uptr)src )

     // pthread Cond is an evil api that is not suited for Pcsx2 needs.
     // Let's not use it. Use mutexes and semaphores instead to create waits. (Air)
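Each helper removed above has a direct std::atomic member-function equivalent, which is why the declarations (and their implementations, removed further down in this diff) could be dropped wholesale. A rough cheat-sheet as a sketch; the comments name the old wrappers, and the fetch_* family returns the value held before the operation, matching the old _InterlockedExchangeAdd-based helpers:

    #include <atomic>

    void migration_cheatsheet(std::atomic<int>& v, std::atomic<void*>& p, void* q)
    {
        v.exchange(5);    // AtomicExchange( target, 5 )
        v.fetch_add(5);   // AtomicExchangeAdd( target, 5 )
        v.fetch_sub(5);   // AtomicExchangeSub( target, 5 )
        v.fetch_add(1);   // AtomicIncrement( target )
        v.fetch_sub(1);   // AtomicDecrement( target )
        p.exchange(q);    // AtomicExchangePointer( dest, src )

        // The compare-exchange macro maps onto compare_exchange_strong:
        void* expected = nullptr;
        p.compare_exchange_strong(expected, q);
    }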
@@ -228,23 +212,24 @@ namespace Threading
     class NonblockingMutex
     {
     protected:
-        volatile int val;
+        std::atomic_flag val;

     public:
-        NonblockingMutex() : val( false ) {}
+        NonblockingMutex() { val.clear(); }
         virtual ~NonblockingMutex() throw() {}

         bool TryAcquire() throw()
         {
-            return !AtomicExchange( val, true );
+            return !val.test_and_set();
         }

+        // Can be done with a TryAcquire/Release but it is likely better to do it outside of the object
         bool IsLocked()
-        { return !!val; }
+        { pxAssertMsg(0, "IsLocked isn't supported for NonblockingMutex"); return false; }

         void Release()
         {
-            AtomicExchange( val, false );
+            val.clear();
         }
     };

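std::atomic_flag is a natural fit for NonblockingMutex: test_and_set() returns the previous state, so a false result means the caller took the lock. It also explains the gutted IsLocked() — before C++20 the flag offers no way to read its state without also setting it. The try-lock idiom in isolation, as a sketch:

    #include <atomic>

    // ATOMIC_FLAG_INIT is the portable pre-C++20 initializer; the class above
    // achieves the same thing by calling val.clear() in its constructor.
    static std::atomic_flag s_lock = ATOMIC_FLAG_INIT;

    bool try_enter()
    {
        // Previous state was clear => this caller now owns the lock.
        return !s_lock.test_and_set();
    }

    void leave()
    {
        s_lock.clear();
    }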
@@ -407,15 +392,11 @@ namespace Threading
     // Note that the isLockedBool should only be used as an indicator for the locked status,
     // and not actually depended on for thread synchronization...

     struct ScopedLockBool {
         ScopedLock m_lock;
-        volatile __aligned(4) bool& m_bool;
+        std::atomic<bool>& m_bool;

-#ifdef __linux__
-        ScopedLockBool(Mutex& mutexToLock, volatile bool& isLockedBool)
-#else
-        ScopedLockBool(Mutex& mutexToLock, volatile __aligned(4) bool& isLockedBool)
-#endif
+        ScopedLockBool(Mutex& mutexToLock, std::atomic<bool>& isLockedBool)
             : m_lock(mutexToLock),
               m_bool(isLockedBool) {
             m_bool = m_lock.IsLocked();
@@ -54,16 +54,6 @@

 /*** Atomic operations ***/

-static __inline__ __attribute__((always_inline)) s32 _InterlockedCompareExchange(volatile s32 * const Destination, const s32 Exchange, const s32 Comperand)
-{
-    return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
-}
-
-static __inline__ __attribute__((always_inline)) s64 _InterlockedCompareExchange64(volatile s64 * const Destination, const s64 Exchange, const s64 Comperand)
-{
-    return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
-}
-
 static __inline__ __attribute__((always_inline)) s32 _InterlockedExchange(volatile s32 * const Target, const s32 Value)
 {
     /* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
@@ -78,21 +68,6 @@ static __inline__ __attribute__((always_inline)) s64 _InterlockedExchange64(vola
     return __sync_lock_test_and_set(Target, Value);
 }
-
-static __inline__ __attribute__((always_inline)) s32 _InterlockedExchangeAdd(volatile s32 * const Addend, const s32 Value)
-{
-    return __sync_fetch_and_add(Addend, Value);
-}
-
-static __inline__ __attribute__((always_inline)) s32 _InterlockedDecrement(volatile s32 * const lpAddend)
-{
-    return _InterlockedExchangeAdd(lpAddend, -1) - 1;
-}
-
-static __inline__ __attribute__((always_inline)) s32 _InterlockedIncrement(volatile s32 * const lpAddend)
-{
-    return _InterlockedExchangeAdd(lpAddend, 1) + 1;
-}

 /*** System information ***/
 static __inline__ __attribute__((always_inline)) void __cpuid(int CPUInfo[], const int InfoType)
 {
@@ -23,7 +23,7 @@

 namespace Threading
 {
-    static vol_t _attr_refcount = 0;
+    static std::atomic<int> _attr_refcount(0);
     static pthread_mutexattr_t _attr_recursive;
 }

@@ -120,7 +120,7 @@ Threading::Mutex::~Mutex() throw()

 Threading::MutexRecursive::MutexRecursive() : Mutex( false )
 {
-    if( _InterlockedIncrement( &_attr_refcount ) == 1 )
+    if( _attr_refcount.fetch_add(1) == 1 )
     {
         if( 0 != pthread_mutexattr_init( &_attr_recursive ) )
             throw Exception::OutOfMemory(L"Recursive mutexing attributes");
@@ -134,7 +134,7 @@ Threading::MutexRecursive::MutexRecursive() : Mutex( false )

 Threading::MutexRecursive::~MutexRecursive() throw()
 {
-    if( _InterlockedDecrement( &_attr_refcount ) == 0 )
+    if( _attr_refcount.fetch_sub(1) == 0 )
         pthread_mutexattr_destroy( &_attr_recursive );
 }

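One detail worth keeping in mind when reading the two conversions above: the MSVC-style _InterlockedIncrement/_InterlockedDecrement intrinsics return the counter's new value, whereas fetch_add/fetch_sub return the value held immediately before the operation. A first-in/last-out refcount written directly against the prior-value semantics looks like this sketch (names invented):

    #include <atomic>

    static std::atomic<int> s_refs(0);

    // fetch_add returns the count *before* the increment, so the first caller
    // sees 0 (not 1, as _InterlockedIncrement would report).
    bool add_ref_is_first()
    {
        return s_refs.fetch_add(1) == 0;
    }

    // fetch_sub returns the count *before* the decrement, so the last caller
    // sees 1 (not 0, as _InterlockedDecrement would report).
    bool release_is_last()
    {
        return s_refs.fetch_sub(1) == 1;
    }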
@@ -291,7 +291,7 @@ bool Threading::pxThread::Detach()
 {
     AffinityAssert_DisallowFromSelf(pxDiagSpot);

-    if( _InterlockedExchange( &m_detached, true ) ) return false;
+    if( m_detached.exchange(true) ) return false;
     pthread_detach( m_thread );
     return true;
 }
@@ -386,7 +386,7 @@ bool Threading::pxThread::IsSelf() const

 bool Threading::pxThread::IsRunning() const
 {
-    return !!m_running;
+    return m_running;
 }

 void Threading::pxThread::AddListener( EventListener_Thread& evt )
@@ -669,7 +669,7 @@ void Threading::pxThread::OnCleanupInThread()

     m_native_handle = 0;
     m_native_id = 0;

     m_evtsrc_OnDelete.Dispatch( 0 );
 }

@@ -803,49 +803,6 @@ __fi s32 Threading::AtomicExchange( volatile s32& Target, s32 value ) {
     return _InterlockedExchange( (volatile vol_t*)&Target, value );
 }

-__fi u32 Threading::AtomicExchangeAdd( volatile u32& Target, u32 value ) {
-    return _InterlockedExchangeAdd( (volatile vol_t*)&Target, value );
-}
-__fi s32 Threading::AtomicExchangeAdd( volatile s32& Target, s32 value ) {
-    return _InterlockedExchangeAdd( (volatile vol_t*)&Target, value );
-}
-
-__fi s32 Threading::AtomicExchangeSub( volatile s32& Target, s32 value ) {
-    return _InterlockedExchangeAdd( (volatile vol_t*)&Target, -value );
-}
-
-__fi u32 Threading::AtomicIncrement( volatile u32& Target ) {
-    return _InterlockedExchangeAdd( (volatile vol_t*)&Target, 1 );
-}
-__fi s32 Threading::AtomicIncrement( volatile s32& Target) {
-    return _InterlockedExchangeAdd( (volatile vol_t*)&Target, 1 );
-}
-
-__fi u32 Threading::AtomicDecrement( volatile u32& Target ) {
-    return _InterlockedExchangeAdd( (volatile vol_t*)&Target, -1 );
-}
-__fi s32 Threading::AtomicDecrement(volatile s32& Target) {
-    return _InterlockedExchangeAdd((volatile vol_t*)&Target, -1);
-}
-
-__fi void* Threading::_AtomicExchangePointer(volatile uptr& target, uptr value)
-{
-#ifdef _M_X86_64 // high-level atomic ops, please leave these 64 bit checks in place.
-    return (void*)_InterlockedExchange64((volatile s64*)&target, value);
-#else
-    return (void*)_InterlockedExchange((volatile vol_t*)&target, value);
-#endif
-}
-
-__fi void* Threading::_AtomicCompareExchangePointer(volatile uptr& target, uptr value, uptr comparand)
-{
-#ifdef _M_X86_64 // high-level atomic ops, please leave these 64 bit checks in place.
-    return (void*)_InterlockedCompareExchange64((volatile s64*)&target, value, comparand);
-#else
-    return (void*)_InterlockedCompareExchange((volatile vol_t*)&target, value, comparand);
-#endif
-}
-
 // --------------------------------------------------------------------------------------
 //  BaseThreadError
 // --------------------------------------------------------------------------------------
@@ -49,7 +49,7 @@ pxDialogCreationFlags pxDialogFlags()
 //
 bool BaseDeletableObject::MarkForDeletion()
 {
-    return !_InterlockedExchange( &m_IsBeingDeleted, true );
+    return !m_IsBeingDeleted.exchange(true);
 }

 void BaseDeletableObject::DeleteSelf()
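MarkForDeletion() above is the classic once-only guard: exchange() returns the previous value, so exactly one caller across all racing threads observes false. The same idiom recurs later in this commit (isPosted, IsSavingOrLoading, eeRecIsReset). A minimal standalone sketch with an invented flag:

    #include <atomic>

    static std::atomic<bool> s_marked(false);

    // Returns true for exactly one caller, no matter how many threads race
    // here: exchange() sets the flag and hands back the prior value atomically.
    bool mark_once()
    {
        return !s_marked.exchange(true);
    }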
pcsx2/GS.h
@@ -270,12 +270,12 @@ public:
     __aligned(4) uint m_ReadPos; // cur pos gs is reading from
     __aligned(4) uint m_WritePos; // cur pos ee thread is writing to

-    volatile bool m_RingBufferIsBusy;
-    volatile u32 m_SignalRingEnable;
-    volatile s32 m_SignalRingPosition;
+    std::atomic<bool> m_RingBufferIsBusy;
+    std::atomic<bool> m_SignalRingEnable;
+    std::atomic<int> m_SignalRingPosition;

-    volatile s32 m_QueuedFrameCount;
-    volatile u32 m_VsyncSignalListener;
+    std::atomic<int> m_QueuedFrameCount;
+    std::atomic<bool> m_VsyncSignalListener;

     Mutex m_mtx_RingBufferBusy; // Is obtained while processing ring-buffer data
     Mutex m_mtx_RingBufferBusy2; // This one gets released on semaXGkick waiting...
@@ -291,8 +291,8 @@ public:
     // has more than one command in it when the thread is kicked.
     int m_CopyDataTally;

     Semaphore m_sem_OpenDone;
-    volatile bool m_PluginOpened;
+    std::atomic<bool> m_PluginOpened;

     // These vars maintain instance data for sending Data Packets.
     // Only one data packet can be constructed and uploaded at a time.
@@ -118,14 +118,14 @@ void Gif_AddCompletedGSPacket(GS_Packet& gsPack, GIF_PATH path) {
     }
     else {
         pxAssertDev(!gsPack.readAmount, "Gif Unit - gsPack.readAmount only valid for MTVU path 1!");
-        AtomicExchangeAdd(gifUnit.gifPath[path].readAmount, gsPack.size);
+        gifUnit.gifPath[path].readAmount.fetch_add(gsPack.size);
         GetMTGS().SendSimpleGSPacket(GS_RINGTYPE_GSPACKET, gsPack.offset, gsPack.size, path);
     }
 }

 void Gif_AddBlankGSPacket(u32 size, GIF_PATH path) {
     //DevCon.WriteLn("Adding Blank Gif Packet [size=%x]", size);
-    AtomicExchangeAdd(gifUnit.gifPath[path].readAmount, size);
+    gifUnit.gifPath[path].readAmount.fetch_add(size);
     GetMTGS().SendSimpleGSPacket(GS_RINGTYPE_GSPACKET, ~0u, size, path);
 }

@@ -153,7 +153,7 @@ struct Gif_Path_MTVU {
 };

 struct Gif_Path {
-    __aligned(4) volatile s32 readAmount; // Amount of data MTGS still needs to read
+    std::atomic<int> readAmount; // Amount of data MTGS still needs to read
     u8* buffer;    // Path packet buffer
     u32 buffSize;  // Full size of buffer
     u32 buffLimit; // Cut off limit to wrap around
@@ -195,7 +195,7 @@ struct Gif_Path {
     }

     bool isMTVU() const { return !idx && THREAD_VU1; }
-    s32 getReadAmount() { return AtomicRead(readAmount) + gsPack.readAmount; }
+    s32 getReadAmount() { return readAmount.load() + gsPack.readAmount; }
     bool hasDataRemaining() const { return curOffset < curSize; }
     bool isDone() const { return isMTVU() ? !mtvu.fakePackets : (!hasDataRemaining() && (state == GIF_PATH_IDLE || state == GIF_PATH_WAIT)); }

@@ -380,7 +380,7 @@ struct Gif_Path {
     void FinishGSPacketMTVU() {
         if (1) {
             ScopedLock lock(mtvu.gsPackMutex);
-            AtomicExchangeAdd(readAmount, gsPack.size + gsPack.readAmount);
+            readAmount.fetch_add(gsPack.size + gsPack.readAmount);
             mtvu.gsPackQueue.push_back(gsPack);
         }
         gsPack.Reset();
@@ -78,10 +78,10 @@ void SysMtgsThread::OnStart()
     m_packet_size = 0;
     m_packet_writepos = 0;

     m_QueuedFrameCount = 0;
     m_VsyncSignalListener = false;
-    m_SignalRingEnable = 0;
-    m_SignalRingPosition= 0;
+    m_SignalRingEnable = false;
+    m_SignalRingPosition = 0;

     m_CopyDataTally = 0;

@@ -155,13 +155,13 @@ void SysMtgsThread::PostVsyncStart()
     // in the ringbuffer. The queue limit is disabled when both FrameLimiting and Vsync are
     // disabled, since the queue can have perverse effects on framerate benchmarking.

     // Edit: It's possible that MTGS is that much faster than the GS plugin that it creates so much lag,
     // a game becomes uncontrollable (software rendering for example).
     // For that reason it's better to have the limit always in place, at the cost of a few max FPS in benchmarks.
     // If those are needed back, it's better to increase the VsyncQueueSize via PCSX_vm.ini.
     // (The Xenosaga engine is known to run into this, due to it throwing bulks of data in one frame followed by 2 empty frames.)

-    if ((AtomicIncrement(m_QueuedFrameCount) < EmuConfig.GS.VsyncQueueSize) /*|| (!EmuConfig.GS.VsyncEnable && !EmuConfig.GS.FrameLimitEnable)*/) return;
+    if ((m_QueuedFrameCount.fetch_add(1) < EmuConfig.GS.VsyncQueueSize) /*|| (!EmuConfig.GS.VsyncEnable && !EmuConfig.GS.FrameLimitEnable)*/) return;

     m_VsyncSignalListener = true;
     //Console.WriteLn( Color_Blue, "(EEcore Sleep) Vsync\t\tringpos=0x%06x, writepos=0x%06x", volatize(m_ReadPos), m_WritePos );
@@ -238,7 +238,7 @@ void SysMtgsThread::OpenPlugin()
         GSsetGameCRC( ElfCRC, 0 );
 }

 struct RingBufferLock {
     ScopedLock m_lock1;
     ScopedLock m_lock2;
     SysMtgsThread& m_mtgs;
@@ -395,7 +395,7 @@ void SysMtgsThread::ExecuteTaskInThread()
                 u32 offset = tag.data[0];
                 u32 size = tag.data[1];
                 if (offset != ~0u) GSgifTransfer((u32*)&path.buffer[offset], size/16);
-                AtomicExchangeSub(path.readAmount, size);
+                path.readAmount.fetch_sub(size);
                 break;
             }

@@ -409,7 +409,7 @@ void SysMtgsThread::ExecuteTaskInThread()
                 Gif_Path& path = gifUnit.gifPath[GIF_PATH_1];
                 GS_Packet gsPack = path.GetGSPacketMTVU(); // Get vu1 program's xgkick packet(s)
                 if (gsPack.size) GSgifTransfer((u32*)&path.buffer[gsPack.offset], gsPack.size/16);
-                AtomicExchangeSub(path.readAmount, gsPack.size + gsPack.readAmount);
+                path.readAmount.fetch_sub(gsPack.size + gsPack.readAmount);
                 path.PopGSPacketMTVU(); // Should be done last, for proper Gif_MTGS_Wait()
                 break;
             }
@@ -446,8 +446,8 @@ void SysMtgsThread::ExecuteTaskInThread()
             if( (GSopen2 == NULL) && (PADupdate != NULL) )
                 PADupdate(0);

-            AtomicDecrement( m_QueuedFrameCount );
-            if (!!AtomicExchange(m_VsyncSignalListener, false))
+            m_QueuedFrameCount.fetch_sub(1);
+            if (m_VsyncSignalListener.exchange(false))
                 m_sem_Vsync.Post();

             busy.Release();
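The m_VsyncSignalListener.exchange(false) test above is a compact handshake: the EE core registers interest before sleeping, and the MTGS thread consumes the registration atomically so the wakeup is posted at most once per registration. A self-contained sketch, using a small condition-variable stand-in for PCSX2's Threading::Semaphore (all names here are invented):

    #include <atomic>
    #include <condition_variable>
    #include <mutex>

    class TinySemaphore {
        std::mutex m;
        std::condition_variable cv;
        int count = 0;
    public:
        void Post() { { std::lock_guard<std::mutex> lk(m); ++count; } cv.notify_one(); }
        void Wait() { std::unique_lock<std::mutex> lk(m); cv.wait(lk, [this]{ return count > 0; }); --count; }
    };

    static std::atomic<bool> s_vsyncListener(false);
    static TinySemaphore s_semVsync;

    void eecore_wait_vsync()
    {
        s_vsyncListener.store(true);          // register interest before sleeping
        s_semVsync.Wait();
    }

    void mtgs_signal_vsync()
    {
        if (s_vsyncListener.exchange(false))  // consume the registration atomically
            s_semVsync.Post();                // wake the sleeping EE core thread
    }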
@@ -522,16 +522,16 @@ void SysMtgsThread::ExecuteTaskInThread()
         {
             pxAssert( m_WritePos == newringpos );
         }

         m_ReadPos = newringpos;

-        if( m_SignalRingEnable != 0 )
+        if( m_SignalRingEnable )
         {
             // The EEcore has requested a signal after some amount of processed data.
-            if( AtomicExchangeSub( m_SignalRingPosition, ringposinc ) <= 0 )
+            if( m_SignalRingPosition.fetch_sub( ringposinc ) <= 0 )
             {
                 // Make sure to post the signal after the m_ReadPos has been updated...
-                AtomicExchange( m_SignalRingEnable, 0 );
+                m_SignalRingEnable = false;
                 m_sem_OnRingReset.Post();
                 continue;
             }
@@ -544,14 +544,14 @@ void SysMtgsThread::ExecuteTaskInThread()
         // won't sleep the eternity, even if SignalRingPosition didn't reach 0 for some reason.
         // Important: Need to unlock the MTGS busy signal PRIOR, so that EEcore SetEvent() calls
         // parallel to this handler aren't accidentally blocked.
-        if( AtomicExchange( m_SignalRingEnable, 0 ) != 0 )
+        if( m_SignalRingEnable.exchange(false) )
         {
-            //Console.Warning( "(MTGS Thread) Dangling RingSignal on empty buffer! signalpos=0x%06x", AtomicExchange( m_SignalRingPosition, 0 ) );
-            AtomicExchange( m_SignalRingPosition, 0 );
+            //Console.Warning( "(MTGS Thread) Dangling RingSignal on empty buffer! signalpos=0x%06x", m_SignalRingPosition.exchange(0) ) );
+            m_SignalRingPosition = 0;
             m_sem_OnRingReset.Post();
         }

-        if (!!AtomicExchange(m_VsyncSignalListener, false))
+        if (m_VsyncSignalListener.exchange(false))
             m_sem_Vsync.Post();

         //Console.Warning( "(MTGS Thread) Nothing to do! ringpos=0x%06x", m_ReadPos );
@@ -617,7 +617,7 @@ void SysMtgsThread::WaitGS(bool syncRegs, bool weakWait, bool isMTVU)
             // hence it has been avoided...
         }
     }

     if (syncRegs) {
         ScopedLock lock(m_mtx_WaitGS);
         // Completely synchronize GS and MTGS register states.
@@ -719,7 +719,7 @@ void SysMtgsThread::GenericStall( uint size )
             //Console.WriteLn( Color_Blue, "(EEcore Sleep) PrepDataPacker \tringpos=0x%06x, writepos=0x%06x, signalpos=0x%06x", readpos, writepos, m_SignalRingPosition );

             while(true) {
-                AtomicExchange( m_SignalRingEnable, 1 );
+                m_SignalRingEnable = true;
                 SetEvent();
                 m_sem_OnRingReset.WaitWithoutYield();
                 readpos = volatize(m_ReadPos);
@@ -729,7 +729,7 @@ void SysMtgsThread::GenericStall( uint size )
                     freeroom = readpos - writepos;
                 else
                     freeroom = RingBufferSize - (writepos - readpos);

                 if (freeroom > size) break;
             }

@@ -47,7 +47,7 @@ static void MTVU_Unpack(void* data, VIFregisters& vifRegs)
 }

 // Called on Saving/Loading states...
 void SaveStateBase::mtvuFreeze()
 {
     FreezeTag("MTVU");
     pxAssert(vu1Thread.IsDone());
@@ -142,7 +142,7 @@ void VU_Thread::ExecuteRingBuffer()
             break;
         }
         case MTVU_NULL_PACKET:
-            AtomicExchange(read_pos, 0);
+            read_pos = 0;
             break;
         jNO_DEFAULT;
     }
@@ -187,7 +187,7 @@ void VU_Thread::ReserveSpace(s32 size)
 // Use this when reading read_pos from ee thread
 __fi s32 VU_Thread::GetReadPos()
 {
-    return AtomicRead(read_pos);
+    return read_pos.load();
 }
 // Use this when reading write_pos from vu thread
 __fi s32 VU_Thread::GetWritePos()
@@ -202,8 +202,7 @@ __fi u32* VU_Thread::GetWritePtr()

 __fi void VU_Thread::incReadPos(s32 offset)
 { // Offset in u32 sizes
-    s32 temp = (read_pos + offset) & buffer_mask;
-    AtomicExchange(read_pos, temp);
+    read_pos = (read_pos + offset) & buffer_mask;
 }
 __fi void VU_Thread::incWritePos()
 { // Adds write_offset
@@ -30,8 +30,8 @@ class VU_Thread : public pxThread {
     static const s32 buffer_size = (_1mb * 16) / sizeof(s32);
     static const u32 buffer_mask = buffer_size - 1;
     __aligned(4) u32 buffer[buffer_size];
-    __aligned(4) volatile s32 read_pos; // Only modified by VU thread
-    __aligned(4) volatile bool isBusy; // Is thread processing data?
+    __aligned(4) std::atomic<int> read_pos; // Only modified by VU thread
+    __aligned(4) std::atomic<bool> isBusy; // Is thread processing data?
     __aligned(4) s32 write_pos; // Only modified by EE thread
     __aligned(4) s32 write_offset; // Only modified by EE thread
     __aligned(4) Mutex mtxBusy;
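A subtlety in incReadPos() above: on a std::atomic, read_pos = (read_pos + offset) & buffer_mask compiles into a separate atomic load and atomic store, not one read-modify-write. That is only safe because, as the member comments say, read_pos has a single writer. A sketch contrasting the two forms, with invented names:

    #include <atomic>

    static std::atomic<unsigned> s_pos(0);
    static const unsigned mask = (1u << 22) - 1;  // power-of-two ring, as in MTVU

    void single_writer_advance(unsigned offset)
    {
        // Load-then-store: fine when only one thread ever writes s_pos.
        s_pos.store((s_pos.load() + offset) & mask);
    }

    void multi_writer_advance(unsigned offset)
    {
        // With several writers the update must be one atomic step; since the
        // ring size divides 2^32 the index may wrap freely and be masked on use.
        s_pos.fetch_add(offset);
    }

    unsigned current_slot()
    {
        return s_pos.load() & mask;
    }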
@@ -1213,7 +1213,7 @@ void SysCorePlugins::Open()

     if (GSopen2) GetMTGS().WaitForOpen();

-    if( !AtomicExchange( m_mcdOpen, true ) )
+    if( !m_mcdOpen.exchange(true) )
     {
         DbgCon.Indent().WriteLn( "Opening Memorycards");
         OpenPlugin_Mcd();
@@ -1313,7 +1313,7 @@ void SysCorePlugins::Close()

     Console.WriteLn( Color_StrongBlue, "Closing plugins..." );

-    if( AtomicExchange( m_mcdOpen, false ) )
+    if( m_mcdOpen.exchange(false) )
     {
         DbgCon.Indent().WriteLn( "Closing Memorycards");
         ClosePlugin_Mcd();
@@ -295,7 +295,7 @@ protected:
     Threading::MutexRecursive m_mtx_PluginStatus;

     // Lovely hack until the new PS2E API is completed.
-    volatile u32 m_mcdOpen;
+    std::atomic<bool> m_mcdOpen;

 public: // hack until we unsuck plugins...
     std::unique_ptr<PluginStatus_t> m_info[PluginId_AllocCount];
@@ -52,7 +52,6 @@ struct DECI2_DBGP_BRK{

 extern DECI2_DBGP_BRK ebrk[32], ibrk[32];
 extern s32 ebrk_count, ibrk_count;
-extern volatile long runStatus;
 extern s32 runCode, runCount;

 extern Threading::Semaphore* runEvent;
@@ -24,6 +24,8 @@ using namespace Threading;

 using namespace R5900;

+std::atomic<int> runStatus;
+
 struct DECI2_DBGP_HEADER{
     DECI2_HEADER h; //+00
     u16 id;         //+08
@@ -358,7 +360,7 @@ void D2_DBGP(const u8 *inbuffer, u8 *outbuffer, char *message, char *eepc, char
     if (in->h.destination=='I')
         ;
     else{
-        out->result = ( pcsx2_InterlockedExchange(&runStatus, STOP)==STOP ?
+        out->result = ( runStatus.exchange(STOP)==STOP ?
             0x20 : 0x21 );
         out->code=0xFF;
         Sleep(50);
@@ -371,7 +373,7 @@ void D2_DBGP(const u8 *inbuffer, u8 *outbuffer, char *message, char *eepc, char
     if (in->h.destination=='I')
         ;
     else{
-        pcsx2_InterlockedExchange(&runStatus, STOP);
+        runStatus = STOP;
         Sleep(100);//first get the run thread to Wait state
         runCount=in->count;
         runCode=in->code;
@@ -390,7 +392,7 @@ void D2_DBGP(const u8 *inbuffer, u8 *outbuffer, char *message, char *eepc, char
     for (i=0, s=0; i<(int)run->argc; i++, argv++) s+=argv[i];
     memcpy(PSM(0), argv, s);
 // threads_array[0].argstring = 0;
-    pcsx2_InterlockedExchange((volatile long*)&runStatus, (u32)STOP);
+    runStatus = STOP;
     Sleep(1000);//first get the run thread to Wait state
     runCount=0;
     runCode=0xFF;
@@ -91,7 +91,7 @@ void SysThreadBase::Suspend( bool isBlocking )
 {
     ScopedLock locker( m_ExecModeMutex );

-    switch( m_ExecMode )
+    switch( m_ExecMode.load() )
     {
         // Invalid thread state, nothing to suspend
         case ExecMode_NoThreadYet:
@@ -196,7 +196,7 @@ void SysThreadBase::Resume()
     // sanity checks against m_ExecMode/m_Running status, and if something doesn't feel
     // right, we should abort; the user may have canceled the action before it even finished.

-    switch( m_ExecMode )
+    switch( m_ExecMode.load() )
     {
         case ExecMode_Opened: return;

@@ -267,7 +267,7 @@ void SysThreadBase::OnResumeInThread( bool isSuspended ) {}
 // continued execution unimpeded.
 bool SysThreadBase::StateCheckInThread()
 {
-    switch( m_ExecMode )
+    switch( m_ExecMode.load() )
     {

 #ifdef PCSX2_DEVBUILD // optimize out handlers for these cases in release builds.
@@ -63,7 +63,7 @@ public:
 };

 protected:
-    volatile ExecutionMode m_ExecMode;
+    std::atomic<ExecutionMode> m_ExecMode;

     // This lock is used to avoid simultaneous requests to Suspend/Resume/Pause from
     // contending threads.
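std::atomic accepts any trivially copyable type, so the ExecutionMode enum above needs no integer shim. The .load() calls added to the switch statements also take one coherent snapshot instead of re-reading the variable per case label. A sketch using the mode names that appear in this diff:

    #include <atomic>

    enum ExecutionMode { ExecMode_NoThreadYet, ExecMode_Opened, ExecMode_Closing, ExecMode_Closed };

    static std::atomic<ExecutionMode> s_mode(ExecMode_NoThreadYet);

    bool is_closing()
    {
        // Snapshot once, then test: both comparisons see the same value even
        // if another thread changes s_mode concurrently.
        ExecutionMode m = s_mode.load();
        return m == ExecMode_Closing || m == ExecMode_Closed;
    }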
@@ -71,7 +71,7 @@ protected:

     // Used to wake up the thread from sleeping when it's in a suspended state.
     Semaphore m_sem_Resume;

     // Used to synchronize inline changes from paused to suspended status.
     Semaphore m_sem_ChangingExecMode;

@@ -79,7 +79,7 @@ protected:
     // Issue a Wait against this mutex for performing actions that require the thread
     // to be suspended.
     Mutex m_RunningLock;

 public:
     explicit SysThreadBase();
     virtual ~SysThreadBase() throw();
@@ -99,7 +99,7 @@ public:

     bool IsClosing() const
     {
         return !IsRunning() || (m_ExecMode <= ExecMode_Closed) || (m_ExecMode == ExecMode_Closing);
     }

     bool HasPendingStateChangeRequest() const
@@ -107,7 +107,7 @@ public:
         return m_ExecMode >= ExecMode_Closing;
     }

-    ExecutionMode GetExecutionMode() const { return m_ExecMode; }
+    ExecutionMode GetExecutionMode() const { return m_ExecMode.load(); }
     Mutex& ExecutionModeMutex() { return m_ExecModeMutex; }

     virtual void Suspend( bool isBlocking = true );
@@ -172,10 +172,10 @@ protected:
     // true anytime between plugins being initialized and plugins being shutdown. Gets
     // set false when plugins are shutdown, the corethread is canceled, or when an error
     // occurs while trying to upload a new state into the VM.
-    volatile bool m_hasActiveMachine;
+    std::atomic<bool> m_hasActiveMachine;

     wxString m_elf_override;

     SSE_MXCSR m_mxcsr_saved;

 public:
@@ -198,10 +198,10 @@ public:
     virtual void UploadStateCopy( const VmStateBuffer& copy );

     virtual bool HasActiveMachine() const { return m_hasActiveMachine; }

     virtual const wxString& GetElfOverride() const { return m_elf_override; }
     virtual void SetElfOverride( const wxString& elf );

 protected:
     void _reset_stuff_as_needed();

@@ -49,7 +49,7 @@ class BaseCpuProvider
 protected:
     // allocation counter for multiple calls to Reserve. Most implementations should utilize
     // this variable for sake of robustness.
-    u32 m_Reserved;
+    std::atomic<int> m_Reserved;

 public:
     // this boolean indicates to some generic logging facilities if the VU's registers
@@ -68,7 +68,7 @@ public:
     {
         try {
             if( m_Reserved != 0 )
-                Console.Warning( "Cleanup miscount detected on CPU provider. Count=%d", m_Reserved );
+                Console.Warning( "Cleanup miscount detected on CPU provider. Count=%d", m_Reserved.load() );
         }
         DESTRUCTOR_CATCHALL
     }
@@ -1319,11 +1319,11 @@ void AppSaveSettings()
     // If multiple SaveSettings messages are requested, we want to ignore most of them.
     // Saving settings once when the GUI is idle should be fine. :)

-    static u32 isPosted = false;
+    static std::atomic<bool> isPosted(false);

     if( !wxThread::IsMain() )
     {
-        if( !AtomicExchange(isPosted, true) )
+        if( !isPosted.exchange(true) )
             wxGetApp().PostIdleMethod( AppSaveSettings );

         return;
@@ -1335,7 +1335,7 @@ void AppSaveSettings()
     SaveVmSettings();
     SaveRegSettings(); // save register because of PluginsFolder change

-    AtomicExchange( isPosted, false );
+    isPosted = false;
 }

@@ -483,7 +483,7 @@ void AppCoreThread::OnResumeInThread( bool isSuspended )
         GetCorePlugins().Close( PluginId_CDVD );
         CDVDsys_ChangeSource( g_Conf->CdvdSource );
         cdvdCtrlTrayOpen();
         m_resetCdvd = false;
     }

     _parent::OnResumeInThread( isSuspended );
@@ -123,12 +123,12 @@ class AppCoreThread : public SysCoreThread
     typedef SysCoreThread _parent;

 protected:
-    volatile bool m_resetCdvd;
+    std::atomic<bool> m_resetCdvd;

 public:
     AppCoreThread();
     virtual ~AppCoreThread() throw();

     void ResetCdvd() { m_resetCdvd = true; }

     virtual void Suspend( bool isBlocking=false );
@@ -77,7 +77,7 @@ class ConsoleTestThread : public Threading::pxThread
     typedef pxThread _parent;

 protected:
-    volatile bool m_done;
+    std::atomic<bool> m_done;
     void ExecuteTaskInThread();

 public:
@@ -176,22 +176,22 @@ protected:

     int m_flushevent_counter;
     bool m_FlushRefreshLocked;

     // ----------------------------------------------------------------------------
     // Queue State Management Vars
     // ----------------------------------------------------------------------------

     // Boolean indicating if a flush message is already in the Main message queue. Used
     // to prevent spamming the main thread with redundant messages.
-    volatile bool m_pendingFlushMsg;
+    std::atomic<bool> m_pendingFlushMsg;

     // This is a counter of the number of threads waiting for the Queue to flush.
-    volatile int m_WaitingThreadsForFlush;
+    std::atomic<int> m_WaitingThreadsForFlush;

     // Indicates to the main thread if a child thread is actively writing to the log. If
     // true the main thread will sleep briefly to allow the child a chance to accumulate
     // more messages (helps avoid rapid successive flushes on high volume logging).
-    volatile bool m_ThreadedLogInQueue;
+    std::atomic<bool> m_ThreadedLogInQueue;

     // Used by threads waiting on the queue to flush.
     Semaphore m_sem_QueueFlushed;
@@ -258,6 +258,6 @@ protected:
     void OnMoveAround( wxMoveEvent& evt );
     void OnResize( wxSizeEvent& evt );
     void OnActivate( wxActivateEvent& evt );

     void OnLoggingChanged();
 };
@@ -158,7 +158,7 @@ void SysExecEvent::PostResult() const
 // --------------------------------------------------------------------------------------
 pxEvtQueue::pxEvtQueue()
 {
-    AtomicExchange( m_Quitting, false );
+    m_Quitting = false;
     m_qpc_Start = 0;
 }

|
@ -171,7 +171,7 @@ pxEvtQueue::pxEvtQueue()
|
||||||
void pxEvtQueue::ShutdownQueue()
|
void pxEvtQueue::ShutdownQueue()
|
||||||
{
|
{
|
||||||
if( m_Quitting ) return;
|
if( m_Quitting ) return;
|
||||||
AtomicExchange( m_Quitting, true );
|
m_Quitting = true;
|
||||||
m_wakeup.Post();
|
m_wakeup.Post();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -40,7 +40,7 @@ bool States_isSlotUsed(int num)
|
||||||
|
|
||||||
// FIXME : Use of the IsSavingOrLoading flag is mostly a hack until we implement a
|
// FIXME : Use of the IsSavingOrLoading flag is mostly a hack until we implement a
|
||||||
// complete thread to manage queuing savestate tasks, and zipping states to disk. --air
|
// complete thread to manage queuing savestate tasks, and zipping states to disk. --air
|
||||||
static volatile u32 IsSavingOrLoading = false;
|
static std::atomic<bool> IsSavingOrLoading(false);
|
||||||
|
|
||||||
class SysExecEvent_ClearSavingLoadingFlag : public SysExecEvent
|
class SysExecEvent_ClearSavingLoadingFlag : public SysExecEvent
|
||||||
{
|
{
|
||||||
|
@ -57,7 +57,7 @@ public:
|
||||||
protected:
|
protected:
|
||||||
void InvokeEvent()
|
void InvokeEvent()
|
||||||
{
|
{
|
||||||
AtomicExchange(IsSavingOrLoading, false);
|
IsSavingOrLoading = false;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -73,7 +73,7 @@ void States_FreezeCurrentSlot()
         return;
     }

-    if( wxGetApp().HasPendingSaves() || AtomicExchange(IsSavingOrLoading, true) )
+    if( wxGetApp().HasPendingSaves() || IsSavingOrLoading.exchange(true) )
     {
         Console.WriteLn( "Load or save action is already pending." );
         return;
@@ -94,7 +94,7 @@ void _States_DefrostCurrentSlot( bool isFromBackup )
         return;
     }

-    if( AtomicExchange(IsSavingOrLoading, true) )
+    if( IsSavingOrLoading.exchange(true) )
     {
         Console.WriteLn( "Load or save action is already pending." );
         return;
@@ -204,11 +204,13 @@ protected:
     Threading::MutexRecursive m_mtx_pending;
     Threading::Semaphore m_wakeup;
     wxThreadIdType m_OwnerThreadId;
-    volatile u32 m_Quitting;
+    std::atomic<bool> m_Quitting;

     // Used for performance measuring the execution of individual events,
     // and also for detecting deadlocks during message processing.
-    volatile u64 m_qpc_Start;
+    // Clang-3.7 failed to link (maybe 64 bits atomic isn't supported on 32 bits)
+    // std::atomic<unsigned long long> m_qpc_Start;
+    u64 m_qpc_Start;

 public:
     pxEvtQueue();
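The comment kept above points at a real portability wrinkle: on 32-bit targets a std::atomic over a 64-bit integer is not guaranteed lock-free and may require the compiler's atomic runtime library at link time, which would be consistent with the Clang 3.7 failure mentioned. A small generic probe (not PCSX2 code) for what a given target provides:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        std::atomic<std::uint64_t> qpc(0);
        std::printf("64-bit atomics lock-free here: %s\n",
                    qpc.is_lock_free() ? "yes" : "no");
        return 0;
    }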
@@ -566,8 +566,8 @@ static void recAlloc()
 static __aligned16 u16 manual_page[Ps2MemSize::MainRam >> 12];
 static __aligned16 u8 manual_counter[Ps2MemSize::MainRam >> 12];

-static u32 eeRecIsReset = false;
-static u32 eeRecNeedsReset = false;
+static std::atomic<bool> eeRecIsReset(false);
+static std::atomic<bool> eeRecNeedsReset(false);
 static bool eeCpuExecuting = false;

 ////////////////////////////////////////////////////
@@ -579,8 +579,8 @@ static void recResetRaw()

     recAlloc();

-    if( AtomicExchange( eeRecIsReset, true ) ) return;
-    AtomicExchange( eeRecNeedsReset, false );
+    if( eeRecIsReset.exchange(true) ) return;
+    eeRecNeedsReset = false;

     Console.WriteLn( Color_StrongBlack, "EE/iR5900-32 Recompiler Reset" );

@@ -628,7 +628,7 @@ static void recResetEE()
 {
     if (eeCpuExecuting)
     {
-        AtomicExchange( eeRecNeedsReset, true );
+        eeRecNeedsReset = true;
         return;
     }

|
||||||
|
|
||||||
// if recPtr reached the mem limit reset whole mem
|
// if recPtr reached the mem limit reset whole mem
|
||||||
if (recPtr >= (recMem->GetPtrEnd() - _64kb)) {
|
if (recPtr >= (recMem->GetPtrEnd() - _64kb)) {
|
||||||
AtomicExchange( eeRecNeedsReset, true );
|
eeRecNeedsReset = true;
|
||||||
}
|
}
|
||||||
else if ((recConstBufPtr - recConstBuf) >= RECCONSTBUF_SIZE - 64) {
|
else if ((recConstBufPtr - recConstBuf) >= RECCONSTBUF_SIZE - 64) {
|
||||||
Console.WriteLn("EE recompiler stack reset");
|
Console.WriteLn("EE recompiler stack reset");
|
||||||
AtomicExchange( eeRecNeedsReset, true );
|
eeRecNeedsReset = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (eeRecNeedsReset) recResetRaw();
|
if (eeRecNeedsReset) recResetRaw();
|
||||||
|
@ -1633,7 +1633,7 @@ static void __fastcall recRecompile( const u32 startpc )
|
||||||
xFastCall(GoemonPreloadTlb);
|
xFastCall(GoemonPreloadTlb);
|
||||||
} else if (pc == 0x3563b8) {
|
} else if (pc == 0x3563b8) {
|
||||||
// Game will unmap some virtual addresses. If a constant address were hardcoded in the block, we would be in a bad situation.
|
// Game will unmap some virtual addresses. If a constant address were hardcoded in the block, we would be in a bad situation.
|
||||||
AtomicExchange( eeRecNeedsReset, true );
|
eeRecNeedsReset = true;
|
||||||
// 0x3563b8 is the start address of the function that invalidate entry in TLB cache
|
// 0x3563b8 is the start address of the function that invalidate entry in TLB cache
|
||||||
xFastCall(GoemonUnloadTlb, ptr[&cpuRegs.GPR.n.a0.UL[0]]);
|
xFastCall(GoemonUnloadTlb, ptr[&cpuRegs.GPR.n.a0.UL[0]]);
|
||||||
}
|
}
|
||||||
|
|
|
@@ -297,22 +297,22 @@ void recMicroVU0::Vsync() throw() { mVUvsyncUpdate(microVU0); }
 void recMicroVU1::Vsync() throw() { mVUvsyncUpdate(microVU1); }

 void recMicroVU0::Reserve() {
-    if (AtomicExchange(m_Reserved, 1) == 0)
+    if (m_Reserved.exchange(1) == 0)
         mVUinit(microVU0, 0);
 }
 void recMicroVU1::Reserve() {
-    if (AtomicExchange(m_Reserved, 1) == 0) {
+    if (m_Reserved.exchange(1) == 0) {
         mVUinit(microVU1, 1);
         vu1Thread.Start();
     }
 }

 void recMicroVU0::Shutdown() throw() {
-    if (AtomicExchange(m_Reserved, 0) == 1)
+    if (m_Reserved.exchange(0) == 1)
         mVUclose(microVU0);
 }
 void recMicroVU1::Shutdown() throw() {
-    if (AtomicExchange(m_Reserved, 0) == 1) {
+    if (m_Reserved.exchange(0) == 1) {
         vu1Thread.WaitVU();
         mVUclose(microVU1);
     }