common: 64-bit intrinsic fixes

long is 8 bytes on 64-bit Linux (LP64), but these intrinsics mirror the Win32 API, where long is 4 bytes. Replace it with s32 so the operands are always 32-bit.
long long is 8 bytes everywhere in practice, but replace it with s64 so we are sure.
Gregory Hainaut 2015-01-05 00:25:37 +01:00
parent e447ffc8b2
commit 04ca7f4a2b
5 changed files with 32 additions and 25 deletions
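
For context, a minimal sketch of the width mismatch the commit message describes, assuming an LP64 Linux target and an LLP64 Windows target; the s32/s64 typedefs are reproduced here as assumptions about the project's headers:

#include <cstdint>
#include <cstdio>

// Assumed typedefs: PCSX2 defines s32/s64 in its own headers.
typedef int32_t s32;
typedef int64_t s64;

int main()
{
    // LP64 Linux: long is 8 bytes. LLP64 Windows: long is 4 bytes.
    // The Interlocked* wrappers mirror the Win32 API, which operates on
    // 32-bit longs, so the fixed-width s32 is the portable choice.
    std::printf("sizeof(long)      = %zu\n", sizeof(long));      // 8 on LP64, 4 on LLP64
    std::printf("sizeof(long long) = %zu\n", sizeof(long long)); // 8 on both
    static_assert(sizeof(s32) == 4, "s32 is 4 bytes everywhere");
    static_assert(sizeof(s64) == 8, "s64 is 8 bytes everywhere");
    return 0;
}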


@@ -111,7 +111,7 @@ protected:
 class BaseDeletableObject : public virtual IDeletableObject
 {
 protected:
-    volatile long m_IsBeingDeleted;
+    volatile s32 m_IsBeingDeleted;
 public:
     BaseDeletableObject();

@@ -103,8 +103,8 @@ namespace Threading
     MutexRecursive m_mtx_start; // used to lock the Start() code from starting simultaneous threads accidentally.
     Mutex m_mtx_ThreadName;
-    volatile long m_detached; // a boolean value which indicates if the m_thread handle is valid
-    volatile long m_running; // set true by Start(), and set false by Cancel(), Block(), etc.
+    volatile s32 m_detached; // a boolean value which indicates if the m_thread handle is valid
+    volatile s32 m_running; // set true by Start(), and set false by Cancel(), Block(), etc.
     // exception handle, set non-NULL if the thread terminated with an exception
     // Use RethrowException() to re-throw the exception using its original exception type.

@@ -54,34 +54,41 @@
 /*** Atomic operations ***/
-static __inline__ __attribute__((always_inline)) long _InterlockedCompareExchange(volatile long * const Destination, const long Exchange, const long Comperand)
+static __inline__ __attribute__((always_inline)) s32 _InterlockedCompareExchange(volatile s32 * const Destination, const s32 Exchange, const s32 Comperand)
 {
     return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
 }
-static __inline__ __attribute__((always_inline)) long long _InterlockedCompareExchange64(volatile long long * const Destination, const long long Exchange, const long long Comperand)
+static __inline__ __attribute__((always_inline)) s64 _InterlockedCompareExchange64(volatile s64 * const Destination, const s64 Exchange, const s64 Comperand)
 {
     return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
 }
-static __inline__ __attribute__((always_inline)) long _InterlockedExchange(volatile long * const Target, const long Value)
+static __inline__ __attribute__((always_inline)) s32 _InterlockedExchange(volatile s32 * const Target, const s32 Value)
 {
     /* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
     __sync_synchronize();
     return __sync_lock_test_and_set(Target, Value);
 }
-static __inline__ __attribute__((always_inline)) long _InterlockedExchangeAdd(volatile long * const Addend, const long Value)
+static __inline__ __attribute__((always_inline)) s64 _InterlockedExchange64(volatile s64 * const Target, const s64 Value)
+{
+    /* NOTE: __sync_lock_test_and_set would be an acquire barrier, so we force a full barrier */
+    __sync_synchronize();
+    return __sync_lock_test_and_set(Target, Value);
+}
+static __inline__ __attribute__((always_inline)) s32 _InterlockedExchangeAdd(volatile s32 * const Addend, const s32 Value)
 {
     return __sync_fetch_and_add(Addend, Value);
 }
-static __inline__ __attribute__((always_inline)) long _InterlockedDecrement(volatile long * const lpAddend)
+static __inline__ __attribute__((always_inline)) s32 _InterlockedDecrement(volatile s32 * const lpAddend)
 {
     return _InterlockedExchangeAdd(lpAddend, -1) - 1;
 }
-static __inline__ __attribute__((always_inline)) long _InterlockedIncrement(volatile long * const lpAddend)
+static __inline__ __attribute__((always_inline)) s32 _InterlockedIncrement(volatile s32 * const lpAddend)
 {
     return _InterlockedExchangeAdd(lpAddend, 1) + 1;
 }
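
To make the semantics concrete: like their Win32 namesakes, these wrappers return the value the destination held before the operation, so a compare-exchange succeeded exactly when the return value equals the comperand. Note also the design choice in the exchange wrappers: __sync_lock_test_and_set is only an acquire barrier, so the explicit __sync_synchronize() upgrades it to the full fence the Win32 intrinsics guarantee. A minimal usage sketch, with the compare-exchange wrapper condensed from the header above so the snippet stands alone:

#include <cassert>
#include <cstdint>

typedef int32_t s32; // assumed project typedef

// Condensed copy of the wrapper defined in the header above (GCC builtins).
static __inline__ __attribute__((always_inline)) s32 _InterlockedCompareExchange(volatile s32* const Destination, const s32 Exchange, const s32 Comperand)
{
    return __sync_val_compare_and_swap(Destination, Comperand, Exchange);
}

int main()
{
    volatile s32 value = 5;

    // Succeeds: value == Comperand (5), so it becomes 7; the old value (5) is returned.
    assert(_InterlockedCompareExchange(&value, 7, 5) == 5 && value == 7);

    // Fails: value is 7, not 5, so it is left untouched; the current value (7) is returned.
    assert(_InterlockedCompareExchange(&value, 9, 5) == 7 && value == 7);
    return 0;
}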


@@ -23,7 +23,7 @@
 namespace Threading
 {
-    static long _attr_refcount = 0;
+    static s32 _attr_refcount = 0;
     static pthread_mutexattr_t _attr_recursive;
 }
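
A plausible sketch of what a static refcount like this guards: first-user initialization and last-user teardown of the shared recursive mutex attribute. Only _attr_refcount and _attr_recursive come from the diff; the helper names are hypothetical, and the sketch assumes GCC's __sync builtins plus construction of the first recursive mutex before any concurrent one (a plain refcount cannot by itself order a racing second initializer):

#include <cstdint>
#include <pthread.h>

typedef int32_t s32; // assumed project typedef

namespace Threading
{
    static volatile s32 _attr_refcount = 0;
    static pthread_mutexattr_t _attr_recursive;

    // Hypothetical helper: the first acquirer initializes the shared
    // recursive attribute exactly once.
    void AcquireRecursiveAttr()
    {
        if (__sync_add_and_fetch(&_attr_refcount, 1) == 1)
        {
            pthread_mutexattr_init(&_attr_recursive);
            pthread_mutexattr_settype(&_attr_recursive, PTHREAD_MUTEX_RECURSIVE);
        }
    }

    // Hypothetical helper: the last releaser destroys it.
    void ReleaseRecursiveAttr()
    {
        if (__sync_sub_and_fetch(&_attr_refcount, 1) == 0)
            pthread_mutexattr_destroy(&_attr_recursive);
    }
}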


@@ -797,52 +797,52 @@ __fi s32 Threading::AtomicRead(volatile s32& Target) {
 }
 __fi u32 Threading::AtomicExchange(volatile u32& Target, u32 value ) {
-    return _InterlockedExchange( (volatile long*)&Target, value );
+    return _InterlockedExchange( (volatile s32*)&Target, value );
 }
 __fi s32 Threading::AtomicExchange( volatile s32& Target, s32 value ) {
-    return _InterlockedExchange( (volatile long*)&Target, value );
+    return _InterlockedExchange( (volatile s32*)&Target, value );
 }
 __fi u32 Threading::AtomicExchangeAdd( volatile u32& Target, u32 value ) {
-    return _InterlockedExchangeAdd( (volatile long*)&Target, value );
+    return _InterlockedExchangeAdd( (volatile s32*)&Target, value );
 }
 __fi s32 Threading::AtomicExchangeAdd( volatile s32& Target, s32 value ) {
-    return _InterlockedExchangeAdd( (volatile long*)&Target, value );
+    return _InterlockedExchangeAdd( (volatile s32*)&Target, value );
 }
 __fi s32 Threading::AtomicExchangeSub( volatile s32& Target, s32 value ) {
-    return _InterlockedExchangeAdd( (volatile long*)&Target, -value );
+    return _InterlockedExchangeAdd( (volatile s32*)&Target, -value );
 }
 __fi u32 Threading::AtomicIncrement( volatile u32& Target ) {
-    return _InterlockedExchangeAdd( (volatile long*)&Target, 1 );
+    return _InterlockedExchangeAdd( (volatile s32*)&Target, 1 );
 }
 __fi s32 Threading::AtomicIncrement( volatile s32& Target) {
-    return _InterlockedExchangeAdd( (volatile long*)&Target, 1 );
+    return _InterlockedExchangeAdd( (volatile s32*)&Target, 1 );
 }
 __fi u32 Threading::AtomicDecrement( volatile u32& Target ) {
-    return _InterlockedExchangeAdd( (volatile long*)&Target, -1 );
+    return _InterlockedExchangeAdd( (volatile s32*)&Target, -1 );
 }
 __fi s32 Threading::AtomicDecrement(volatile s32& Target) {
-    return _InterlockedExchangeAdd((volatile long*)&Target, -1);
+    return _InterlockedExchangeAdd((volatile s32*)&Target, -1);
 }
 __fi void* Threading::_AtomicExchangePointer(volatile uptr& target, uptr value)
 {
-#ifdef _M_AMD64 // high-level atomic ops, please leave these 64 bit checks in place.
-    return (void*)_InterlockedExchange64(&(volatile s64&)target, value);
+#ifdef _M_X86_64 // high-level atomic ops, please leave these 64 bit checks in place.
+    return (void*)_InterlockedExchange64((volatile s64*)&target, value);
 #else
-    return (void*)_InterlockedExchange((volatile long*)&target, value);
+    return (void*)_InterlockedExchange((volatile s32*)&target, value);
 #endif
 }
 __fi void* Threading::_AtomicCompareExchangePointer(volatile uptr& target, uptr value, uptr comparand)
 {
-#ifdef _M_AMD64 // high-level atomic ops, please leave these 64 bit checks in place.
-    return (void*)_InterlockedCompareExchange64(&(volatile s64&)target, value);
+#ifdef _M_X86_64 // high-level atomic ops, please leave these 64 bit checks in place.
+    return (void*)_InterlockedCompareExchange64((volatile s64*)&target, value, comparand);
 #else
-    return (void*)_InterlockedCompareExchange(&(volatile long&)target, value, comparand);
+    return (void*)_InterlockedCompareExchange((volatile s32*)&target, value, comparand);
 #endif
 }
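
Finally, a condensed sketch of why the pointer helpers dispatch on word size: uptr must be exactly pointer-wide, so a 64-bit build needs the 64-bit exchange while a 32-bit build uses the 32-bit one. The version below (hypothetical name, with uintptr_t standing in for the project's uptr) lets GCC's width-overloaded builtin replace the #ifdef, while keeping the same explicit full barrier as the wrappers above:

#include <cassert>
#include <cstdint>

typedef uintptr_t uptr; // stand-in for the project's pointer-sized integer

static_assert(sizeof(uptr) == sizeof(void*), "uptr must be pointer-sized");

// Hypothetical equivalent of _AtomicExchangePointer: __sync_lock_test_and_set
// is overloaded on operand width, so the same call compiles to the 32- or
// 64-bit exchange automatically; __sync_synchronize keeps full-fence semantics.
void* AtomicExchangePointerSketch(volatile uptr& target, uptr value)
{
    __sync_synchronize();
    return (void*)__sync_lock_test_and_set(&target, value);
}

int main()
{
    int a = 1, b = 2;
    volatile uptr slot = (uptr)&a;

    void* previous = AtomicExchangePointerSketch(slot, (uptr)&b);
    assert(previous == &a && (void*)slot == &b);
    return 0;
}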