Some things improved

shared_mutex_t implemented
GUI Emu Callbacks rewritten
fxm::import, fxm::import_always implemented
cellMsgDialog rewritten
Emu.CallAfter improved (returns std::future)
This commit is contained in:
Nekotekina 2015-09-18 01:41:14 +03:00
parent 9d68c16c62
commit 8ae3401ffa
77 changed files with 1814 additions and 1831 deletions

717
Utilities/Atomic.h Normal file
View File

@ -0,0 +1,717 @@
#pragma once
#if defined(__GNUG__)
// GCC/Clang implementations of the project's sync_* atomic API: thin wrappers
// over the __sync builtins (full-barrier atomic read-modify-write operations).
// Enabled only for integral types via the project's IS_INTEGRAL macro; the
// matching MSVC implementations live in the #elif _MSC_VER branch below.

// Atomic CAS: if *dest == comp, store exch; always returns the previous value of *dest
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_val_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_val_compare_and_swap(dest, comp, exch);
}
// Atomic CAS: as above, but returns whether the swap took place
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), bool> sync_bool_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_bool_compare_and_swap(dest, comp, exch);
}
// Atomic exchange: store value into *dest, return the previous contents
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_lock_test_and_set(volatile T* dest, T2 value)
{
return __sync_lock_test_and_set(dest, value);
}
// Atomic *dest += value, returns the previous contents
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_add(volatile T* dest, T2 value)
{
return __sync_fetch_and_add(dest, value);
}
// Atomic *dest -= value, returns the previous contents
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_sub(volatile T* dest, T2 value)
{
return __sync_fetch_and_sub(dest, value);
}
// Atomic *dest |= value, returns the previous contents
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_or(volatile T* dest, T2 value)
{
return __sync_fetch_and_or(dest, value);
}
// Atomic *dest &= value, returns the previous contents
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_and(volatile T* dest, T2 value)
{
return __sync_fetch_and_and(dest, value);
}
// Atomic *dest ^= value, returns the previous contents
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T), T> sync_fetch_and_xor(volatile T* dest, T2 value)
{
return __sync_fetch_and_xor(dest, value);
}
#elif defined(_MSC_VER)
// atomic compare and swap functions
// MSVC implementations via the _InterlockedCompareExchange* intrinsics.
// NOTE: the intrinsics take (dest, exchange, comparand) - the last two
// arguments are in the OPPOSITE order to the sync_*(dest, comp, exch)
// signature, hence `exch, comp` below.
inline u8 sync_val_compare_and_swap(volatile u8* dest, u8 comp, u8 exch)
{
return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
}
inline u16 sync_val_compare_and_swap(volatile u16* dest, u16 comp, u16 exch)
{
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
}
inline u32 sync_val_compare_and_swap(volatile u32* dest, u32 comp, u32 exch)
{
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
}
inline u64 sync_val_compare_and_swap(volatile u64* dest, u64 comp, u64 exch)
{
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
}
inline u128 sync_val_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
// The 128 bit intrinsic overwrites `comp` in place with the previous contents of *dest
_InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp);
return comp;
}
// The 8..64 bit bool variants infer success from the returned old value being equal to comp
inline bool sync_bool_compare_and_swap(volatile u8* dest, u8 comp, u8 exch)
{
return (u8)_InterlockedCompareExchange8((volatile char*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u16* dest, u16 comp, u16 exch)
{
return (u16)_InterlockedCompareExchange16((volatile short*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u32* dest, u32 comp, u32 exch)
{
return (u32)_InterlockedCompareExchange((volatile long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u64* dest, u64 comp, u64 exch)
{
return (u64)_InterlockedCompareExchange64((volatile long long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
// The 128 bit intrinsic reports success directly (returns 1 if the swap happened)
return _InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp) != 0;
}
// atomic exchange functions
// Atomically replace *ptr with val, returning the previous contents (full barrier)
inline u8 sync_lock_test_and_set(volatile u8* ptr, u8 val)
{
	return _InterlockedExchange8((volatile char*)ptr, val);
}

inline u16 sync_lock_test_and_set(volatile u16* ptr, u16 val)
{
	return _InterlockedExchange16((volatile short*)ptr, val);
}

inline u32 sync_lock_test_and_set(volatile u32* ptr, u32 val)
{
	return _InterlockedExchange((volatile long*)ptr, val);
}

inline u64 sync_lock_test_and_set(volatile u64* ptr, u64 val)
{
	return _InterlockedExchange64((volatile long long*)ptr, val);
}

// There is no 128 bit exchange intrinsic: emulate it with a CAS retry loop
inline u128 sync_lock_test_and_set(volatile u128* ptr, u128 val)
{
	for (;;)
	{
		u128 prev;
		prev.lo = ptr->lo;
		prev.hi = ptr->hi;

		if (sync_bool_compare_and_swap(ptr, prev, val))
		{
			return prev;
		}
	}
}
// atomic add functions
// Atomically perform *ptr += val, returning the previous contents (full barrier)
inline u8 sync_fetch_and_add(volatile u8* ptr, u8 val)
{
	return _InterlockedExchangeAdd8((volatile char*)ptr, val);
}

inline u16 sync_fetch_and_add(volatile u16* ptr, u16 val)
{
	return _InterlockedExchangeAdd16((volatile short*)ptr, val);
}

inline u32 sync_fetch_and_add(volatile u32* ptr, u32 val)
{
	return _InterlockedExchangeAdd((volatile long*)ptr, val);
}

inline u64 sync_fetch_and_add(volatile u64* ptr, u64 val)
{
	return _InterlockedExchangeAdd64((volatile long long*)ptr, val);
}

// 128 bit addition is emulated with a CAS retry loop over both halves
inline u128 sync_fetch_and_add(volatile u128* ptr, u128 val)
{
	for (;;)
	{
		u128 prev;
		prev.lo = ptr->lo;
		prev.hi = ptr->hi;

		u128 sum;
		sum.lo = prev.lo + val.lo;
		sum.hi = prev.hi + val.hi + (sum.lo < prev.lo); // propagate carry out of the low half

		if (sync_bool_compare_and_swap(ptr, prev, sum))
		{
			return prev;
		}
	}
}
// atomic sub functions
// Atomically perform *ptr -= val, returning the previous contents (full barrier).
// Implemented as fetch-add of the negated value for the native sizes.
inline u8 sync_fetch_and_sub(volatile u8* ptr, u8 val)
{
	return _InterlockedExchangeAdd8((volatile char*)ptr, -(char)val);
}

inline u16 sync_fetch_and_sub(volatile u16* ptr, u16 val)
{
	return _InterlockedExchangeAdd16((volatile short*)ptr, -(short)val);
}

inline u32 sync_fetch_and_sub(volatile u32* ptr, u32 val)
{
	return _InterlockedExchangeAdd((volatile long*)ptr, -(long)val);
}

inline u64 sync_fetch_and_sub(volatile u64* ptr, u64 val)
{
	return _InterlockedExchangeAdd64((volatile long long*)ptr, -(long long)val);
}

// 128 bit subtraction is emulated with a CAS retry loop over both halves
inline u128 sync_fetch_and_sub(volatile u128* ptr, u128 val)
{
	for (;;)
	{
		u128 prev;
		prev.lo = ptr->lo;
		prev.hi = ptr->hi;

		u128 diff;
		diff.lo = prev.lo - val.lo;
		diff.hi = prev.hi - val.hi - (prev.lo < val.lo); // propagate borrow into the high half

		if (sync_bool_compare_and_swap(ptr, prev, diff))
		{
			return prev;
		}
	}
}
// atomic `bitwise or` functions
// Atomically perform *ptr |= val, returning the previous contents (full barrier)
inline u8 sync_fetch_and_or(volatile u8* ptr, u8 val)
{
	return _InterlockedOr8((volatile char*)ptr, val);
}

inline u16 sync_fetch_and_or(volatile u16* ptr, u16 val)
{
	return _InterlockedOr16((volatile short*)ptr, val);
}

inline u32 sync_fetch_and_or(volatile u32* ptr, u32 val)
{
	return _InterlockedOr((volatile long*)ptr, val);
}

inline u64 sync_fetch_and_or(volatile u64* ptr, u64 val)
{
	return _InterlockedOr64((volatile long long*)ptr, val);
}

// 128 bit OR is emulated with a CAS retry loop (halves are independent)
inline u128 sync_fetch_and_or(volatile u128* ptr, u128 val)
{
	for (;;)
	{
		u128 prev;
		prev.lo = ptr->lo;
		prev.hi = ptr->hi;

		u128 next;
		next.lo = prev.lo | val.lo;
		next.hi = prev.hi | val.hi;

		if (sync_bool_compare_and_swap(ptr, prev, next))
		{
			return prev;
		}
	}
}
// atomic `bitwise and` functions
// Atomically perform *ptr &= val, returning the previous contents (full barrier)
inline u8 sync_fetch_and_and(volatile u8* ptr, u8 val)
{
	return _InterlockedAnd8((volatile char*)ptr, val);
}

inline u16 sync_fetch_and_and(volatile u16* ptr, u16 val)
{
	return _InterlockedAnd16((volatile short*)ptr, val);
}

inline u32 sync_fetch_and_and(volatile u32* ptr, u32 val)
{
	return _InterlockedAnd((volatile long*)ptr, val);
}

inline u64 sync_fetch_and_and(volatile u64* ptr, u64 val)
{
	return _InterlockedAnd64((volatile long long*)ptr, val);
}

// 128 bit AND is emulated with a CAS retry loop (halves are independent)
inline u128 sync_fetch_and_and(volatile u128* ptr, u128 val)
{
	for (;;)
	{
		u128 prev;
		prev.lo = ptr->lo;
		prev.hi = ptr->hi;

		u128 next;
		next.lo = prev.lo & val.lo;
		next.hi = prev.hi & val.hi;

		if (sync_bool_compare_and_swap(ptr, prev, next))
		{
			return prev;
		}
	}
}
// atomic `bitwise xor` functions
// Atomically perform *ptr ^= val, returning the previous contents (full barrier)
inline u8 sync_fetch_and_xor(volatile u8* ptr, u8 val)
{
	return _InterlockedXor8((volatile char*)ptr, val);
}

inline u16 sync_fetch_and_xor(volatile u16* ptr, u16 val)
{
	return _InterlockedXor16((volatile short*)ptr, val);
}

inline u32 sync_fetch_and_xor(volatile u32* ptr, u32 val)
{
	return _InterlockedXor((volatile long*)ptr, val);
}

inline u64 sync_fetch_and_xor(volatile u64* ptr, u64 val)
{
	return _InterlockedXor64((volatile long long*)ptr, val);
}

// 128 bit XOR is emulated with a CAS retry loop (halves are independent)
inline u128 sync_fetch_and_xor(volatile u128* ptr, u128 val)
{
	for (;;)
	{
		u128 prev;
		prev.lo = ptr->lo;
		prev.hi = ptr->hi;

		u128 next;
		next.lo = prev.lo ^ val.lo;
		next.hi = prev.hi ^ val.hi;

		if (sync_bool_compare_and_swap(ptr, prev, next))
		{
			return prev;
		}
	}
}
#endif /* _MSC_VER */
// Maps any type T to the same-sized unsigned integral type used as the raw
// atomic representation by atomic_t<>; unsupported sizes fail to compile via
// the static_assert in the primary template.
template<typename T, std::size_t Size = sizeof(T)> struct atomic_storage
{
static_assert(!Size, "Invalid atomic type");
};
template<typename T> struct atomic_storage<T, 1>
{
using type = u8;
};
template<typename T> struct atomic_storage<T, 2>
{
using type = u16;
};
template<typename T> struct atomic_storage<T, 4>
{
using type = u32;
};
template<typename T> struct atomic_storage<T, 8>
{
using type = u64;
};
template<typename T> struct atomic_storage<T, 16>
{
using type = u128;
};
// Convenience alias: the raw storage type for T
template<typename T> using atomic_storage_t = typename atomic_storage<T>::type;
// result wrapper to deal with void result type
// atomic_op_result_t captures the value produced by invoking `func` on the
// working copy of an atomic variable, so atomic_op() can hand it back to the
// caller after a successful CAS. Four cases: callable object vs pointer to
// member function, each with void and non-void return type.
template<typename T, typename RT, typename VT> struct atomic_op_result_t
{
RT result;
template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
: result(std::move(func(var, std::forward<Args>(args)...)))
{
}
// Extract the stored result (moves it out; call at most once)
inline RT move()
{
return std::move(result);
}
};
// void specialization: result is the initial value of the first arg
template<typename T, typename VT> struct atomic_op_result_t<T, void, VT>
{
VT result;
template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
: result(var)
{
// `result` was copied BEFORE the call, so it holds the pre-op value
func(var, std::forward<Args>(args)...);
}
inline VT move()
{
return std::move(result);
}
};
// member function specialization
template<typename CT, typename... FArgs, typename RT, typename VT> struct atomic_op_result_t<RT(CT::*)(FArgs...), RT, VT>
{
RT result;
template<typename... Args> inline atomic_op_result_t(RT(CT::*func)(FArgs...), VT& var, Args&&... args)
: result(std::move((var.*func)(std::forward<Args>(args)...)))
{
}
inline RT move()
{
return std::move(result);
}
};
// member function void specialization
template<typename CT, typename... FArgs, typename VT> struct atomic_op_result_t<void(CT::*)(FArgs...), void, VT>
{
VT result;
template<typename... Args> inline atomic_op_result_t(void(CT::*func)(FArgs...), VT& var, Args&&... args)
: result(var)
{
(var.*func)(std::forward<Args>(args)...);
}
inline VT move()
{
return std::move(result);
}
};
// Lock-free atomic wrapper for a type T, implemented on top of the sync_*
// primitives via the same-sized unsigned storage type (atomic_storage_t).
template<typename T> class atomic_t
{
using type = std::remove_cv_t<T>;
using stype = atomic_storage_t<type>;
using storage = atomic_storage<type>;
static_assert(alignof(type) <= alignof(stype), "atomic_t<> error: unexpected alignment");
stype m_data;
// Plain volatile store; used for storage sizes where it does not tear
template<typename T2> static inline void write_relaxed(volatile T2& data, const T2& value)
{
data = value;
}
// 128 bit overload: a plain store would tear, so use an atomic exchange
static inline void write_relaxed(volatile u128& data, const u128& value)
{
sync_lock_test_and_set(&data, value);
}
// Plain volatile load; used for storage sizes where it does not tear
template<typename T2> static inline T2 read_relaxed(const volatile T2& data)
{
return data;
}
// 128 bit overload: CAS with comp == exch == 0 returns the current value
// without changing it (a zero value is merely replaced by zero)
static inline u128 read_relaxed(const volatile u128& value)
{
return sync_val_compare_and_swap(const_cast<volatile u128*>(&value), u128{0}, u128{0});
}
public:
// Reinterpret a T value as its raw unsigned storage representation
static inline const stype to_subtype(const type& value)
{
return reinterpret_cast<const stype&>(value);
}
// Reinterpret a raw storage value back as T
static inline const type from_subtype(const stype value)
{
return reinterpret_cast<const type&>(value);
}
atomic_t() = default;
atomic_t(const atomic_t&) = delete;
atomic_t(atomic_t&&) = delete;
inline atomic_t(type value)
: m_data(to_subtype(value))
{
}
atomic_t& operator =(const atomic_t&) = delete;
atomic_t& operator =(atomic_t&&) = delete;
// Assignment performs an atomic store (see store())
inline atomic_t& operator =(type value)
{
return write_relaxed(m_data, to_subtype(value)), *this;
}
// Implicit conversion performs an atomic load (see load())
operator type() const volatile
{
return from_subtype(read_relaxed(m_data));
}
// Unsafe direct access
stype* raw_data()
{
return reinterpret_cast<stype*>(&m_data);
}
// Unsafe direct access
type& raw()
{
return reinterpret_cast<type&>(m_data);
}
// Atomically compare data with cmp, replace with exch if equal, return previous data value anyway
inline const type compare_and_swap(const type& cmp, const type& exch) volatile
{
return from_subtype(sync_val_compare_and_swap(&m_data, to_subtype(cmp), to_subtype(exch)));
}
// Atomically compare data with cmp, replace with exch if equal, return true if data was replaced
inline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
{
return sync_bool_compare_and_swap(&m_data, to_subtype(cmp), to_subtype(exch));
}
// Atomically replace data with exch, return previous data value
inline const type exchange(const type& exch) volatile
{
return from_subtype(sync_lock_test_and_set(&m_data, to_subtype(exch)));
}
// Atomically read data, possibly without memory barrier (not for 128 bit)
inline const type load() const volatile
{
return from_subtype(read_relaxed(m_data));
}
// Atomically write data, possibly without memory barrier (not for 128 bit)
inline void store(const type& value) volatile
{
write_relaxed(m_data, to_subtype(value));
}
// Perform an atomic operation on data (func is either pointer to member function or callable object with a T& first arg);
// Returns the result of the callable object call or previous (old) value of the atomic variable if the return type is void
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, Args...)>> auto atomic_op(F func, Args&&... args) volatile -> decltype(atomic_op_result_t<F, RT, T>::result)
{
while (true)
{
// Read the old value from memory
const stype old = read_relaxed(m_data);
// Copy the old value
stype _new = old;
// Call atomic op for the local copy of the old value and save the return value of the function
// NOTE: args are deliberately NOT forwarded - they may be reused if the CAS below fails and the loop retries
atomic_op_result_t<F, RT, T> result(func, reinterpret_cast<type&>(_new), args...);
// Atomically compare value with `old`, replace with `_new` and return on success
if (sync_bool_compare_and_swap(&m_data, old, _new)) return result.move();
}
}
// Atomic bitwise OR, returns previous data
inline const type _or(const type& right) volatile
{
return from_subtype(sync_fetch_and_or(&m_data, to_subtype(right)));
}
// Atomic bitwise AND, returns previous data
inline const type _and(const type& right) volatile
{
return from_subtype(sync_fetch_and_and(&m_data, to_subtype(right)));
}
// Atomic bitwise AND NOT (inverts right argument), returns previous data
inline const type _and_not(const type& right) volatile
{
return from_subtype(sync_fetch_and_and(&m_data, ~to_subtype(right)));
}
// Atomic bitwise XOR, returns previous data
inline const type _xor(const type& right) volatile
{
return from_subtype(sync_fetch_and_xor(&m_data, to_subtype(right)));
}
// Unlike _or(), the compound assignments return the NEW value (old op right)
inline const type operator |=(const type& right) volatile
{
return from_subtype(sync_fetch_and_or(&m_data, to_subtype(right)) | to_subtype(right));
}
inline const type operator &=(const type& right) volatile
{
return from_subtype(sync_fetch_and_and(&m_data, to_subtype(right)) & to_subtype(right));
}
inline const type operator ^=(const type& right) volatile
{
return from_subtype(sync_fetch_and_xor(&m_data, to_subtype(right)) ^ to_subtype(right));
}
};
// Increment/decrement and += / -= for atomic_t over native integral types,
// implemented directly with the lock-free fetch-add/sub primitives.
// Pre-increment: returns the new value
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator ++(atomic_t<T>& left)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1) + 1);
}
// Pre-decrement: returns the new value
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator --(atomic_t<T>& left)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1) - 1);
}
// Post-increment: returns the previous value
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator ++(atomic_t<T>& left, int)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1));
}
// Post-decrement: returns the previous value
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), T> operator --(atomic_t<T>& left, int)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1));
}
// += / -= return the new value
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, T> operator +=(atomic_t<T>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), right) + right);
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, T> operator -=(atomic_t<T>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), right) - right);
}
// Same operator set for atomic_t<nse_t<T>> (native-endian se_t wrapper): the
// raw storage holds the value unswapped, so fetch-add/sub work on it directly
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator ++(atomic_t<nse_t<T>>& left)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1) + 1);
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator --(atomic_t<nse_t<T>>& left)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1) - 1);
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator ++(atomic_t<nse_t<T>>& left, int)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), 1));
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), nse_t<T>> operator --(atomic_t<nse_t<T>>& left, int)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), 1));
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, nse_t<T>> operator +=(atomic_t<nse_t<T>>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_add(left.raw_data(), right) + right);
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, nse_t<T>> operator -=(atomic_t<nse_t<T>>& left, const T2& right)
{
return left.from_subtype(sync_fetch_and_sub(left.raw_data(), right) - right);
}
// Increment/decrement and += / -= for atomic_t over byte-swapped se_t<T>:
// the raw storage is stored byte-swapped, so integer arithmetic cannot be
// applied to it directly; each operator goes through atomic_op() (a CAS retry
// loop) which operates on the logical, native-endian value instead.
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator ++(atomic_t<se_t<T>>& left)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return ++value;
});
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator --(atomic_t<se_t<T>>& left)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return --value;
});
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator ++(atomic_t<se_t<T>>& left, int)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return value++;
});
}
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T), se_t<T>> operator --(atomic_t<se_t<T>>& left, int)
{
return left.atomic_op([](se_t<T>& value) -> se_t<T>
{
return value--;
});
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, se_t<T>> operator +=(atomic_t<se_t<T>>& left, const T2& right)
{
return left.atomic_op([&](se_t<T>& value) -> se_t<T>
{
return value += right;
});
}
template<typename T, typename T2> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<T2, T>::value, se_t<T>> operator -=(atomic_t<se_t<T>>& left, const T2& right)
{
return left.atomic_op([&](se_t<T>& value) -> se_t<T>
{
return value -= right;
});
}
template<typename T> using atomic_be_t = atomic_t<be_t<T>>; // Atomic BE Type (for PS3 virtual memory)
template<typename T> using atomic_le_t = atomic_t<le_t<T>>; // Atomic LE Type (for PSV virtual memory)
// Algorithm for std::atomic; similar to atomic_t::atomic_op()
// Applies func to a local copy of the current value and publishes the modified
// copy with compare_exchange_strong, retrying until no concurrent writer
// intervenes; returns func's result (or the pre-op value when func returns void)
template<typename T, typename F, typename... Args, typename RT = std::result_of_t<F(T&, Args...)>> auto atomic_op(std::atomic<T>& var, F func, Args&&... args) -> decltype(atomic_op_result_t<F, RT, T>::result)
{
auto old = var.load();
while (true)
{
auto _new = old;
// args are deliberately not forwarded: they may be reused on a retry
atomic_op_result_t<F, RT, T> result(func, _new, args...);
// on failure compare_exchange_strong refreshes `old` with the current value
if (var.compare_exchange_strong(old, _new)) return result.move();
}
}

View File

@ -374,9 +374,6 @@ inline v128 operator ~(const v128& other)
return v128::from64(~other._u64[0], ~other._u64[1]);
}
#define IS_INTEGER(t) (std::is_integral<t>::value || std::is_enum<t>::value)
#define IS_BINARY_COMPARABLE(t1, t2) (IS_INTEGER(t1) && IS_INTEGER(t2) && sizeof(t1) == sizeof(t2))
template<typename T, std::size_t Size = sizeof(T)> struct se_storage
{
static_assert(!Size, "Bad se_storage<> type");
@ -386,7 +383,7 @@ template<typename T> struct se_storage<T, 2>
{
using type = u16;
static constexpr u16 swap(u16 src)
[[deprecated]] static constexpr u16 swap(u16 src) // for reference
{
return (src >> 8) | (src << 8);
}
@ -407,7 +404,7 @@ template<typename T> struct se_storage<T, 4>
{
using type = u32;
static constexpr u32 swap(u32 src)
[[deprecated]] static constexpr u32 swap(u32 src) // for reference
{
return (src >> 24) | (src << 24) | ((src >> 8) & 0x0000ff00) | ((src << 8) & 0x00ff0000);
}
@ -428,7 +425,7 @@ template<typename T> struct se_storage<T, 8>
{
using type = u64;
static constexpr u64 swap(u64 src)
[[deprecated]] static constexpr u64 swap(u64 src) // for reference
{
return (src >> 56) | (src << 56) |
((src >> 40) & 0x000000000000ff00) |
@ -491,7 +488,10 @@ template<typename T1, typename T2> struct se_convert
static struct se_raw_tag_t {} const se_raw{};
template<typename T, bool Se = true> class se_t
template<typename T, bool Se = true> class se_t;
// se_t with switched endianness
template<typename T> class se_t<T, true>
{
using type = std::remove_cv_t<T>;
using stype = se_storage_t<type>;
@ -506,16 +506,32 @@ template<typename T, bool Se = true> class se_t
static_assert(!std::is_enum<type>::value, "se_t<> error: invalid type (enumeration), use integral type instead");
static_assert(alignof(type) == alignof(stype), "se_t<> error: unexpected alignment");
template<typename T2, bool = std::is_integral<T2>::value> struct bool_converter
{
static inline bool to_bool(const se_t<T2>& value)
{
return static_cast<bool>(value.value());
}
};
template<typename T2> struct bool_converter<T2, true>
{
static inline bool to_bool(const se_t<T2>& value)
{
return value.m_data != 0;
}
};
public:
se_t() = default;
se_t(const se_t& right) = default;
template<typename CT, typename = std::enable_if_t<std::is_constructible<type, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value>> inline se_t(const CT& value)
inline se_t(type value)
: m_data(storage::to(value))
{
}
// construct directly from raw data (don't use)
inline se_t(const stype& raw_value, const se_raw_tag_t&)
: m_data(raw_value)
@ -528,29 +544,27 @@ public:
}
// access underlying raw data (don't use)
inline const stype& raw_data() const
inline const stype& raw_data() const noexcept
{
return m_data;
}
se_t& operator =(const se_t&) = default;
template<typename CT> inline std::enable_if_t<std::is_assignable<type&, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value, se_t&> operator =(const CT& value)
inline se_t& operator =(type value)
{
return m_data = storage::to(value), *this;
}
inline operator type() const
{
return value();
return storage::from(m_data);
}
// optimization
explicit inline operator bool() const
{
static_assert(std::is_convertible<T, bool>::value, "Illegal conversion to bool");
return m_data != 0;
return bool_converter<type>::to_bool(*this);
}
// optimization
@ -560,7 +574,7 @@ public:
}
// optimization
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator &=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator &=(CT right)
{
return m_data &= storage::to(right), *this;
}
@ -572,7 +586,7 @@ public:
}
// optimization
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator |=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator |=(CT right)
{
return m_data |= storage::to(right), *this;
}
@ -584,12 +598,13 @@ public:
}
// optimization
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator ^=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator ^=(CT right)
{
return m_data ^= storage::to(right), *this;
}
};
// se_t with native endianness
template<typename T> class se_t<T, false>
{
using type = std::remove_cv_t<T>;
@ -607,8 +622,8 @@ public:
se_t(const se_t&) = default;
template<typename CT, typename = std::enable_if_t<std::is_constructible<type, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value>> inline se_t(CT&& value)
: m_data(std::forward<CT>(value))
inline se_t(type value)
: m_data(value)
{
}
@ -619,32 +634,35 @@ public:
se_t& operator =(const se_t& value) = default;
template<typename CT> inline std::enable_if_t<std::is_assignable<type&, CT>::value && !std::is_same<se_t<type>, std::decay_t<CT>>::value, se_t&> operator =(const CT& value)
inline se_t& operator =(type value)
{
return m_data = value, *this;
}
inline operator type() const
{
return value();
return m_data;
}
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator &=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator &=(const CT& right)
{
return m_data &= right, *this;
}
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator |=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator |=(const CT& right)
{
return m_data |= right, *this;
}
template<typename CT> inline std::enable_if_t<std::is_integral<T>::value && std::is_convertible<CT, T>::value, se_t&> operator ^=(const CT& right)
template<typename CT> inline std::enable_if_t<IS_INTEGRAL(T) && std::is_convertible<CT, T>::value, se_t&> operator ^=(const CT& right)
{
return m_data ^= right, *this;
}
};
// se_t with native endianness (alias)
template<typename T> using nse_t = se_t<T, false>;
template<typename T, bool Se, typename T1> inline se_t<T, Se>& operator +=(se_t<T, Se>& left, const T1& right)
{
auto value = left.value();
@ -722,15 +740,15 @@ template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator ==(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2), bool> operator ==(const se_t<T1>& left, T2 right)
{
return left.raw_data() == se_storage<T2>::to(right);
return left.raw_data() == se_storage<T1>::to(right);
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator ==(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2), bool> operator ==(T1 left, const se_t<T2>& right)
{
return se_storage<T1>::to(left) == right.raw_data();
return se_storage<T2>::to(left) == right.raw_data();
}
// optimization
@ -740,75 +758,75 @@ template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator !=(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2), bool> operator !=(const se_t<T1>& left, T2 right)
{
return left.raw_data() != se_storage<T2>::to(right);
return left.raw_data() != se_storage<T1>::to(right);
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2), bool> operator !=(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2), bool> operator !=(T1 left, const se_t<T2>& right)
{
return se_storage<T1>::to(left) != right.raw_data();
return se_storage<T2>::to(left) != right.raw_data();
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const se_t<T1>& left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() & right.raw_data()), se_raw };
return{ left.raw_data() & right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const se_t<T1>& left, T2 right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() & se_storage<T2>::to(right)), se_raw };
return{ left.raw_data() & se_storage<T1>::to(right), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() & T2())>> operator &(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2) && sizeof(T2) >= 4, se_t<decltype(T1() & T2())>> operator &(T1 left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(se_storage<T1>::to(left) & right.raw_data()), se_raw };
return{ se_storage<T2>::to(left) & right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const se_t<T1>& left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() | right.raw_data()), se_raw };
return{ left.raw_data() | right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const se_t<T1>& left, T2 right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() | se_storage<T2>::to(right)), se_raw };
return{ left.raw_data() | se_storage<T1>::to(right), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() | T2())>> operator |(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2) && sizeof(T2) >= 4, se_t<decltype(T1() | T2())>> operator |(T1 left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(se_storage<T1>::to(left) | right.raw_data()), se_raw };
return{ se_storage<T2>::to(left) | right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const se_t<T1>& left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() ^ right.raw_data()), se_raw };
return{ left.raw_data() ^ right.raw_data(), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const se_t<T1>& left, const T2& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGRAL(T1) && IS_INTEGER(T2) && sizeof(T1) >= sizeof(T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const se_t<T1>& left, T2 right)
{
return{ static_cast<se_storage_t<T1>>(left.raw_data() ^ se_storage<T2>::to(right)), se_raw };
return{ left.raw_data() ^ se_storage<T1>::to(right), se_raw };
}
// optimization
template<typename T1, typename T2> inline std::enable_if_t<IS_BINARY_COMPARABLE(T1, T2) && sizeof(T1) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(const T1& left, const se_t<T2>& right)
template<typename T1, typename T2> inline std::enable_if_t<IS_INTEGER(T1) && IS_INTEGRAL(T2) && sizeof(T1) <= sizeof(T2) && sizeof(T2) >= 4, se_t<decltype(T1() ^ T2())>> operator ^(T1 left, const se_t<T2>& right)
{
return{ static_cast<se_storage_t<T1>>(se_storage<T1>::to(left) ^ right.raw_data()), se_raw };
return{ se_storage<T2>::to(left) ^ right.raw_data(), se_raw };
}
// optimization
template<typename T> inline std::enable_if_t<IS_INTEGER(T) && sizeof(T) >= 4, se_t<decltype(~T())>> operator ~(const se_t<T>& right)
template<typename T> inline std::enable_if_t<IS_INTEGRAL(T) && sizeof(T) >= 4, se_t<decltype(~T())>> operator ~(const se_t<T>& right)
{
return{ static_cast<se_storage_t<T>>(~right.raw_data()), se_raw };
return{ ~right.raw_data(), se_raw };
}
#ifdef IS_LE_MACHINE

View File

@ -232,334 +232,6 @@ struct alignas(16) uint128_t
using __uint128_t = uint128_t;
#endif
// SFINAE Helper type
template<typename T, typename TT = void> using if_integral_t = std::enable_if_t<std::is_integral<T>::value || std::is_same<std::remove_cv_t<T>, __uint128_t>::value, TT>;
#if defined(__GNUG__)
template<typename T, typename T2> inline if_integral_t<T, T> sync_val_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_val_compare_and_swap(dest, comp, exch);
}
template<typename T, typename T2> inline if_integral_t<T, bool> sync_bool_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
return __sync_bool_compare_and_swap(dest, comp, exch);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_lock_test_and_set(volatile T* dest, T2 value)
{
return __sync_lock_test_and_set(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_add(volatile T* dest, T2 value)
{
return __sync_fetch_and_add(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_sub(volatile T* dest, T2 value)
{
return __sync_fetch_and_sub(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_or(volatile T* dest, T2 value)
{
return __sync_fetch_and_or(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_and(volatile T* dest, T2 value)
{
return __sync_fetch_and_and(dest, value);
}
template<typename T, typename T2> inline if_integral_t<T, T> sync_fetch_and_xor(volatile T* dest, T2 value)
{
return __sync_fetch_and_xor(dest, value);
}
#endif /* __GNUG__ */
#if defined(_MSC_VER)
// atomic compare and swap functions
inline uint8_t sync_val_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
}
inline uint16_t sync_val_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
}
inline uint32_t sync_val_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
}
inline uint64_t sync_val_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
}
inline uint128_t sync_val_compare_and_swap(volatile uint128_t* dest, uint128_t comp, uint128_t exch)
{
_InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp);
return comp;
}
inline bool sync_bool_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
return (uint8_t)_InterlockedCompareExchange8((volatile char*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
return (uint16_t)_InterlockedCompareExchange16((volatile short*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
return (uint32_t)_InterlockedCompareExchange((volatile long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
return (uint64_t)_InterlockedCompareExchange64((volatile long long*)dest, exch, comp) == comp;
}
inline bool sync_bool_compare_and_swap(volatile uint128_t* dest, uint128_t comp, uint128_t exch)
{
return _InterlockedCompareExchange128((volatile long long*)dest, exch.hi, exch.lo, (long long*)&comp) != 0;
}
// atomic exchange functions
inline uint8_t sync_lock_test_and_set(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchange8((volatile char*)dest, value);
}
inline uint16_t sync_lock_test_and_set(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchange16((volatile short*)dest, value);
}
inline uint32_t sync_lock_test_and_set(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchange((volatile long*)dest, value);
}
inline uint64_t sync_lock_test_and_set(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchange64((volatile long long*)dest, value);
}
inline uint128_t sync_lock_test_and_set(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
if (sync_bool_compare_and_swap(dest, old, value)) return old;
}
}
// atomic add functions
inline uint8_t sync_fetch_and_add(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_add(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_add(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchangeAdd((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_add(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, value);
}
inline uint128_t sync_fetch_and_add(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo + value.lo;
_new.hi = old.hi + value.hi + (_new.lo < value.lo);
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic sub functions
inline uint8_t sync_fetch_and_sub(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedExchangeAdd8((volatile char*)dest, -(char)value);
}
inline uint16_t sync_fetch_and_sub(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchangeAdd16((volatile short*)dest, -(short)value);
}
inline uint32_t sync_fetch_and_sub(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchangeAdd((volatile long*)dest, -(long)value);
}
inline uint64_t sync_fetch_and_sub(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedExchangeAdd64((volatile long long*)dest, -(long long)value);
}
inline uint128_t sync_fetch_and_sub(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo - value.lo;
_new.hi = old.hi - value.hi - (old.lo < value.lo);
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic `bitwise or` functions
inline uint8_t sync_fetch_and_or(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedOr8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_or(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedOr16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_or(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedOr((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_or(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedOr64((volatile long long*)dest, value);
}
inline uint128_t sync_fetch_and_or(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo | value.lo;
_new.hi = old.hi | value.hi;
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic `bitwise and` functions
inline uint8_t sync_fetch_and_and(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedAnd8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_and(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedAnd16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_and(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedAnd((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_and(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedAnd64((volatile long long*)dest, value);
}
inline uint128_t sync_fetch_and_and(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo & value.lo;
_new.hi = old.hi & value.hi;
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
// atomic `bitwise xor` functions
inline uint8_t sync_fetch_and_xor(volatile uint8_t* dest, uint8_t value)
{
return _InterlockedXor8((volatile char*)dest, value);
}
inline uint16_t sync_fetch_and_xor(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedXor16((volatile short*)dest, value);
}
inline uint32_t sync_fetch_and_xor(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedXor((volatile long*)dest, value);
}
inline uint64_t sync_fetch_and_xor(volatile uint64_t* dest, uint64_t value)
{
return _InterlockedXor64((volatile long long*)dest, value);
}
inline uint128_t sync_fetch_and_xor(volatile uint128_t* dest, uint128_t value)
{
while (true)
{
uint128_t old;
old.lo = dest->lo;
old.hi = dest->hi;
uint128_t _new;
_new.lo = old.lo ^ value.lo;
_new.hi = old.hi ^ value.hi;
if (sync_bool_compare_and_swap(dest, old, _new)) return old;
}
}
#endif /* _MSC_VER */
inline uint32_t cntlz32(uint32_t arg)
{
#if defined(_MSC_VER)

View File

@ -22,7 +22,7 @@ public:
const u32 max_value;
semaphore_t(u32 max_value = 1, u32 value = 0)
: m_var({ value, 0 })
: m_var(sync_var_t{ value, 0 })
, max_value(max_value)
{
}

152
Utilities/SharedMutex.cpp Normal file
View File

@ -0,0 +1,152 @@
#include "stdafx.h"
#include "SharedMutex.h"
static const u32 MAX_READERS = 0x7fffffff; // 2^31-1
inline bool shared_mutex_t::try_lock_shared()
{
return m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.readers < MAX_READERS && !info.writers && !info.waiting_readers && !info.waiting_writers)
{
info.readers++;
return true;
}
return false;
});
}
void shared_mutex_t::lock_shared()
{
if (!try_lock_shared())
{
std::unique_lock<std::mutex> lock(m_mutex);
m_wrcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.waiting_readers < UINT16_MAX)
{
info.waiting_readers++;
return true;
}
return false;
})));
m_rcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (!info.writers && !info.waiting_writers && info.readers < MAX_READERS)
{
info.readers++;
return true;
}
return false;
})));
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.waiting_readers--)
{
throw EXCEPTION("Invalid value");
}
});
if (info.waiting_readers == UINT16_MAX)
{
m_wrcv.notify_one();
}
}
}
void shared_mutex_t::unlock_shared()
{
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.readers--)
{
throw EXCEPTION("Not locked");
}
});
const bool notify_writers = info.readers == 1 && info.writers;
const bool notify_readers = info.readers == UINT32_MAX && info.waiting_readers;
if (notify_writers || notify_readers)
{
std::lock_guard<std::mutex> lock(m_mutex);
if (notify_writers) m_wcv.notify_one();
if (notify_readers) m_rcv.notify_one();
}
}
inline bool shared_mutex_t::try_lock()
{
return m_info.compare_and_swap_test({ 0, 0, 0, 0 }, { 0, 1, 0, 0 });
}
void shared_mutex_t::lock()
{
if (!try_lock())
{
std::unique_lock<std::mutex> lock(m_mutex);
m_wwcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (info.waiting_writers < UINT16_MAX)
{
info.waiting_writers++;
return true;
}
return false;
})));
m_wcv.wait(lock, WRAP_EXPR(m_info.atomic_op([](ownership_info_t& info) -> bool
{
if (!info.writers)
{
info.writers++;
return true;
}
return false;
})));
m_wcv.wait(lock, WRAP_EXPR(m_info.load().readers == 0));
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.waiting_writers--)
{
throw EXCEPTION("Invalid value");
}
});
if (info.waiting_writers == UINT16_MAX)
{
m_wwcv.notify_one();
}
}
}
void shared_mutex_t::unlock()
{
const auto info = m_info.atomic_op([](ownership_info_t& info)
{
if (!info.writers--)
{
throw EXCEPTION("Not locked");
}
});
if (info.waiting_writers || info.waiting_readers)
{
std::lock_guard<std::mutex> lock(m_mutex);
if (info.waiting_writers) m_wcv.notify_one();
else if (info.waiting_readers) m_rcv.notify_all();
}
}

46
Utilities/SharedMutex.h Normal file
View File

@ -0,0 +1,46 @@
#pragma once
#include <shared_mutex>
// An attempt to create lock-free (in optimistic case) implementation similar to std::shared_mutex;
// MSVC implementation of std::shared_timed_mutex is not lock-free and thus may be slow, and std::shared_mutex is not available.
class shared_mutex_t
{
struct ownership_info_t
{
u32 readers : 31;
u32 writers : 1;
u16 waiting_readers;
u16 waiting_writers;
};
atomic_t<ownership_info_t> m_info{};
std::mutex m_mutex;
std::condition_variable m_rcv;
std::condition_variable m_wcv;
std::condition_variable m_wrcv;
std::condition_variable m_wwcv;
public:
shared_mutex_t() = default;
// Lock in shared mode
void lock_shared();
// Try to lock in shared mode
bool try_lock_shared();
// Unlock in shared mode
void unlock_shared();
// Lock exclusively
void lock();
// Try to lock exclusively
bool try_lock();
// Unlock exclusively
void unlock();
};

View File

@ -145,8 +145,7 @@ namespace fmt
std::string to_udec(u64 value);
std::string to_sdec(s64 value);
template<typename T, bool is_enum = std::is_enum<T>::value>
struct unveil
template<typename T, bool is_enum = std::is_enum<T>::value> struct unveil
{
using result_type = T;
@ -156,8 +155,7 @@ namespace fmt
}
};
template<>
struct unveil<char*, false>
template<> struct unveil<char*, false>
{
using result_type = const char*;
@ -167,8 +165,7 @@ namespace fmt
}
};
template<size_t N>
struct unveil<const char[N], false>
template<std::size_t N> struct unveil<const char[N], false>
{
using result_type = const char*;
@ -178,8 +175,7 @@ namespace fmt
}
};
template<>
struct unveil<std::string, false>
template<> struct unveil<std::string, false>
{
using result_type = const char*;
@ -189,8 +185,7 @@ namespace fmt
}
};
template<typename T>
struct unveil<T, true>
template<typename T> struct unveil<T, true>
{
using result_type = std::underlying_type_t<T>;
@ -200,25 +195,13 @@ namespace fmt
}
};
template<typename T>
struct unveil<be_t<T>, false>
template<typename T, bool Se> struct unveil<se_t<T, Se>, false>
{
using result_type = typename unveil<T>::result_type;
force_inline static result_type get_value(const be_t<T>& arg)
force_inline static result_type get_value(const se_t<T, Se>& arg)
{
return unveil<T>::get_value(arg.value());
}
};
template<typename T>
struct unveil<le_t<T>, false>
{
using result_type = typename unveil<T>::result_type;
force_inline static result_type get_value(const le_t<T>& arg)
{
return unveil<T>::get_value(arg.value());
return unveil<T>::get_value(arg);
}
};
@ -270,11 +253,11 @@ namespace fmt
}
}
struct exception
struct exception : public std::exception
{
std::unique_ptr<char[]> message;
template<typename... Args> never_inline safe_buffers exception(const char* file, int line, const char* func, const char* text, Args... args)
template<typename... Args> never_inline safe_buffers exception(const char* file, int line, const char* func, const char* text, Args... args) noexcept
{
const std::string data = format(text, args...) + format("\n(in file %s:%d, in function %s)", file, line, func);
@ -283,16 +266,16 @@ namespace fmt
std::memcpy(message.get(), data.c_str(), data.size() + 1);
}
exception(const exception& other)
exception(const exception& other) noexcept
{
const std::size_t size = std::strlen(other);
const std::size_t size = std::strlen(other.message.get());
message.reset(new char[size + 1]);
std::memcpy(message.get(), other, size + 1);
std::memcpy(message.get(), other.message.get(), size + 1);
}
operator const char*() const
virtual const char* what() const noexcept override
{
return message.get();
}

View File

@ -1121,6 +1121,8 @@ void _se_translator(unsigned int u, EXCEPTION_POINTERS* pExp)
{
throw EXCEPTION("Access violation %s location 0x%llx", is_writing ? "writing" : "reading", addr64);
}
//__int2c(); // if it crashed there, check the callstack for the actual source of the crash
}
const PVOID exception_handler = (atexit([]{ RemoveVectoredExceptionHandler(exception_handler); }), AddVectoredExceptionHandler(1, [](PEXCEPTION_POINTERS pExp) -> LONG
@ -1281,14 +1283,9 @@ void named_thread_t::start(std::function<std::string()> name, std::function<void
LOG_NOTICE(GENERAL, "Thread ended");
}
}
catch (const fmt::exception& e)
{
LOG_ERROR(GENERAL, "Exception: %s", e.message.get());
Emu.Pause();
}
catch (const std::exception& e)
{
LOG_ERROR(GENERAL, "STD Exception: %s", e.what());
LOG_ERROR(GENERAL, "Exception: %s", e.what());
Emu.Pause();
}
catch (EmulationStopped)

View File

@ -147,7 +147,7 @@ class squeue_t
public:
squeue_t()
: m_sync({})
: m_sync(squeue_sync_var_t{})
{
}
@ -156,9 +156,9 @@ public:
return sq_size;
}
bool is_full() const volatile
bool is_full() const
{
return m_sync.data.count == sq_size;
return m_sync.load().count == sq_size;
}
bool push(const T& data, const std::function<bool()>& test_exit)

View File

@ -28,7 +28,7 @@ void armv7_init_tls()
for (auto& v : g_armv7_tls_owners)
{
v.store(0, std::memory_order_relaxed);
v = 0;
}
}
@ -53,8 +53,8 @@ u32 armv7_get_tls(u32 thread)
if (g_armv7_tls_owners[i].compare_exchange_strong(old, thread))
{
const u32 addr = g_armv7_tls_start + i * Emu.GetTLSMemsz(); // get TLS address
memcpy(vm::get_ptr(addr), vm::get_ptr(Emu.GetTLSAddr()), Emu.GetTLSFilesz()); // initialize from TLS image
memset(vm::get_ptr(addr + Emu.GetTLSFilesz()), 0, Emu.GetTLSMemsz() - Emu.GetTLSFilesz()); // fill the rest with zeros
std::memcpy(vm::get_ptr(addr), vm::get_ptr(Emu.GetTLSAddr()), Emu.GetTLSFilesz()); // initialize from TLS image
std::memset(vm::get_ptr(addr + Emu.GetTLSFilesz()), 0, Emu.GetTLSMemsz() - Emu.GetTLSFilesz()); // fill the rest with zeros
return addr;
}
}
@ -195,15 +195,13 @@ void ARMv7Thread::task()
{
if (custom_task)
{
if (m_state.load() && check_status()) return;
if (check_status()) return;
return custom_task(*this);
}
while (true)
while (!m_state || !check_status())
{
if (m_state.load() && check_status()) break;
// decode instruction using specified decoder
PC += m_dec->DecodeMemory(PC);
}

View File

@ -116,12 +116,7 @@ s32 sceKernelExitDeleteThread(ARMv7Thread& context, s32 exitStatus)
context.stop();
// current thread should be deleted
const u32 id = context.get_id();
CallAfter([id]()
{
idm::remove<ARMv7Thread>(id);
});
idm::remove<ARMv7Thread>(context.get_id());
return SCE_OK;
}
@ -494,7 +489,7 @@ s32 sceKernelWaitEventFlag(ARMv7Thread& context, s32 evfId, u32 bitPattern, u32
if (passed >= timeout)
{
context.GPR[0] = SCE_KERNEL_ERROR_WAIT_TIMEOUT;
context.GPR[1] = evf->pattern.load();
context.GPR[1] = evf->pattern;
break;
}
@ -629,7 +624,7 @@ s32 sceKernelCancelEventFlag(s32 evfId, u32 setPattern, vm::ptr<s32> pNumWaitThr
*pNumWaitThreads = static_cast<u32>(evf->sq.size());
evf->pattern.store(setPattern);
evf->pattern = setPattern;
evf->sq.clear();
return SCE_OK;
@ -655,7 +650,7 @@ s32 sceKernelGetEventFlagInfo(s32 evfId, vm::ptr<SceKernelEventFlagInfo> pInfo)
pInfo->attr = evf->attr;
pInfo->initPattern = evf->init;
pInfo->currentPattern = evf->pattern.load();
pInfo->currentPattern = evf->pattern;
pInfo->numWaitThreads = static_cast<u32>(evf->sq.size());
return SCE_OK;

View File

@ -464,8 +464,8 @@ struct psv_event_flag_t
: name(name)
, attr(attr)
, init(pattern)
, pattern(pattern)
{
this->pattern.store(pattern);
}
// Wakeup all waiters to return SCE_KERNEL_ERROR_WAIT_DELETE
@ -473,7 +473,7 @@ struct psv_event_flag_t
{
std::lock_guard<std::mutex> lock(mutex);
const u32 pattern = this->pattern.load();
const u32 pattern = this->pattern;
for (auto& thread : sq)
{
@ -550,8 +550,8 @@ struct psv_semaphore_t
: name(name)
, attr(attr)
, max(max)
, count(count)
{
this->count.store(count);
}
};
@ -588,8 +588,8 @@ struct psv_mutex_t
psv_mutex_t(const char* name, u32 attr, s32 count)
: name(name)
, attr(attr)
, count(count)
{
this->count.store(count);
}
};

View File

@ -188,7 +188,7 @@ namespace sce_libc_func
sceLibc.Success("Process finished");
CallAfter([]()
Emu.CallAfter([]()
{
Emu.Stop();
});

View File

@ -4,20 +4,18 @@
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/DbgCommand.h"
#include "CPUDecoder.h"
#include "CPUThread.h"
CPUThread::CPUThread(CPUThreadType type, const std::string& name, std::function<std::string()> thread_name)
: m_state({ CPU_STATE_STOPPED })
, m_id(idm::get_last_id())
: m_id(idm::get_last_id())
, m_type(type)
, m_name(name)
{
start(std::move(thread_name), [this]
{
SendDbgCommand(DID_CREATE_THREAD, this);
Emu.SendDbgCommand(DID_CREATE_THREAD, this);
std::unique_lock<std::mutex> lock(mutex);
@ -71,12 +69,12 @@ CPUThread::CPUThread(CPUThreadType type, const std::string& name, std::function<
CPUThread::~CPUThread()
{
SendDbgCommand(DID_REMOVE_THREAD, this);
Emu.SendDbgCommand(DID_REMOVE_THREAD, this);
}
bool CPUThread::is_paused() const
{
return (m_state.load() & CPU_STATE_PAUSED) != 0 || Emu.IsPaused();
return (m_state & CPU_STATE_PAUSED) != 0 || Emu.IsPaused();
}
void CPUThread::dump_info() const
@ -89,27 +87,27 @@ void CPUThread::dump_info() const
void CPUThread::run()
{
SendDbgCommand(DID_START_THREAD, this);
Emu.SendDbgCommand(DID_START_THREAD, this);
init_stack();
init_regs();
do_run();
SendDbgCommand(DID_STARTED_THREAD, this);
Emu.SendDbgCommand(DID_STARTED_THREAD, this);
}
void CPUThread::pause()
{
SendDbgCommand(DID_PAUSE_THREAD, this);
Emu.SendDbgCommand(DID_PAUSE_THREAD, this);
m_state |= CPU_STATE_PAUSED;
SendDbgCommand(DID_PAUSED_THREAD, this);
Emu.SendDbgCommand(DID_PAUSED_THREAD, this);
}
void CPUThread::resume()
{
SendDbgCommand(DID_RESUME_THREAD, this);
Emu.SendDbgCommand(DID_RESUME_THREAD, this);
{
// lock for reliable notification
@ -120,12 +118,12 @@ void CPUThread::resume()
cv.notify_one();
}
SendDbgCommand(DID_RESUMED_THREAD, this);
Emu.SendDbgCommand(DID_RESUMED_THREAD, this);
}
void CPUThread::stop()
{
SendDbgCommand(DID_STOP_THREAD, this);
Emu.SendDbgCommand(DID_STOP_THREAD, this);
if (is_current())
{
@ -141,12 +139,12 @@ void CPUThread::stop()
cv.notify_one();
}
SendDbgCommand(DID_STOPED_THREAD, this);
Emu.SendDbgCommand(DID_STOPED_THREAD, this);
}
void CPUThread::exec()
{
SendDbgCommand(DID_EXEC_THREAD, this);
Emu.SendDbgCommand(DID_EXEC_THREAD, this);
{
// lock for reliable notification
@ -258,7 +256,7 @@ bool CPUThread::check_status()
{
CHECK_EMU_STATUS; // check at least once
if (!is_paused() && (m_state.load() & CPU_STATE_INTR) == 0)
if (!is_paused() && (m_state & CPU_STATE_INTR) == 0)
{
break;
}
@ -269,7 +267,7 @@ bool CPUThread::check_status()
continue;
}
if (!is_paused() && (m_state.load() & CPU_STATE_INTR) != 0 && handle_interrupt())
if (!is_paused() && (m_state & CPU_STATE_INTR) != 0 && handle_interrupt())
{
continue;
}
@ -277,12 +275,12 @@ bool CPUThread::check_status()
cv.wait(lock);
}
if (m_state.load() & CPU_STATE_RETURN || is_stopped())
if (m_state & CPU_STATE_RETURN || is_stopped())
{
return true;
}
if (m_state.load() & CPU_STATE_STEP)
if (m_state & CPU_STATE_STEP)
{
// set PAUSE, but allow to execute once
m_state |= CPU_STATE_PAUSED;

View File

@ -45,7 +45,7 @@ protected:
using named_thread_t::join;
using named_thread_t::joinable;
atomic_t<u64> m_state; // thread state flags
atomic_t<u64> m_state{ CPU_STATE_STOPPED }; // thread state flags
std::unique_ptr<CPUDecoder> m_dec;
@ -62,8 +62,8 @@ public:
CPUThreadType get_type() const { return m_type; }
std::string get_name() const { return m_name; }
bool is_alive() const { return (m_state.load() & CPU_STATE_DEAD) == 0; }
bool is_stopped() const { return (m_state.load() & CPU_STATE_STOPPED) != 0; }
bool is_alive() const { return (m_state & CPU_STATE_DEAD) == 0; }
bool is_stopped() const { return (m_state & CPU_STATE_STOPPED) != 0; }
virtual bool is_paused() const;
virtual void dump_info() const;

View File

@ -1,7 +1,6 @@
#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/DbgCommand.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUThread.h"
@ -82,7 +81,7 @@ std::shared_ptr<RawSPUThread> CPUThreadManager::NewRawSPUThread()
break;
}
}
return result;
}

View File

@ -293,7 +293,7 @@ void PPUThread::task()
{
while (true)
{
if (m_state.load() && check_status()) break;
if (m_state && check_status()) break;
// decode instruction using specified decoder
m_dec->DecodeMemory(PC);
@ -310,7 +310,7 @@ void PPUThread::task()
const auto func = exec_map[PC / 4];
// check status
if (!m_state.load())
if (!m_state)
{
// call interpreter function
func(*this, { vm::ps3::read32(PC) });

View File

@ -67,7 +67,7 @@ bool RawSPUThread::read_reg(const u32 addr, u32& value)
case SPU_Status_offs:
{
value = status.load();
value = status;
return true;
}
}
@ -201,7 +201,7 @@ bool RawSPUThread::write_reg(const u32 addr, const u32 value)
break;
}
run_ctrl.store(value);
run_ctrl = value;
return true;
}
@ -212,7 +212,7 @@ bool RawSPUThread::write_reg(const u32 addr, const u32 value)
break;
}
npc.store(value);
npc = value;
return true;
}
@ -245,5 +245,5 @@ void RawSPUThread::task()
SPUThread::task();
// save next PC and current SPU Interrupt status
npc.store(pc | u32{ (ch_event_stat.load() & SPU_EVENT_INTR_ENABLED) != 0 });
npc = pc | ((ch_event_stat & SPU_EVENT_INTR_ENABLED) != 0);
}

View File

@ -272,7 +272,7 @@ void spu_recompiler::InterpreterCall(spu_opcode_t op)
const u32 old_pc = _spu->pc;
if (_spu->m_state.load() && _spu->check_status())
if (_spu->m_state && _spu->check_status())
{
return 0x2000000 | _spu->pc;
}
@ -343,12 +343,12 @@ void spu_recompiler::FunctionCall()
LOG_ERROR(SPU, "Branch-to-self");
}
while (!_spu->m_state.load() || !_spu->check_status())
while (!_spu->m_state || !_spu->check_status())
{
// Call override function directly since the type is known
static_cast<SPURecompilerDecoder&>(*_spu->m_dec).DecodeMemory(_spu->offset + _spu->pc);
if (_spu->m_state.load() & CPU_STATE_RETURN)
if (_spu->m_state & CPU_STATE_RETURN)
{
break;
}

View File

@ -40,8 +40,6 @@ SPUDatabase::~SPUDatabase()
std::shared_ptr<spu_function_t> SPUDatabase::analyse(const be_t<u32>* ls, u32 entry, u32 max_limit)
{
std::lock_guard<std::mutex> lock(m_mutex);
// Check arguments (bounds and alignment)
if (max_limit > 0x40000 || entry >= max_limit || entry % 4 || max_limit % 4)
{
@ -51,7 +49,19 @@ std::shared_ptr<spu_function_t> SPUDatabase::analyse(const be_t<u32>* ls, u32 en
// Key for multimap
const u64 key = entry | u64{ ls[entry / 4] } << 32;
// Try to find existing function in the database
{
std::shared_lock<shared_mutex_t> lock(m_mutex);
// Try to find existing function in the database
if (auto func = find(ls + entry / 4, key, max_limit - entry))
{
return func;
}
}
std::lock_guard<shared_mutex_t> lock(m_mutex);
// Double-check
if (auto func = find(ls + entry / 4, key, max_limit - entry))
{
return func;

View File

@ -1,6 +1,7 @@
#pragma once
#include "Emu/Cell/SPUOpcodes.h"
#include "Utilities/SharedMutex.h"
class SPUThread;
@ -258,7 +259,7 @@ struct spu_function_t
// SPU Function Database (must be global or PS3 process-local)
class SPUDatabase final
{
std::mutex m_mutex;
shared_mutex_t m_mutex;
// All registered functions (uses addr and first instruction as a key)
std::unordered_multimap<u64, std::shared_ptr<spu_function_t>> m_db;

View File

@ -29,7 +29,7 @@ thread_local bool spu_channel_t::notification_required;
void spu_int_ctrl_t::set(u64 ints)
{
// leave only enabled interrupts
ints &= mask.load();
ints &= mask;
// notify if at least 1 bit was set
if (ints && ~stat._or(ints) & ints && tag)
@ -118,7 +118,7 @@ void SPUThread::task()
while (true)
{
if (!m_state.load())
if (!m_state)
{
// read opcode
const u32 opcode = base[pc / 4];
@ -146,7 +146,7 @@ void SPUThread::task()
return custom_task(*this);
}
while (!m_state.load() || !check_status())
while (!m_state || !check_status())
{
// decode instruction using specified decoder
pc += m_dec->DecodeMemory(pc + offset);
@ -162,32 +162,34 @@ void SPUThread::init_regs()
mfc_queue.clear();
ch_tag_mask = 0;
ch_tag_stat = {};
ch_stall_stat = {};
ch_atomic_stat = {};
ch_tag_stat.data.store({});
ch_stall_stat.data.store({});
ch_atomic_stat.data.store({});
ch_in_mbox.clear();
ch_out_mbox = {};
ch_out_intr_mbox = {};
ch_out_mbox.data.store({});
ch_out_intr_mbox.data.store({});
snr_config = 0;
ch_snr1 = {};
ch_snr2 = {};
ch_snr1.data.store({});
ch_snr2.data.store({});
ch_event_mask = {};
ch_event_stat = {};
ch_event_mask = 0;
ch_event_stat = 0;
last_raddr = 0;
ch_dec_start_timestamp = get_timebased_time(); // ???
ch_dec_value = 0;
run_ctrl = {};
status = {};
npc = {};
run_ctrl = 0;
status = 0;
npc = 0;
int_ctrl = {};
int_ctrl[0].clear();
int_ctrl[1].clear();
int_ctrl[2].clear();
gpr[1]._u32[3] = 0x3FFF0; // initial stack frame pointer
}
@ -511,7 +513,7 @@ u32 SPUThread::get_events(bool waiting)
// polling with atomically set/removed SPU_EVENT_WAITING flag
return ch_event_stat.atomic_op([this](u32& stat) -> u32
{
if (u32 res = stat & ch_event_mask.load())
if (u32 res = stat & ch_event_mask)
{
stat &= ~SPU_EVENT_WAITING;
return res;
@ -525,7 +527,7 @@ u32 SPUThread::get_events(bool waiting)
}
// simple polling
return ch_event_stat.load() & ch_event_mask.load();
return ch_event_stat & ch_event_mask;
}
void SPUThread::set_events(u32 mask)
@ -543,7 +545,7 @@ void SPUThread::set_events(u32 mask)
{
std::lock_guard<std::mutex> lock(mutex);
if (ch_event_stat.load() & SPU_EVENT_WAITING)
if (ch_event_stat & SPU_EVENT_WAITING)
{
cv.notify_one();
}
@ -555,7 +557,7 @@ void SPUThread::set_interrupt_status(bool enable)
if (enable)
{
// detect enabling interrupts with events masked
if (u32 mask = ch_event_mask.load())
if (u32 mask = ch_event_mask)
{
throw EXCEPTION("SPU Interrupts not implemented (mask=0x%x)", mask);
}
@ -710,7 +712,7 @@ u32 SPUThread::get_ch_value(u32 ch)
case SPU_RdEventMask:
{
return ch_event_mask.load();
return ch_event_mask;
}
case SPU_RdEventStat:
@ -723,7 +725,7 @@ u32 SPUThread::get_ch_value(u32 ch)
return res;
}
if (ch_event_mask.load() & SPU_EVENT_LR)
if (ch_event_mask & SPU_EVENT_LR)
{
// register waiter if polling reservation status is required
vm::wait_op(*this, last_raddr, 128, WRAP_EXPR(get_events(true) || is_stopped()));
@ -752,7 +754,7 @@ u32 SPUThread::get_ch_value(u32 ch)
{
// HACK: "Not isolated" status
// Return SPU Interrupt status in LSB
return (ch_event_stat.load() & SPU_EVENT_INTR_ENABLED) != 0;
return (ch_event_stat & SPU_EVENT_INTR_ENABLED) != 0;
}
}
@ -1120,7 +1122,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventMask:
{
// detect masking events with enabled interrupt status
if (value && ch_event_stat.load() & SPU_EVENT_INTR_ENABLED)
if (value && ch_event_stat & SPU_EVENT_INTR_ENABLED)
{
throw EXCEPTION("SPU Interrupts not implemented (mask=0x%x)", value);
}
@ -1131,7 +1133,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
break;
}
ch_event_mask.store(value);
ch_event_mask = value;
return;
}

View File

@ -147,13 +147,13 @@ struct spu_channel_t
u32 value;
};
atomic_t<sync_var_t> sync_var;
atomic_t<sync_var_t> data;
public:
// returns true on success
bool try_push(u32 value)
{
const auto old = sync_var.atomic_op([=](sync_var_t& data)
const auto old = data.atomic_op([=](sync_var_t& data)
{
if ((data.wait = data.count) == false)
{
@ -168,7 +168,7 @@ public:
// push performing bitwise OR with previous value, may require notification
void push_or(u32 value)
{
const auto old = sync_var.atomic_op([=](sync_var_t& data)
const auto old = data.atomic_op([=](sync_var_t& data)
{
data.count = true;
data.wait = false;
@ -181,7 +181,7 @@ public:
// push unconditionally (overwriting previous value), may require notification
void push(u32 value)
{
const auto old = sync_var.atomic_op([=](sync_var_t& data)
const auto old = data.atomic_op([=](sync_var_t& data)
{
data.count = true;
data.wait = false;
@ -194,7 +194,7 @@ public:
// returns true on success and loaded value
std::tuple<bool, u32> try_pop()
{
const auto old = sync_var.atomic_op([](sync_var_t& data)
const auto old = data.atomic_op([](sync_var_t& data)
{
data.wait = !data.count;
data.count = false;
@ -207,7 +207,7 @@ public:
// pop unconditionally (loading last value), may require notification
u32 pop()
{
const auto old = sync_var.atomic_op([](sync_var_t& data)
const auto old = data.atomic_op([](sync_var_t& data)
{
data.wait = false;
data.count = false;
@ -221,17 +221,17 @@ public:
void set_value(u32 value, bool count = true)
{
sync_var.store({ count, false, value });
data.store({ count, false, value });
}
u32 get_value() volatile
u32 get_value()
{
return sync_var.data.value;
return data.load().value;
}
u32 get_count() volatile
u32 get_count()
{
return sync_var.data.count;
return data.load().count;
}
};
@ -250,22 +250,22 @@ struct spu_channel_4_t
u32 value2;
};
atomic_t<sync_var_t> sync_var;
atomic_t<sync_var_t> values;
atomic_t<u32> value3;
public:
void clear()
{
sync_var = {};
value3 = {};
values = sync_var_t{};
value3 = 0;
}
// push unconditionally (overwriting latest value), returns true if needs signaling
bool push(u32 value)
{
value3.exchange(value);
value3 = value; _mm_sfence();
return sync_var.atomic_op([=](sync_var_t& data) -> bool
return values.atomic_op([=](sync_var_t& data) -> bool
{
switch (data.count++)
{
@ -289,7 +289,7 @@ public:
// returns true on success and two u32 values: data and count after removing the first element
std::tuple<bool, u32, u32> try_pop()
{
return sync_var.atomic_op([this](sync_var_t& data)
return values.atomic_op([this](sync_var_t& data)
{
const auto result = std::make_tuple(data.count != 0, u32{ data.value0 }, u32{ data.count - 1u });
@ -300,7 +300,8 @@ public:
data.value0 = data.value1;
data.value1 = data.value2;
data.value2 = value3.load_sync();
_mm_lfence();
data.value2 = this->value3;
}
else
{
@ -311,19 +312,15 @@ public:
});
}
u32 get_count() volatile
u32 get_count()
{
return sync_var.data.count;
return values.raw().count;
}
void set_values(u32 count, u32 value0, u32 value1 = 0, u32 value2 = 0, u32 value3 = 0)
{
sync_var.data.waiting = 0;
sync_var.data.count = count;
sync_var.data.value0 = value0;
sync_var.data.value1 = value1;
sync_var.data.value2 = value2;
this->value3.store(value3);
this->values.raw() = { 0, count, value0, value1, value2 };
this->value3 = value3;
}
};
@ -337,6 +334,13 @@ struct spu_int_ctrl_t
void set(u64 ints);
void clear(u64 ints);
void clear()
{
mask = 0;
stat = 0;
tag = nullptr;
}
};
struct spu_imm_table_t

View File

@ -1,14 +0,0 @@
#include "stdafx.h"
#include "DbgCommand.h"
SendDbgCommandCb SendDbgCommandFunc = nullptr;
void SendDbgCommand(DbgCommand id, CPUThread* t)
{
SendDbgCommandFunc(id, t);
}
void SetSendDbgCommandCallback(SendDbgCommandCb cb)
{
SendDbgCommandFunc = cb;
}

View File

@ -1,7 +1,5 @@
#pragma once
class CPUThread;
enum DbgCommand
{
DID_FIRST_COMMAND = 0x500,
@ -35,9 +33,3 @@ enum DbgCommand
DID_LAST_COMMAND,
};
typedef void(*SendDbgCommandCb)(DbgCommand id, CPUThread* t);
void SetSendDbgCommandCallback(SendDbgCommandCb value);
void SendDbgCommand(DbgCommand id, CPUThread* thr = nullptr);

View File

@ -1,35 +1,14 @@
#include "stdafx.h"
#include "IdManager.h"
namespace idm
{
std::mutex g_id_mutex;
std::mutex idm::g_mutex;
std::unordered_map<u32, id_data_t> g_id_map;
std::unordered_map<u32, id_data_t> idm::g_map;
thread_local u32 g_tls_last_id = 0xdeadbeef;
u32 idm::g_last_raw_id = 0;
u32 g_last_raw_id = 0;
thread_local u32 idm::g_tls_last_id = 0xdeadbeef;
void clear()
{
std::lock_guard<std::mutex> lock(g_id_mutex);
std::mutex fxm::g_mutex;
g_id_map.clear();
g_last_raw_id = 0;
}
}
namespace fxm
{
std::mutex g_fx_mutex;
std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
void clear()
{
std::lock_guard<std::mutex> lock(g_fx_mutex);
g_fx_map.clear();
}
}
std::unordered_map<const void*, std::shared_ptr<void>> fxm::g_map;

View File

@ -2,6 +2,15 @@
#define ID_MANAGER_INCLUDED
template<typename T> struct type_info_t { static char value; };
template<typename T> char type_info_t<T>::value = 42;
template<typename T> constexpr inline const void* get_type_index()
{
return &type_info_t<T>::value;
}
// default traits for any arbitrary type
template<typename T> struct id_traits
{
@ -15,24 +24,16 @@ template<typename T> struct id_traits
static u32 out_id(u32 raw_id) { return raw_id; }
};
class id_data_t final
struct id_data_t final
{
public:
const std::shared_ptr<void> data;
const std::type_info& info;
const std::size_t hash;
std::shared_ptr<void> data;
const std::type_info* info;
const void* type_index;
template<typename T> force_inline id_data_t(std::shared_ptr<T> data)
template<typename T> inline id_data_t(std::shared_ptr<T> data)
: data(std::move(data))
, info(typeid(T))
, hash(typeid(T).hash_code())
{
}
id_data_t(id_data_t&& right)
: data(std::move(const_cast<std::shared_ptr<void>&>(right.data)))
, info(right.info)
, hash(right.hash)
, info(&typeid(T))
, type_index(get_type_index<T>())
{
}
};
@ -43,64 +44,63 @@ public:
// 0x80000000+ : reserved (may be used through id_traits specializations)
namespace idm
{
// can be called from the constructor called through make() or make_ptr() to get the ID of currently created object
inline u32 get_last_id()
{
thread_local extern u32 g_tls_last_id;
extern std::mutex g_mutex;
extern std::unordered_map<u32, id_data_t> g_map;
extern u32 g_last_raw_id;
thread_local extern u32 g_tls_last_id;
// can be called from the constructor called through make() or make_ptr() to get the ID of the object being created
inline static u32 get_last_id()
{
return g_tls_last_id;
}
// reinitialize ID manager
void clear();
static void clear()
{
std::lock_guard<std::mutex> lock(g_mutex);
g_map.clear();
g_last_raw_id = 0;
}
// check if ID of specified type exists
template<typename T> bool check(u32 id)
template<typename T> static bool check(u32 id)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_mutex);
const auto found = g_map.find(id_traits<T>::in_id(id));
std::lock_guard<std::mutex> lock(g_id_mutex);
const auto found = g_id_map.find(id_traits<T>::in_id(id));
return found != g_id_map.end() && found->second.info == typeid(T);
return found != g_map.end() && found->second.type_index == get_type_index<T>();
}
// check if ID exists and return its type or nullptr
inline const std::type_info* get_type(u32 raw_id)
inline static const std::type_info* get_type(u32 raw_id)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
const auto found = g_map.find(raw_id);
const auto found = g_id_map.find(raw_id);
return found == g_id_map.end() ? nullptr : &found->second.info;
return found == g_map.end() ? nullptr : found->second.info;
}
// add new ID of specified type with specified constructor arguments (returns object or nullptr)
template<typename T, typename... Args> std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> make_ptr(Args&&... args)
template<typename T, typename... Args> static std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> make_ptr(Args&&... args)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
extern u32 g_last_raw_id;
thread_local extern u32 g_tls_last_id;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
u32 raw_id = g_last_raw_id;
while ((raw_id = id_traits<T>::next_id(raw_id)))
for (u32 raw_id = g_last_raw_id; (raw_id = id_traits<T>::next_id(raw_id)); /**/)
{
if (g_id_map.find(raw_id) != g_id_map.end()) continue;
if (g_map.find(raw_id) != g_map.end()) continue;
g_tls_last_id = id_traits<T>::out_id(raw_id);
auto ptr = std::make_shared<T>(std::forward<Args>(args)...);
g_id_map.emplace(raw_id, id_data_t(ptr));
g_map.emplace(raw_id, id_data_t(ptr));
if (raw_id < 0x80000000) g_last_raw_id = raw_id;
@ -111,24 +111,17 @@ namespace idm
}
// add new ID of specified type with specified constructor arguments (returns id)
template<typename T, typename... Args> std::enable_if_t<std::is_constructible<T, Args...>::value, u32> make(Args&&... args)
template<typename T, typename... Args> static std::enable_if_t<std::is_constructible<T, Args...>::value, u32> make(Args&&... args)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
extern u32 g_last_raw_id;
thread_local extern u32 g_tls_last_id;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
u32 raw_id = g_last_raw_id;
while ((raw_id = id_traits<T>::next_id(raw_id)))
for (u32 raw_id = g_last_raw_id; (raw_id = id_traits<T>::next_id(raw_id)); /**/)
{
if (g_id_map.find(raw_id) != g_id_map.end()) continue;
if (g_map.find(raw_id) != g_map.end()) continue;
g_tls_last_id = id_traits<T>::out_id(raw_id);
g_id_map.emplace(raw_id, id_data_t(std::make_shared<T>(std::forward<Args>(args)...)));
g_map.emplace(raw_id, id_data_t(std::make_shared<T>(std::forward<Args>(args)...)));
if (raw_id < 0x80000000) g_last_raw_id = raw_id;
@ -139,24 +132,17 @@ namespace idm
}
// add new ID for an existing object provided (don't use for initial object creation)
template<typename T> u32 import(const std::shared_ptr<T>& ptr)
template<typename T> static u32 import(const std::shared_ptr<T>& ptr)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
extern u32 g_last_raw_id;
thread_local extern u32 g_tls_last_id;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
u32 raw_id = g_last_raw_id;
while ((raw_id = id_traits<T>::next_id(raw_id)))
for (u32 raw_id = g_last_raw_id; (raw_id = id_traits<T>::next_id(raw_id)); /**/)
{
if (g_id_map.find(raw_id) != g_id_map.end()) continue;
if (g_map.find(raw_id) != g_map.end()) continue;
g_tls_last_id = id_traits<T>::out_id(raw_id);
g_id_map.emplace(raw_id, id_data_t(ptr));
g_map.emplace(raw_id, id_data_t(ptr));
if (raw_id < 0x80000000) g_last_raw_id = raw_id;
@ -167,16 +153,13 @@ namespace idm
}
// get ID of specified type
template<typename T> std::shared_ptr<T> get(u32 id)
template<typename T> static std::shared_ptr<T> get(u32 id)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
const auto found = g_map.find(id_traits<T>::in_id(id));
const auto found = g_id_map.find(id_traits<T>::in_id(id));
if (found == g_id_map.end() || found->second.info != typeid(T))
if (found == g_map.end() || found->second.type_index != get_type_index<T>())
{
return nullptr;
}
@ -185,20 +168,17 @@ namespace idm
}
// get all IDs of specified type T (unsorted)
template<typename T> std::vector<std::shared_ptr<T>> get_all()
template<typename T> static std::vector<std::shared_ptr<T>> get_all()
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_id_mutex);
std::lock_guard<std::mutex> lock(g_mutex);
std::vector<std::shared_ptr<T>> result;
const std::size_t hash = typeid(T).hash_code();
const auto type = get_type_index<T>();
for (auto& v : g_id_map)
for (auto& v : g_map)
{
if (v.second.hash == hash && v.second.info == typeid(T))
if (v.second.type_index == type)
{
result.emplace_back(std::static_pointer_cast<T>(v.second.data));
}
@ -208,61 +188,52 @@ namespace idm
}
// remove ID created with type T
template<typename T> bool remove(u32 id)
template<typename T> static bool remove(u32 id)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
const auto found = g_map.find(id_traits<T>::in_id(id));
const auto found = g_id_map.find(id_traits<T>::in_id(id));
if (found == g_id_map.end() || found->second.info != typeid(T))
if (found == g_map.end() || found->second.type_index != get_type_index<T>())
{
return false;
}
g_id_map.erase(found);
g_map.erase(found);
return true;
}
// remove ID created with type T and return the object
template<typename T> std::shared_ptr<T> withdraw(u32 id)
template<typename T> static std::shared_ptr<T> withdraw(u32 id)
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_id_mutex);
const auto found = g_map.find(id_traits<T>::in_id(id));
const auto found = g_id_map.find(id_traits<T>::in_id(id));
if (found == g_id_map.end() || found->second.info != typeid(T))
if (found == g_map.end() || found->second.type_index != get_type_index<T>())
{
return nullptr;
}
auto ptr = std::static_pointer_cast<T>(found->second.data);
g_id_map.erase(found);
g_map.erase(found);
return ptr;
}
template<typename T> u32 get_count()
template<typename T> static u32 get_count()
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_id_mutex);
std::lock_guard<std::mutex> lock(g_mutex);
u32 result = 0;
const std::size_t hash = typeid(T).hash_code();
const auto type = get_type_index<T>();
for (auto& v : g_id_map)
for (auto& v : g_map)
{
if (v.second.hash == hash && v.second.info == typeid(T))
if (v.second.type_index == type)
{
result++;
}
@ -272,20 +243,17 @@ namespace idm
}
// get sorted ID list of specified type
template<typename T> std::set<u32> get_set()
template<typename T> static std::set<u32> get_set()
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_id_mutex);
std::lock_guard<std::mutex> lock(g_mutex);
std::set<u32> result;
const std::size_t hash = typeid(T).hash_code();
const auto type = get_type_index<T>();
for (auto& v : g_id_map)
for (auto& v : g_map)
{
if (v.second.hash == hash && v.second.info == typeid(T))
if (v.second.type_index == type)
{
result.insert(id_traits<T>::out_id(v.first));
}
@ -295,20 +263,17 @@ namespace idm
}
// get sorted ID map (ID value -> ID data) of specified type
template<typename T> std::map<u32, std::shared_ptr<T>> get_map()
template<typename T> static std::map<u32, std::shared_ptr<T>> get_map()
{
extern std::mutex g_id_mutex;
extern std::unordered_map<u32, id_data_t> g_id_map;
std::lock_guard<std::mutex> lock(g_id_mutex);
std::lock_guard<std::mutex> lock(g_mutex);
std::map<u32, std::shared_ptr<T>> result;
const std::size_t hash = typeid(T).hash_code();
const auto type = get_type_index<T>();
for (auto& v : g_id_map)
for (auto& v : g_map)
{
if (v.second.hash == hash && v.second.info == typeid(T))
if (v.second.type_index == type)
{
result[id_traits<T>::out_id(v.first)] = std::static_pointer_cast<T>(v.second.data);
}
@ -316,69 +281,102 @@ namespace idm
return result;
}
}
};
// Fixed Object Manager
// allows to manage shared objects of any specified type, but only one object per type;
// object are deleted when the emulation is stopped
namespace fxm
{
extern std::mutex g_mutex;
extern std::unordered_map<const void*, std::shared_ptr<void>> g_map;
// reinitialize
void clear();
static void clear()
{
std::lock_guard<std::mutex> lock(g_mutex);
g_map.clear();
}
// add fixed object of specified type only if it doesn't exist (one unique object per type may exist)
template<typename T, typename... Args> std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> make(Args&&... args)
template<typename T, typename... Args> static std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> make(Args&&... args)
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_fx_mutex);
const auto index = get_type_index<T>();
const auto found = g_fx_map.find(typeid(T));
const auto found = g_map.find(index);
// only if object of this type doesn't exist
if (found == g_fx_map.end())
if (found == g_map.end())
{
auto ptr = std::make_shared<T>(std::forward<Args>(args)...);
g_fx_map.emplace(typeid(T), ptr);
g_map.emplace(index, ptr);
return std::move(ptr);
return ptr;
}
return nullptr;
}
// add fixed object of specified type, replacing previous one if it exists
template<typename T, typename... Args> std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> make_always(Args&&... args)
template<typename T, typename... Args> static std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> make_always(Args&&... args)
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_fx_mutex);
std::lock_guard<std::mutex> lock(g_mutex);
auto ptr = std::make_shared<T>(std::forward<Args>(args)...);
g_fx_map[typeid(T)] = ptr;
g_map[get_type_index<T>()] = ptr;
return ptr;
}
// import existing fixed object of specified type only if it doesn't exist (don't use)
template<typename T> static std::shared_ptr<T> import(std::shared_ptr<T>&& ptr)
{
std::lock_guard<std::mutex> lock(g_mutex);
const auto index = get_type_index<T>();
const auto found = g_map.find(index);
if (found == g_map.end())
{
g_map.emplace(index, ptr);
return ptr;
}
return nullptr;
}
// import existing fixed object of specified type, replacing previous one if it exists (don't use)
template<typename T> static std::shared_ptr<T> import_always(std::shared_ptr<T>&& ptr)
{
std::lock_guard<std::mutex> lock(g_mutex);
g_map[get_type_index<T>()] = ptr;
return ptr;
}
// get fixed object of specified type (always returns an object, it's created if it doesn't exist)
template<typename T, typename... Args> std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> get_always(Args&&... args)
template<typename T, typename... Args> static std::enable_if_t<std::is_constructible<T, Args...>::value, std::shared_ptr<T>> get_always(Args&&... args)
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_fx_mutex);
const auto index = get_type_index<T>();
const auto found = g_fx_map.find(typeid(T));
const auto found = g_map.find(index);
if (found == g_fx_map.end())
if (found == g_map.end())
{
auto ptr = std::make_shared<T>(std::forward<Args>(args)...);
g_fx_map[typeid(T)] = ptr;
g_map[index] = ptr;
return ptr;
}
@ -387,27 +385,21 @@ namespace fxm
}
// check whether the object exists
template<typename T> bool check()
template<typename T> static bool check()
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_fx_mutex);
return g_fx_map.find(typeid(T)) != g_fx_map.end();
return g_map.find(get_type_index<T>()) != g_map.end();
}
// get fixed object of specified type (returns nullptr if it doesn't exist)
template<typename T> std::shared_ptr<T> get()
template<typename T> static std::shared_ptr<T> get()
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_fx_mutex);
const auto found = g_map.find(get_type_index<T>());
const auto found = g_fx_map.find(typeid(T));
if (found == g_fx_map.end())
if (found == g_map.end())
{
return nullptr;
}
@ -416,40 +408,34 @@ namespace fxm
}
// remove fixed object created with type T
template<typename T> bool remove()
template<typename T> static bool remove()
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_fx_mutex);
const auto found = g_map.find(get_type_index<T>());
const auto found = g_fx_map.find(typeid(T));
if (found == g_fx_map.end())
if (found == g_map.end())
{
return false;
}
return g_fx_map.erase(found), true;
return g_map.erase(found), true;
}
// remove fixed object created with type T and return it
template<typename T> std::shared_ptr<T> withdraw()
template<typename T> static std::shared_ptr<T> withdraw()
{
extern std::mutex g_fx_mutex;
extern std::unordered_map<std::type_index, std::shared_ptr<void>> g_fx_map;
std::lock_guard<std::mutex> lock(g_mutex);
std::lock_guard<std::mutex> lock(g_fx_mutex);
const auto found = g_map.find(get_type_index<T>());
const auto found = g_fx_map.find(typeid(T));
if (found == g_fx_map.end())
if (found == g_map.end())
{
return nullptr;
}
auto ptr = std::static_pointer_cast<T>(std::move(found->second));
return g_fx_map.erase(found), ptr;
return g_map.erase(found), ptr;
}
}
};

View File

@ -1,60 +1,15 @@
#include "stdafx.h"
#include "rpcs3/Ini.h"
#include "Null/NullKeyboardHandler.h"
#include "Emu/System.h"
#include "Keyboard.h"
GetKeyboardHandlerCountCb GetKeyboardHandlerCount = []()
void KeyboardManager::Init(u32 max_connect)
{
return 1;
};
GetKeyboardHandlerCb GetKeyboardHandler = [](int i) -> KeyboardHandlerBase*
{
return new NullKeyboardHandler;
};
void SetGetKeyboardHandlerCountCallback(GetKeyboardHandlerCountCb cb)
{
GetKeyboardHandlerCount = cb;
}
void SetGetKeyboardHandlerCallback(GetKeyboardHandlerCb cb)
{
GetKeyboardHandler = cb;
}
KeyboardManager::KeyboardManager()
: m_keyboard_handler(nullptr)
, m_inited(false)
{
}
KeyboardManager::~KeyboardManager()
{
}
void KeyboardManager::Init(const u32 max_connect)
{
if(m_inited)
return;
// NOTE: Change these to std::make_unique assignments when C++14 comes out.
int numHandlers = GetKeyboardHandlerCount();
int selectedHandler = Ini.KeyboardHandlerMode.GetValue();
if (selectedHandler > numHandlers)
{
selectedHandler = 0;
}
m_keyboard_handler.reset(GetKeyboardHandler(selectedHandler));
m_keyboard_handler = Emu.GetCallbacks().get_kb_handler();
m_keyboard_handler->Init(max_connect);
m_inited = true;
}
void KeyboardManager::Close()
{
if(m_keyboard_handler) m_keyboard_handler->Close();
m_keyboard_handler = nullptr;
m_inited = false;
m_keyboard_handler.reset();
}

View File

@ -1,16 +1,13 @@
#pragma once
#include "KeyboardHandler.h"
class KeyboardManager
{
bool m_inited = false;
std::unique_ptr<KeyboardHandlerBase> m_keyboard_handler;
public:
KeyboardManager();
~KeyboardManager();
void Init(const u32 max_connect);
void Init(u32 max_connect);
void Close();
std::vector<Keyboard>& GetKeyboards() { return m_keyboard_handler->GetKeyboards(); }
@ -19,11 +16,5 @@ public:
KbData& GetData(const u32 keyboard) { return m_keyboard_handler->GetData(keyboard); }
KbConfig& GetConfig(const u32 keyboard) { return m_keyboard_handler->GetConfig(keyboard); }
bool IsInited() const { return m_inited; }
bool IsInited() const { return m_keyboard_handler.operator bool(); }
};
typedef int(*GetKeyboardHandlerCountCb)();
typedef KeyboardHandlerBase*(*GetKeyboardHandlerCb)(int i);
void SetGetKeyboardHandlerCountCallback(GetKeyboardHandlerCountCb cb);
void SetGetKeyboardHandlerCallback(GetKeyboardHandlerCb cb);

View File

@ -1,60 +1,15 @@
#include "stdafx.h"
#include "rpcs3/Ini.h"
#include "Null/NullMouseHandler.h"
#include "Emu/System.h"
#include "Mouse.h"
GetMouseHandlerCountCb GetMouseHandlerCount = []()
void MouseManager::Init(u32 max_connect)
{
return 1;
};
GetMouseHandlerCb GetMouseHandler = [](int i) -> MouseHandlerBase*
{
return new NullMouseHandler;
};
void SetGetMouseHandlerCountCallback(GetMouseHandlerCountCb cb)
{
GetMouseHandlerCount = cb;
}
void SetGetMouseHandlerCallback(GetMouseHandlerCb cb)
{
GetMouseHandler = cb;
}
MouseManager::MouseManager()
: m_mouse_handler(nullptr)
, m_inited(false)
{
}
MouseManager::~MouseManager()
{
}
void MouseManager::Init(const u32 max_connect)
{
if(m_inited)
return;
// NOTE: Change these to std::make_unique assignments when C++14 is available.
int numHandlers = GetMouseHandlerCount();
int selectedHandler = Ini.MouseHandlerMode.GetValue();
if (selectedHandler > numHandlers)
{
selectedHandler = 0;
}
m_mouse_handler.reset(GetMouseHandler(selectedHandler));
m_mouse_handler = Emu.GetCallbacks().get_mouse_handler();
m_mouse_handler->Init(max_connect);
m_inited = true;
}
void MouseManager::Close()
{
if(m_mouse_handler) m_mouse_handler->Close();
m_mouse_handler = nullptr;
m_inited = false;
m_mouse_handler.reset();
}

View File

@ -1,16 +1,13 @@
#pragma once
#include "MouseHandler.h"
class MouseManager
{
bool m_inited;
std::unique_ptr<MouseHandlerBase> m_mouse_handler;
public:
MouseManager();
~MouseManager();
void Init(const u32 max_connect);
void Init(u32 max_connect);
void Close();
std::vector<Mouse>& GetMice() { return m_mouse_handler->GetMice(); }
@ -18,11 +15,5 @@ public:
MouseData& GetData(const u32 mouse) { return m_mouse_handler->GetData(mouse); }
MouseRawData& GetRawData(const u32 mouse) { return m_mouse_handler->GetRawData(mouse); }
bool IsInited() const { return m_inited; }
bool IsInited() const { return m_mouse_handler.operator bool(); }
};
typedef int(*GetMouseHandlerCountCb)();
typedef MouseHandlerBase*(*GetMouseHandlerCb)(int i);
void SetGetMouseHandlerCountCallback(GetMouseHandlerCountCb cb);
void SetGetMouseHandlerCallback(GetMouseHandlerCb cb);

View File

@ -1,60 +1,15 @@
#include "stdafx.h"
#include "rpcs3/Ini.h"
#include "Null/NullPadHandler.h"
#include "Emu/System.h"
#include "Pad.h"
GetPadHandlerCountCb GetPadHandlerCount = []()
void PadManager::Init(u32 max_connect)
{
return 1;
};
GetPadHandlerCb GetPadHandler = [](int i) -> PadHandlerBase*
{
return new NullPadHandler;
};
void SetGetPadHandlerCountCallback(GetPadHandlerCountCb cb)
{
GetPadHandlerCount = cb;
}
void SetGetPadHandlerCallback(GetPadHandlerCb cb)
{
GetPadHandler = cb;
}
PadManager::PadManager()
: m_pad_handler(nullptr)
, m_inited(false)
{
}
PadManager::~PadManager()
{
}
void PadManager::Init(const u32 max_connect)
{
if(m_inited)
return;
// NOTE: Change these to std::make_unique assignments when C++14 is available.
int numHandlers = GetPadHandlerCount();
int selectedHandler = Ini.PadHandlerMode.GetValue();
if (selectedHandler > numHandlers)
{
selectedHandler = 0;
}
m_pad_handler.reset(GetPadHandler(selectedHandler));
m_pad_handler = Emu.GetCallbacks().get_pad_handler();
m_pad_handler->Init(max_connect);
m_inited = true;
}
void PadManager::Close()
{
if(m_pad_handler) m_pad_handler->Close();
m_pad_handler = nullptr;
m_inited = false;
}
m_pad_handler.reset();
}

View File

@ -1,27 +1,18 @@
#pragma once
#include "PadHandler.h"
class PadManager
{
bool m_inited;
std::unique_ptr<PadHandlerBase> m_pad_handler;
public:
PadManager();
~PadManager();
void Init(const u32 max_connect);
void Init(u32 max_connect);
void Close();
std::vector<Pad>& GetPads() { return m_pad_handler->GetPads(); }
PadInfo& GetInfo() { return m_pad_handler->GetInfo(); }
std::vector<Button>& GetButtons(const u32 pad) { return m_pad_handler->GetButtons(pad); }
bool IsInited() const { return m_inited; }
bool IsInited() const { return m_pad_handler.operator bool(); }
};
typedef int(*GetPadHandlerCountCb)();
typedef PadHandlerBase*(*GetPadHandlerCb)(int i);
void SetGetPadHandlerCountCallback(GetPadHandlerCountCb cb);
void SetGetPadHandlerCallback(GetPadHandlerCb cb);

View File

@ -1,355 +0,0 @@
#pragma once
template<typename T, size_t size = sizeof(T)> struct _to_atomic_subtype
{
static_assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 16, "Invalid atomic type");
};
template<typename T> struct _to_atomic_subtype<T, 1>
{
using type = u8;
};
template<typename T> struct _to_atomic_subtype<T, 2>
{
using type = u16;
};
template<typename T> struct _to_atomic_subtype<T, 4>
{
using type = u32;
};
template<typename T> struct _to_atomic_subtype<T, 8>
{
using type = u64;
};
template<typename T> struct _to_atomic_subtype<T, 16>
{
using type = u128;
};
template<typename T> using atomic_subtype_t = typename _to_atomic_subtype<T>::type;
// result wrapper to deal with void result type
template<typename T, typename RT, typename VT> struct atomic_op_result_t
{
RT result;
template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
: result(std::move(func(var, std::forward<Args>(args)...)))
{
}
inline RT move()
{
return std::move(result);
}
};
// void specialization: result is the initial value of the first arg
template<typename T, typename VT> struct atomic_op_result_t<T, void, VT>
{
VT result;
template<typename... Args> inline atomic_op_result_t(T func, VT& var, Args&&... args)
: result(var)
{
func(var, std::forward<Args>(args)...);
}
inline VT move()
{
return std::move(result);
}
};
// member function specialization
template<typename CT, typename... FArgs, typename RT, typename VT> struct atomic_op_result_t<RT(CT::*)(FArgs...), RT, VT>
{
RT result;
template<typename... Args> inline atomic_op_result_t(RT(CT::*func)(FArgs...), VT& var, Args&&... args)
: result(std::move((var.*func)(std::forward<Args>(args)...)))
{
}
inline RT move()
{
return std::move(result);
}
};
// member function void specialization
template<typename CT, typename... FArgs, typename VT> struct atomic_op_result_t<void(CT::*)(FArgs...), void, VT>
{
VT result;
template<typename... Args> inline atomic_op_result_t(void(CT::*func)(FArgs...), VT& var, Args&&... args)
: result(var)
{
(var.*func)(std::forward<Args>(args)...);
}
inline VT move()
{
return std::move(result);
}
};
template<typename T> union _atomic_base
{
using type = std::remove_cv_t<T>;
using subtype = atomic_subtype_t<type>;
type data; // unsafe direct access
subtype sub_data; // unsafe direct access to substitute type
force_inline static const subtype to_subtype(const type& value)
{
return reinterpret_cast<const subtype&>(value);
}
force_inline static const type from_subtype(const subtype value)
{
return reinterpret_cast<const type&>(value);
}
force_inline static type& to_type(subtype& value)
{
return reinterpret_cast<type&>(value);
}
private:
template<typename T2> force_inline static void write_relaxed(volatile T2& data, const T2& value)
{
data = value;
}
force_inline static void write_relaxed(volatile u128& data, const u128& value)
{
sync_lock_test_and_set(&data, value);
}
template<typename T2> force_inline static T2 read_relaxed(const volatile T2& data)
{
return data;
}
force_inline static u128 read_relaxed(const volatile u128& value)
{
return sync_val_compare_and_swap(const_cast<volatile u128*>(&value), u128{0}, u128{0});
}
public:
// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
force_inline const type compare_and_swap(const type& cmp, const type& exch) volatile
{
return from_subtype(sync_val_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch)));
}
// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
force_inline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
{
return sync_bool_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch));
}
// read data with memory barrier
force_inline const type load_sync() const volatile
{
const subtype zero = {};
return from_subtype(sync_val_compare_and_swap(const_cast<subtype*>(&sub_data), zero, zero));
}
// atomically replace data with exch, return previous data value
force_inline const type exchange(const type& exch) volatile
{
return from_subtype(sync_lock_test_and_set(&sub_data, to_subtype(exch)));
}
// read data without memory barrier (works as load_sync() for 128 bit)
force_inline const type load() const volatile
{
return from_subtype(read_relaxed(sub_data));
}
// write data without memory barrier (works as exchange() for 128 bit, discarding result)
force_inline void store(const type& value) volatile
{
write_relaxed(sub_data, to_subtype(value));
}
// perform an atomic operation on data (func is either pointer to member function or callable object with a T& first arg);
// returns the result of the callable object call or previous (old) value of the atomic variable if the return type is void
template<typename F, typename... Args, typename RT = std::result_of_t<F(T&, Args...)>> auto atomic_op(F func, Args&&... args) volatile -> decltype(atomic_op_result_t<F, RT, T>::result)
{
// CAS retry loop: repeat until no other thread modified the value in between
while (true)
{
// read the old value from memory
const subtype old = read_relaxed(sub_data);
// copy the old value
subtype _new = old;
// call atomic op for the local copy of the old value and save the return value of the function
// (args are passed as lvalues, not forwarded: the loop may invoke func more than once)
atomic_op_result_t<F, RT, T> result(func, to_type(_new), args...);
// atomically compare value with `old`, replace with `_new` and return on success
if (sync_bool_compare_and_swap(&sub_data, old, _new)) return result.move();
}
}
// Atomic fetch-OR: OR the stored value with `right`, return the previous value.
force_inline const type _or(const type& right) volatile
{
const subtype arg = to_subtype(right);
return from_subtype(sync_fetch_and_or(&sub_data, arg));
}
// Atomic fetch-AND: AND the stored value with `right`, return the previous value.
force_inline const type _and(const type& right) volatile
{
const subtype arg = to_subtype(right);
return from_subtype(sync_fetch_and_and(&sub_data, arg));
}
// Atomic fetch-AND-NOT: AND the stored value with ~right, return the previous value.
force_inline const type _and_not(const type& right) volatile
{
const subtype arg = ~to_subtype(right);
return from_subtype(sync_fetch_and_and(&sub_data, arg));
}
// Atomic fetch-XOR: XOR the stored value with `right`, return the previous value.
force_inline const type _xor(const type& right) volatile
{
const subtype arg = to_subtype(right);
return from_subtype(sync_fetch_and_xor(&sub_data, arg));
}
// Atomic OR-assign: returns the new (post-operation) value.
force_inline const type operator |=(const type& right) volatile
{
// convert once — the previous code evaluated to_subtype(right) twice
const subtype arg = to_subtype(right);
return from_subtype(sync_fetch_and_or(&sub_data, arg) | arg);
}
// Atomic AND-assign: returns the new (post-operation) value.
force_inline const type operator &=(const type& right) volatile
{
const subtype arg = to_subtype(right);
return from_subtype(sync_fetch_and_and(&sub_data, arg) & arg);
}
// Atomic XOR-assign: returns the new (post-operation) value.
force_inline const type operator ^=(const type& right) volatile
{
const subtype arg = to_subtype(right);
return from_subtype(sync_fetch_and_xor(&sub_data, arg) ^ arg);
}
};
// Atomic pre-increment for integral atomics: returns the new value.
template<typename T, typename = if_integral_t<T>> inline T operator ++(_atomic_base<T>& left)
{
const auto old = sync_fetch_and_add(&left.sub_data, 1);
return left.from_subtype(old + 1);
}
// Atomic pre-decrement: returns the new value.
template<typename T, typename = if_integral_t<T>> inline T operator --(_atomic_base<T>& left)
{
const auto old = sync_fetch_and_sub(&left.sub_data, 1);
return left.from_subtype(old - 1);
}
// Atomic post-increment: returns the previous value.
template<typename T, typename = if_integral_t<T>> inline T operator ++(_atomic_base<T>& left, int)
{
return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
}
// Atomic post-decrement: returns the previous value.
template<typename T, typename = if_integral_t<T>> inline T operator --(_atomic_base<T>& left, int)
{
return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
}
// Atomic add-assign: returns the new value.
template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator +=(_atomic_base<T>& left, T2 right) -> decltype(std::declval<T>() + std::declval<T2>())
{
const auto old = sync_fetch_and_add(&left.sub_data, right);
return left.from_subtype(old + right);
}
// Atomic subtract-assign: returns the new value.
template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator -=(_atomic_base<T>& left, T2 right) -> decltype(std::declval<T>() - std::declval<T2>())
{
const auto old = sync_fetch_and_sub(&left.sub_data, right);
return left.from_subtype(old - right);
}
// Atomic pre-increment for little-endian atomics: returns the new value.
// (LE storage matches the native representation, so fetch_add works directly.)
template<typename T, typename = if_integral_t<T>> inline le_t<T> operator ++(_atomic_base<le_t<T>>& left)
{
const auto old = sync_fetch_and_add(&left.sub_data, 1);
return left.from_subtype(old + 1);
}
// Atomic pre-decrement: returns the new value.
template<typename T, typename = if_integral_t<T>> inline le_t<T> operator --(_atomic_base<le_t<T>>& left)
{
const auto old = sync_fetch_and_sub(&left.sub_data, 1);
return left.from_subtype(old - 1);
}
// Atomic post-increment: returns the previous value.
template<typename T, typename = if_integral_t<T>> inline le_t<T> operator ++(_atomic_base<le_t<T>>& left, int)
{
return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
}
// Atomic post-decrement: returns the previous value.
template<typename T, typename = if_integral_t<T>> inline le_t<T> operator --(_atomic_base<le_t<T>>& left, int)
{
return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
}
// Atomic add-assign: returns the new value.
template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator +=(_atomic_base<le_t<T>>& left, T2 right) -> decltype(std::declval<T>() + std::declval<T2>())
{
const auto old = sync_fetch_and_add(&left.sub_data, right);
return left.from_subtype(old + right);
}
// Atomic subtract-assign: returns the new value.
template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator -=(_atomic_base<le_t<T>>& left, T2 right) -> decltype(std::declval<T>() - std::declval<T2>())
{
const auto old = sync_fetch_and_sub(&left.sub_data, right);
return left.from_subtype(old - right);
}
// Atomic pre-increment for big-endian atomics: routed through atomic_op (a CAS
// loop), since fetch_add would operate on the byte-swapped representation.
template<typename T, typename = if_integral_t<T>> inline be_t<T> operator ++(_atomic_base<be_t<T>>& left)
{
return left.atomic_op([](be_t<T>& v) -> be_t<T>
{
return ++v;
});
}
// Atomic pre-decrement: returns the new value.
template<typename T, typename = if_integral_t<T>> inline be_t<T> operator --(_atomic_base<be_t<T>>& left)
{
return left.atomic_op([](be_t<T>& v) -> be_t<T>
{
return --v;
});
}
// Atomic post-increment: returns the previous value.
template<typename T, typename = if_integral_t<T>> inline be_t<T> operator ++(_atomic_base<be_t<T>>& left, int)
{
return left.atomic_op([](be_t<T>& v) -> be_t<T>
{
return v++;
});
}
// Atomic post-decrement: returns the previous value.
template<typename T, typename = if_integral_t<T>> inline be_t<T> operator --(_atomic_base<be_t<T>>& left, int)
{
return left.atomic_op([](be_t<T>& v) -> be_t<T>
{
return v--;
});
}
// Atomic add-assign for big-endian atomics (CAS loop via atomic_op): returns the new value.
template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator +=(_atomic_base<be_t<T>>& left, T2 right) -> be_t<decltype(std::declval<T>() + std::declval<T2>())>
{
return left.atomic_op([right](be_t<T>& v) -> be_t<T>
{
return v += right;
});
}
// Atomic subtract-assign for big-endian atomics: returns the new value.
template<typename T, typename T2, typename = if_integral_t<T>> inline auto operator -=(_atomic_base<be_t<T>>& left, T2 right) -> be_t<decltype(std::declval<T>() - std::declval<T2>())>
{
return left.atomic_op([right](be_t<T>& v) -> be_t<T>
{
return v -= right;
});
}
template<typename T> using atomic_t = _atomic_base<T>; // Atomic type with native endianness (for emulator-internal state)
template<typename T> using atomic_be_t = _atomic_base<to_be_t<T>>; // Atomic big-endian type (for PS3 virtual memory)
template<typename T> using atomic_le_t = _atomic_base<to_le_t<T>>; // Atomic little-endian type (for PSV virtual memory)

View File

@ -77,7 +77,7 @@ namespace vm
void* const g_base_addr = (atexit(finalize), initialize());
void* g_priv_addr;
std::array<atomic_t<u8>, 0x100000000ull / 4096> g_pages = {}; // information about every page
std::array<atomic_t<u8>, 0x100000000ull / 4096> g_pages{}; // information about every page
const thread_ctrl_t* const INVALID_THREAD = reinterpret_cast<const thread_ctrl_t*>(~0ull);
@ -85,16 +85,11 @@ namespace vm
class reservation_mutex_t
{
atomic_t<const thread_ctrl_t*> m_owner;
atomic_t<const thread_ctrl_t*> m_owner{ INVALID_THREAD };
std::condition_variable m_cv;
std::mutex m_mutex;
public:
reservation_mutex_t()
{
m_owner.store(INVALID_THREAD);
}
bool do_notify = false;
never_inline void lock()
@ -105,7 +100,7 @@ namespace vm
while (!m_owner.compare_and_swap_test(INVALID_THREAD, owner))
{
if (m_owner.load() == owner)
if (m_owner == owner)
{
throw EXCEPTION("Deadlock");
}
@ -423,7 +418,7 @@ namespace vm
throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
}
const u8 flags = g_pages[addr >> 12].load();
const u8 flags = g_pages[addr >> 12];
if (!(flags & page_writable) || !(flags & page_allocated) || (flags & page_no_reservations))
{
@ -587,7 +582,7 @@ namespace vm
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if (g_pages[i].load())
if (g_pages[i])
{
throw EXCEPTION("Memory already mapped (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)", addr, size, flags, i * 4096);
}
@ -630,7 +625,7 @@ namespace vm
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if ((g_pages[i].load() & flags_test) != (flags_test | page_allocated))
if ((g_pages[i] & flags_test) != (flags_test | page_allocated))
{
return false;
}
@ -677,7 +672,7 @@ namespace vm
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if (!(g_pages[i].load() & page_allocated))
if ((g_pages[i] & page_allocated) == 0)
{
throw EXCEPTION("Memory not mapped (addr=0x%x, size=0x%x, current_addr=0x%x)", addr, size, i * 4096);
}
@ -719,7 +714,7 @@ namespace vm
for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
{
if ((g_pages[i].load() & page_allocated) != page_allocated)
if ((g_pages[i] & page_allocated) == 0)
{
return false;
}
@ -788,7 +783,7 @@ namespace vm
// check if memory area is already mapped
for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
{
if (g_pages[i].load())
if (g_pages[i])
{
return false;
}
@ -862,7 +857,7 @@ namespace vm
return addr;
}
if (used.load() + size > this->size)
if (used + size > this->size)
{
return 0;
}
@ -941,7 +936,7 @@ namespace vm
for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
{
if (g_pages[i].load())
if (g_pages[i])
{
throw EXCEPTION("Unexpected pages allocated (current_addr=0x%x)", i * 4096);
}

View File

@ -269,19 +269,11 @@ namespace vm
}
};
template<typename T> struct cast_ptr<be_t<T>>
template<typename T, bool Se> struct cast_ptr<se_t<T, Se>>
{
force_inline static u32 cast(const be_t<T>& addr, const char* file, int line, const char* func)
force_inline static u32 cast(const se_t<T, Se>& addr, const char* file, int line, const char* func)
{
return cast_ptr<T>::cast(addr.value(), file, line, func);
}
};
template<typename T> struct cast_ptr<le_t<T>>
{
force_inline static u32 cast(const le_t<T>& addr, const char* file, int line, const char* func)
{
return cast_ptr<T>::cast(addr.value(), file, line, func);
return cast_ptr<T>::cast(addr, file, line, func);
}
};

View File

@ -7,13 +7,6 @@
#include "Emu/System.h"
#include "GLGSRender.h"
GetGSFrameCb GetGSFrame = nullptr;
void SetGetGSFrameCallback(GetGSFrameCb value)
{
GetGSFrame = value;
}
#define CMD_DEBUG 0
#define DUMP_VERTEX_DATA 0
@ -789,7 +782,7 @@ GLGSRender::GLGSRender()
, m_vp_buf_num(-1)
, m_context(nullptr)
{
m_frame = GetGSFrame();
m_frame = Emu.GetCallbacks().get_gs_frame().release();
}
GLGSRender::~GLGSRender()

View File

@ -130,10 +130,6 @@ public:
};
typedef GSFrameBase*(*GetGSFrameCb)();
void SetGetGSFrameCallback(GetGSFrameCb value);
class GLGSRender final : public GSRender
{
private:

View File

@ -2531,8 +2531,8 @@ void RSXThread::Task()
inc = 1;
u32 put = m_ctrl->put.load();
u32 get = m_ctrl->get.load();
const be_t<u32> put = m_ctrl->put;
const be_t<u32> get = m_ctrl->get;
if (put == get || !Emu.IsRunning())
{

View File

@ -9,7 +9,7 @@ bool LogBase::CheckLogging() const
return Ini.HLELogging.GetValue() || m_logging;
}
void LogBase::LogOutput(LogType type, const std::string& text) const
void LogBase::LogOutput(LogType type, std::string text) const
{
switch (type)
{

View File

@ -14,13 +14,18 @@ class LogBase
LogTodo,
};
void LogOutput(LogType type, const std::string& text) const;
void LogOutput(LogType type, std::string text) const;
template<typename... Args> never_inline void LogPrepare(LogType type, const char* fmt, Args... args) const
template<typename... Args> never_inline safe_buffers void LogPrepare(LogType type, const char* fmt, Args... args) const
{
LogOutput(type, fmt::format(fmt, args...));
}
never_inline safe_buffers void LogPrepare(LogType type, const char* fmt) const
{
LogOutput(type, fmt);
}
public:
void SetLogging(bool value)
{

View File

@ -32,7 +32,7 @@ s32 cellAudioInit()
// clear ports
for (auto& port : g_audio.ports)
{
port.state.store(AUDIO_PORT_STATE_CLOSED);
port.state = AUDIO_PORT_STATE_CLOSED;
}
// reset variables
@ -90,7 +90,7 @@ s32 cellAudioInit()
bool opened = false;
float* buffer;
while (out_queue.pop(buffer, [](){ return g_audio.state.load() != AUDIO_STATE_INITIALIZED; }))
while (out_queue.pop(buffer, [](){ return g_audio.state != AUDIO_STATE_INITIALIZED; }))
{
if (use_u16)
{
@ -137,7 +137,7 @@ s32 cellAudioInit()
Emu.GetAudioManager().GetAudioOut().Quit();
});
while (g_audio.state.load() == AUDIO_STATE_INITIALIZED && !Emu.IsStopped())
while (g_audio.state == AUDIO_STATE_INITIALIZED && !Emu.IsStopped())
{
if (Emu.IsPaused())
{
@ -176,7 +176,7 @@ s32 cellAudioInit()
// mixing:
for (auto& port : g_audio.ports)
{
if (port.state.load() != AUDIO_PORT_STATE_STARTED) continue;
if (port.state != AUDIO_PORT_STATE_STARTED) continue;
const u32 block_size = port.channel * AUDIO_SAMPLES;
const u32 position = port.tag % port.block; // old value
@ -340,7 +340,7 @@ s32 cellAudioInit()
memset(out_buffer[out_pos].get(), 0, out_buffer_size * sizeof(float));
}
if (!out_queue.push(out_buffer[out_pos].get(), [](){ return g_audio.state.load() != AUDIO_STATE_INITIALIZED; }))
if (!out_queue.push(out_buffer[out_pos].get(), [](){ return g_audio.state != AUDIO_STATE_INITIALIZED; }))
{
break;
}
@ -356,7 +356,7 @@ s32 cellAudioInit()
{
AudioPortConfig& port = g_audio.ports[i];
if (port.state.load() != AUDIO_PORT_STATE_STARTED) continue;
if (port.state != AUDIO_PORT_STATE_STARTED) continue;
u32 position = port.tag % port.block; // old value
port.counter = g_audio.counter;
@ -428,7 +428,7 @@ s32 cellAudioPortOpen(vm::ptr<CellAudioPortParam> audioParam, vm::ptr<u32> portN
{
cellAudio.Warning("cellAudioPortOpen(audioParam=*0x%x, portNum=*0x%x)", audioParam, portNum);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -520,7 +520,7 @@ s32 cellAudioPortOpen(vm::ptr<CellAudioPortParam> audioParam, vm::ptr<u32> portN
port.level = 1.0f;
}
port.level_set.data = { port.level, 0.0f };
port.level_set.store({ port.level, 0.0f });
*portNum = port_index;
cellAudio.Warning("*** audio port opened(nChannel=%d, nBlock=%d, attr=0x%llx, level=%f): port = %d", channel, block, attr, port.level, port_index);
@ -532,7 +532,7 @@ s32 cellAudioGetPortConfig(u32 portNum, vm::ptr<CellAudioPortConfig> portConfig)
{
cellAudio.Warning("cellAudioGetPortConfig(portNum=%d, portConfig=*0x%x)", portNum, portConfig);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -565,7 +565,7 @@ s32 cellAudioPortStart(u32 portNum)
{
cellAudio.Warning("cellAudioPortStart(portNum=%d)", portNum);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -588,7 +588,7 @@ s32 cellAudioPortClose(u32 portNum)
{
cellAudio.Warning("cellAudioPortClose(portNum=%d)", portNum);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -611,7 +611,7 @@ s32 cellAudioPortStop(u32 portNum)
{
cellAudio.Warning("cellAudioPortStop(portNum=%d)", portNum);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -634,7 +634,7 @@ s32 cellAudioGetPortTimestamp(u32 portNum, u64 tag, vm::ptr<u64> stamp)
{
cellAudio.Log("cellAudioGetPortTimestamp(portNum=%d, tag=0x%llx, stamp=*0x%x)", portNum, tag, stamp);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -646,7 +646,7 @@ s32 cellAudioGetPortTimestamp(u32 portNum, u64 tag, vm::ptr<u64> stamp)
AudioPortConfig& port = g_audio.ports[portNum];
if (port.state.load() == AUDIO_PORT_STATE_CLOSED)
if (port.state == AUDIO_PORT_STATE_CLOSED)
{
return CELL_AUDIO_ERROR_PORT_NOT_OPEN;
}
@ -664,7 +664,7 @@ s32 cellAudioGetPortBlockTag(u32 portNum, u64 blockNo, vm::ptr<u64> tag)
{
cellAudio.Log("cellAudioGetPortBlockTag(portNum=%d, blockNo=0x%llx, tag=*0x%x)", portNum, blockNo, tag);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -676,7 +676,7 @@ s32 cellAudioGetPortBlockTag(u32 portNum, u64 blockNo, vm::ptr<u64> tag)
AudioPortConfig& port = g_audio.ports[portNum];
if (port.state.load() == AUDIO_PORT_STATE_CLOSED)
if (port.state == AUDIO_PORT_STATE_CLOSED)
{
return CELL_AUDIO_ERROR_PORT_NOT_OPEN;
}
@ -707,7 +707,7 @@ s32 cellAudioSetPortLevel(u32 portNum, float level)
{
cellAudio.Log("cellAudioSetPortLevel(portNum=%d, level=%f)", portNum, level);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -719,7 +719,7 @@ s32 cellAudioSetPortLevel(u32 portNum, float level)
AudioPortConfig& port = g_audio.ports[portNum];
if (port.state.load() == AUDIO_PORT_STATE_CLOSED)
if (port.state == AUDIO_PORT_STATE_CLOSED)
{
return CELL_AUDIO_ERROR_PORT_NOT_OPEN;
}
@ -775,7 +775,7 @@ s32 cellAudioSetNotifyEventQueue(u64 key)
{
cellAudio.Warning("cellAudioSetNotifyEventQueue(key=0x%llx)", key);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -808,7 +808,7 @@ s32 cellAudioRemoveNotifyEventQueue(u64 key)
{
cellAudio.Warning("cellAudioRemoveNotifyEventQueue(key=0x%llx)", key);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -841,7 +841,7 @@ s32 cellAudioAddData(u32 portNum, vm::ptr<float> src, u32 samples, float volume)
{
cellAudio.Log("cellAudioAddData(portNum=%d, src=*0x%x, samples=%d, volume=%f)", portNum, src, samples, volume);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -874,7 +874,7 @@ s32 cellAudioAdd2chData(u32 portNum, vm::ptr<float> src, u32 samples, float volu
{
cellAudio.Log("cellAudioAdd2chData(portNum=%d, src=*0x%x, samples=%d, volume=%f)", portNum, src, samples, volume);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -937,7 +937,7 @@ s32 cellAudioAdd6chData(u32 portNum, vm::ptr<float> src, float volume)
{
cellAudio.Log("cellAudioAdd6chData(portNum=%d, src=*0x%x, volume=%f)", portNum, src, volume);
if (g_audio.state.load() != AUDIO_STATE_INITIALIZED)
if (g_audio.state != AUDIO_STATE_INITIALIZED)
{
return CELL_AUDIO_ERROR_NOT_INIT;
}
@ -1003,7 +1003,7 @@ s32 cellAudioUnsetPersonalDevice(s32 iPersonalStream)
Module<> cellAudio("cellAudio", []()
{
g_audio.state.store(AUDIO_STATE_NOT_INITIALIZED);
g_audio.state = AUDIO_STATE_NOT_INITIALIZED;
g_audio.buffer = 0;
g_audio.indexes = 0;

View File

@ -397,7 +397,7 @@ s32 cellFsStReadGetRingBuf(u32 fd, vm::ptr<CellFsRingBuffer> ringbuf)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED)
if (file->st_status == SSS_NOT_INITIALIZED)
{
return CELL_FS_ENXIO;
}
@ -455,7 +455,7 @@ s32 cellFsStReadGetRegid(u32 fd, vm::ptr<u64> regid)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED)
if (file->st_status == SSS_NOT_INITIALIZED)
{
return CELL_FS_ENXIO;
}
@ -498,7 +498,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
{
std::unique_lock<std::mutex> lock(file->mutex);
while (file->st_status.load() == SSS_STARTED && !Emu.IsStopped())
while (file->st_status == SSS_STARTED && !Emu.IsStopped())
{
// check free space in buffer and available data in stream
if (file->st_total_read - file->st_copied <= file->st_ringbuf_size - file->st_block_size && file->st_total_read < file->st_read_size)
@ -518,11 +518,11 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
}
// check callback condition if set
if (file->st_callback.data.func)
if (file->st_callback.load().func)
{
const u64 available = file->st_total_read - file->st_copied;
if (available >= file->st_callback.data.size)
if (available >= file->st_callback.load().size)
{
const auto func = file->st_callback.exchange({}).func;
@ -540,7 +540,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
file->st_read_size = 0;
file->st_total_read = 0;
file->st_copied = 0;
file->st_callback.data = {};
file->st_callback.store({});
});
return CELL_OK;
@ -588,14 +588,14 @@ s32 cellFsStRead(u32 fd, vm::ptr<u8> buf, u64 size, vm::ptr<u64> rsize)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED || file->st_copyless)
if (file->st_status == SSS_NOT_INITIALIZED || file->st_copyless)
{
return CELL_FS_ENXIO;
}
const u64 copied = file->st_copied.load();
const u64 copied = file->st_copied;
const u32 position = VM_CAST(file->st_buffer + copied % file->st_ringbuf_size);
const u64 total_read = file->st_total_read.load();
const u64 total_read = file->st_total_read;
const u64 copy_size = (*rsize = std::min<u64>(size, total_read - copied)); // write rsize
// copy data
@ -622,14 +622,14 @@ s32 cellFsStReadGetCurrentAddr(u32 fd, vm::ptr<u32> addr, vm::ptr<u64> size)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED || !file->st_copyless)
if (file->st_status == SSS_NOT_INITIALIZED || !file->st_copyless)
{
return CELL_FS_ENXIO;
}
const u64 copied = file->st_copied.load();
const u64 copied = file->st_copied;
const u32 position = VM_CAST(file->st_buffer + copied % file->st_ringbuf_size);
const u64 total_read = file->st_total_read.load();
const u64 total_read = file->st_total_read;
if ((*size = std::min<u64>(file->st_ringbuf_size - (position - file->st_buffer), total_read - copied)))
{
@ -655,13 +655,13 @@ s32 cellFsStReadPutCurrentAddr(u32 fd, vm::ptr<u8> addr, u64 size)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED || !file->st_copyless)
if (file->st_status == SSS_NOT_INITIALIZED || !file->st_copyless)
{
return CELL_FS_ENXIO;
}
const u64 copied = file->st_copied.load();
const u64 total_read = file->st_total_read.load();
const u64 copied = file->st_copied;
const u64 total_read = file->st_total_read;
// notify
file->st_copied += size;
@ -682,7 +682,7 @@ s32 cellFsStReadWait(u32 fd, u64 size)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED)
if (file->st_status == SSS_NOT_INITIALIZED)
{
return CELL_FS_ENXIO;
}
@ -711,7 +711,7 @@ s32 cellFsStReadWaitCallback(u32 fd, u64 size, fs_st_cb_t func)
return CELL_FS_EBADF;
}
if (file->st_status.load() == SSS_NOT_INITIALIZED)
if (file->st_status == SSS_NOT_INITIALIZED)
{
return CELL_FS_ENXIO;
}

View File

@ -383,9 +383,9 @@ s32 _cellGcmInitBody(vm::pptr<CellGcmContextData> context, u32 cmdSize, u32 ioSi
context->set(gcm_info.context_addr);
auto& ctrl = vm::get_ref<CellGcmControl>(gcm_info.control_addr);
ctrl.put.store(0);
ctrl.get.store(0);
ctrl.ref.store(-1);
ctrl.put = 0;
ctrl.get = 0;
ctrl.ref = -1;
auto& render = Emu.GetGSManager().GetRender();
render.m_ctxt_addr = context.addr();

View File

@ -1,6 +1,7 @@
#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/SysCalls/Modules.h"
#include "Emu/SysCalls/Callback.h"
@ -9,18 +10,12 @@
extern Module<> cellSysutil;
extern u64 get_system_time();
std::unique_ptr<MsgDialogInstance> g_msg_dialog;
MsgDialogInstance::MsgDialogInstance()
void MsgDialogBase::Close(s32 status)
{
}
void MsgDialogInstance::Close()
{
state = msgDialogClose;
wait_until = get_system_time();
if (state.compare_and_swap_test(MsgDialogState::Open, MsgDialogState::Close))
{
on_close(status);
}
}
s32 cellMsgDialogOpen()
@ -86,19 +81,20 @@ s32 cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialog
default: return CELL_MSGDIALOG_ERROR_PARAM;
}
MsgDialogState old = msgDialogNone;
if (!g_msg_dialog->state.compare_exchange_strong(old, msgDialogInit))
const auto dlg = fxm::import<MsgDialogBase>(Emu.GetCallbacks().get_msg_dialog());
if (!dlg)
{
return CELL_SYSUTIL_ERROR_BUSY;
}
g_msg_dialog->wait_until = get_system_time() + 31536000000000ull; // some big value
dlg->type = type;
switch (type & CELL_MSGDIALOG_TYPE_PROGRESSBAR)
{
case CELL_MSGDIALOG_TYPE_PROGRESSBAR_DOUBLE: g_msg_dialog->progress_bar_count = 2; break;
case CELL_MSGDIALOG_TYPE_PROGRESSBAR_SINGLE: g_msg_dialog->progress_bar_count = 1; break;
default: g_msg_dialog->progress_bar_count = 0; break;
case CELL_MSGDIALOG_TYPE_PROGRESSBAR_DOUBLE: dlg->progress_bar_count = 2; break;
case CELL_MSGDIALOG_TYPE_PROGRESSBAR_SINGLE: dlg->progress_bar_count = 1; break;
default: dlg->progress_bar_count = 0; break;
}
switch (type & CELL_MSGDIALOG_TYPE_SE_MUTE) // TODO
@ -107,75 +103,34 @@ s32 cellMsgDialogOpen2(u32 type, vm::cptr<char> msgString, vm::ptr<CellMsgDialog
case CELL_MSGDIALOG_TYPE_SE_MUTE_ON: break;
}
std::string msg = msgString.get_ptr();
switch (type & CELL_MSGDIALOG_TYPE_SE_TYPE)
{
case CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL: cellSysutil.Warning("%s", msg); break;
case CELL_MSGDIALOG_TYPE_SE_TYPE_ERROR: cellSysutil.Error("%s", msg); break;
case CELL_MSGDIALOG_TYPE_SE_TYPE_NORMAL: cellSysutil.Warning(msgString.get_ptr()); break;
case CELL_MSGDIALOG_TYPE_SE_TYPE_ERROR: cellSysutil.Error(msgString.get_ptr()); break;
}
g_msg_dialog->status = CELL_MSGDIALOG_BUTTON_NONE;
dlg->callback = callback;
dlg->user_data = userData;
dlg->extra_param = extParam;
CallAfter([type, msg]()
dlg->on_close = [](s32 status)
{
if (Emu.IsStopped())
const auto dlg = fxm::get<MsgDialogBase>();
if (dlg->callback)
{
g_msg_dialog->state.exchange(msgDialogNone);
return;
}
g_msg_dialog->Create(type, msg);
g_msg_dialog->state.exchange(msgDialogOpen);
});
while (g_msg_dialog->state == msgDialogInit)
{
if (Emu.IsStopped())
{
if (g_msg_dialog->state != msgDialogNone)
Emu.GetCallbackManager().Register([func = dlg->callback, status, arg = dlg->user_data](CPUThread& cpu)->s32
{
break;
}
CHECK_EMU_STATUS;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}
named_thread_t(WRAP_EXPR("MsgDialog Thread"), [=]()
{
while (g_msg_dialog->state == msgDialogOpen || (s64)(get_system_time() - g_msg_dialog->wait_until) < 0)
{
if (Emu.IsStopped())
{
g_msg_dialog->state = msgDialogAbort;
break;
}
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
}
if (callback && g_msg_dialog->state != msgDialogAbort)
{
const s32 status = g_msg_dialog->status;
Emu.GetCallbackManager().Register([=](CPUThread& cpu) -> s32
{
callback(static_cast<PPUThread&>(cpu), status, userData);
func(static_cast<PPUThread&>(cpu), status, arg);
return CELL_OK;
});
}
CallAfter([]()
{
g_msg_dialog->Destroy();
g_msg_dialog->state = msgDialogNone;
});
fxm::remove<MsgDialogBase>();
};
}).detach();
// call initialization asynchronously from the GUI thread, wait for the "result"
Emu.CallAfter(WRAP_EXPR(dlg->Create(type, msgString.get_ptr()))).get();
return CELL_OK;
}
@ -266,25 +221,35 @@ s32 cellMsgDialogOpenSimulViewWarning()
throw EXCEPTION("");
}
s32 cellMsgDialogClose(float delay)
s32 cellMsgDialogClose(f32 delay)
{
cellSysutil.Warning("cellMsgDialogClose(delay=%f)", delay);
MsgDialogState old = msgDialogOpen;
const auto dlg = fxm::get<MsgDialogBase>();
if (!g_msg_dialog->state.compare_exchange_strong(old, msgDialogClose))
if (!dlg)
{
if (old == msgDialogNone)
{
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
else
{
return CELL_SYSUTIL_ERROR_BUSY;
}
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
g_msg_dialog->wait_until = get_system_time() + static_cast<u64>(std::max<float>(delay, 0.0f) * 1000);
extern u64 get_system_time();
const u64 wait_until = get_system_time() + static_cast<u64>(std::max<float>(delay, 0.0f) * 1000);
named_thread_t(WRAP_EXPR("MsgDialog Thread"), [=]()
{
while (dlg->state == MsgDialogState::Open && get_system_time() < wait_until)
{
CHECK_EMU_STATUS;
std::this_thread::sleep_for(1ms);
}
Emu.CallAfter(COPY_EXPR(dlg->Destroy()));
dlg->Close(CELL_MSGDIALOG_BUTTON_NONE);
}).detach();
return CELL_OK;
}
@ -293,21 +258,25 @@ s32 cellMsgDialogAbort()
{
cellSysutil.Warning("cellMsgDialogAbort()");
MsgDialogState old = msgDialogOpen;
const auto dlg = fxm::get<MsgDialogBase>();
if (!g_msg_dialog->state.compare_exchange_strong(old, msgDialogAbort))
if (!dlg)
{
if (old == msgDialogNone)
{
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
else
{
return CELL_SYSUTIL_ERROR_BUSY;
}
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
g_msg_dialog->wait_until = get_system_time();
if (!dlg->state.compare_and_swap_test(MsgDialogState::Open, MsgDialogState::Abort))
{
return CELL_SYSUTIL_ERROR_BUSY;
}
if (!fxm::remove<MsgDialogBase>())
{
throw EXCEPTION("Failed to remove MsgDialog object");
}
// call finalization from the GUI thread
Emu.CallAfter(COPY_EXPR(dlg->Destroy()));
return CELL_OK;
}
@ -316,17 +285,22 @@ s32 cellMsgDialogProgressBarSetMsg(u32 progressBarIndex, vm::cptr<char> msgStrin
{
cellSysutil.Warning("cellMsgDialogProgressBarSetMsg(progressBarIndex=%d, msgString=*0x%x)", progressBarIndex, msgString);
if (g_msg_dialog->state != msgDialogOpen)
const auto dlg = fxm::get<MsgDialogBase>();
if (!dlg)
{
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
if (progressBarIndex >= g_msg_dialog->progress_bar_count)
if (progressBarIndex >= dlg->progress_bar_count)
{
return CELL_MSGDIALOG_ERROR_PARAM;
}
g_msg_dialog->ProgressBarSetMsg(progressBarIndex, msgString.get_ptr());
Emu.CallAfter([=, msg = std::string{ msgString.get_ptr() }]
{
dlg->ProgressBarSetMsg(progressBarIndex, msg);
});
return CELL_OK;
}
@ -335,17 +309,19 @@ s32 cellMsgDialogProgressBarReset(u32 progressBarIndex)
{
cellSysutil.Warning("cellMsgDialogProgressBarReset(progressBarIndex=%d)", progressBarIndex);
if (g_msg_dialog->state != msgDialogOpen)
const auto dlg = fxm::get<MsgDialogBase>();
if (!dlg)
{
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
if (progressBarIndex >= g_msg_dialog->progress_bar_count)
if (progressBarIndex >= dlg->progress_bar_count)
{
return CELL_MSGDIALOG_ERROR_PARAM;
}
g_msg_dialog->ProgressBarReset(progressBarIndex);
Emu.CallAfter(COPY_EXPR(dlg->ProgressBarReset(progressBarIndex)));
return CELL_OK;
}
@ -354,25 +330,25 @@ s32 cellMsgDialogProgressBarInc(u32 progressBarIndex, u32 delta)
{
cellSysutil.Warning("cellMsgDialogProgressBarInc(progressBarIndex=%d, delta=%d)", progressBarIndex, delta);
if (g_msg_dialog->state != msgDialogOpen)
const auto dlg = fxm::get<MsgDialogBase>();
if (!dlg)
{
return CELL_MSGDIALOG_ERROR_DIALOG_NOT_OPENED;
}
if (progressBarIndex >= g_msg_dialog->progress_bar_count)
if (progressBarIndex >= dlg->progress_bar_count)
{
return CELL_MSGDIALOG_ERROR_PARAM;
}
g_msg_dialog->ProgressBarInc(progressBarIndex, delta);
Emu.CallAfter(COPY_EXPR(dlg->ProgressBarInc(progressBarIndex, delta)));
return CELL_OK;
}
void cellSysutil_MsgDialog_init()
{
g_msg_dialog->state = msgDialogNone;
REG_FUNC(cellSysutil, cellMsgDialogOpen);
REG_FUNC(cellSysutil, cellMsgDialogOpen2);
REG_FUNC(cellSysutil, cellMsgDialogOpenErrorCode);

View File

@ -84,31 +84,33 @@ enum : s32
using CellMsgDialogCallback = void(s32 buttonType, vm::ptr<void> userData);
enum MsgDialogState
enum class MsgDialogState
{
msgDialogNone,
msgDialogInit,
msgDialogOpen,
msgDialogClose,
msgDialogAbort,
Open,
Abort,
Close,
};
struct MsgDialogInstance
class MsgDialogBase
{
std::atomic<MsgDialogState> state;
public:
atomic_t<MsgDialogState> state{ MsgDialogState::Open };
s32 status;
u64 wait_until;
u32 type;
u32 progress_bar_count;
MsgDialogInstance();
virtual ~MsgDialogInstance() = default;
vm::ptr<CellMsgDialogCallback> callback;
vm::ptr<void> user_data;
vm::ptr<void> extra_param;
virtual void Close();
std::function<void(s32 status)> on_close;
virtual void Create(u32 type, std::string msg) = 0;
void Close(s32 status);
virtual ~MsgDialogBase() = default;
virtual void Create(u32 type, const std::string& msg) = 0;
virtual void Destroy() = 0;
virtual void ProgressBarSetMsg(u32 progressBarIndex, std::string msg) = 0;
virtual void ProgressBarSetMsg(u32 progressBarIndex, const std::string& msg) = 0;
virtual void ProgressBarReset(u32 progressBarIndex) = 0;
virtual void ProgressBarInc(u32 progressBarIndex, u32 delta) = 0;
};

View File

@ -14,12 +14,6 @@ extern Module<> cellSysutil;
extern Module<> cellSaveData;
extern Module<> cellMinisSaveData;
std::unique_ptr<SaveDataDialogInstance> g_savedata_dialog;
SaveDataDialogInstance::SaveDataDialogInstance()
{
}
// cellSaveData aliases (only for cellSaveData.cpp)
using PSetList = vm::ptr<CellSaveDataSetList>;
using PSetBuf = vm::ptr<CellSaveDataSetBuf>;
@ -43,12 +37,13 @@ enum : u32
SAVEDATA_OP_FIXED_DELETE = 14,
};
std::mutex g_savedata_mutex;
never_inline s32 savedata_op(PPUThread& ppu, u32 operation, u32 version, vm::cptr<char> dirName, u32 errDialog, PSetList setList, PSetBuf setBuf, PFuncList funcList, PFuncFixed funcFixed, PFuncStat funcStat, PFuncFile funcFile, u32 container, u32 unknown, vm::ptr<void> userdata, u32 userId, PFuncDone funcDone)
{
// TODO: check arguments
// try to lock the mutex (not sure how it originally works; std::try_to_lock makes it non-blocking)
std::unique_lock<std::mutex> lock(g_savedata_dialog->mutex, std::try_to_lock);
std::unique_lock<std::mutex> lock(g_savedata_mutex, std::try_to_lock);
if (!lock)
{
@ -291,7 +286,7 @@ never_inline s32 savedata_op(PPUThread& ppu, u32 operation, u32 version, vm::cpt
}
// Display Save Data List
selected = g_savedata_dialog->ShowSaveDataList(save_entries, focused, listSet);
selected = Emu.GetCallbacks().get_save_dialog()->ShowSaveDataList(save_entries, focused, listSet);
if (selected == -1)
{

View File

@ -289,12 +289,10 @@ struct SaveDataEntry
bool isNew;
};
struct SaveDataDialogInstance
class SaveDialogBase
{
std::mutex mutex;
SaveDataDialogInstance();
virtual ~SaveDataDialogInstance() = default;
public:
virtual ~SaveDialogBase() = default;
virtual s32 ShowSaveDataList(std::vector<SaveDataEntry>& save_entries, s32 focused, vm::ptr<CellSaveDataListSet> listSet) = 0;
};

View File

@ -420,7 +420,7 @@ void spursHandlerWaitReady(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
{
CHECK_EMU_STATUS;
if (spurs->handlerExiting.load())
if (spurs->handlerExiting)
{
if (s32 rc = CALL_FUNC(ppu, sys_lwmutex_unlock, ppu, spurs.ptr(&CellSpurs::mutex)))
{
@ -431,20 +431,20 @@ void spursHandlerWaitReady(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
}
// Find a runnable workload
spurs->handlerDirty.store(0);
spurs->handlerDirty = 0;
if (spurs->exception == 0)
{
bool foundRunnableWorkload = false;
for (u32 i = 0; i < 16; i++)
{
if (spurs->wklState1[i].load() == SPURS_WKL_STATE_RUNNABLE &&
if (spurs->wklState1[i] == SPURS_WKL_STATE_RUNNABLE &&
*((u64*)spurs->wklInfo1[i].priority) != 0 &&
spurs->wklMaxContention[i].load() & 0x0F)
spurs->wklMaxContention[i] & 0x0F)
{
if (spurs->wklReadyCount1[i].load() ||
if (spurs->wklReadyCount1[i] ||
spurs->wklSignal1.load() & (0x8000u >> i) ||
(spurs->wklFlag.flag.load() == 0 &&
spurs->wklFlagReceiver.load() == (u8)i))
spurs->wklFlagReceiver == (u8)i))
{
foundRunnableWorkload = true;
break;
@ -456,14 +456,14 @@ void spursHandlerWaitReady(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
{
for (u32 i = 0; i < 16; i++)
{
if (spurs->wklState2[i].load() == SPURS_WKL_STATE_RUNNABLE &&
if (spurs->wklState2[i] == SPURS_WKL_STATE_RUNNABLE &&
*((u64*)spurs->wklInfo2[i].priority) != 0 &&
spurs->wklMaxContention[i].load() & 0xF0)
spurs->wklMaxContention[i] & 0xF0)
{
if (spurs->wklIdleSpuCountOrReadyCount2[i].load() ||
if (spurs->wklIdleSpuCountOrReadyCount2[i] ||
spurs->wklSignal2.load() & (0x8000u >> i) ||
(spurs->wklFlag.flag.load() == 0 &&
spurs->wklFlagReceiver.load() == (u8)i + 0x10))
spurs->wklFlagReceiver == (u8)i + 0x10))
{
foundRunnableWorkload = true;
break;
@ -479,8 +479,8 @@ void spursHandlerWaitReady(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
// If we reach it means there are no runnable workloads in this SPURS instance.
// Wait until some workload becomes ready.
spurs->handlerWaiting.store(1);
if (spurs->handlerDirty.load() == 0)
spurs->handlerWaiting = 1;
if (spurs->handlerDirty == 0)
{
if (s32 rc = sys_lwcond_wait(ppu, spurs.ptr(&CellSpurs::cond), 0))
{
@ -488,7 +488,7 @@ void spursHandlerWaitReady(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
}
}
spurs->handlerWaiting.store(0);
spurs->handlerWaiting = 0;
}
// If we reach here then a runnable workload was found
@ -534,7 +534,7 @@ void spursHandlerEntry(PPUThread& ppu)
if ((spurs->flags1 & SF1_EXIT_IF_NO_WORK) == 0)
{
if (spurs->handlerExiting.load() != 1)
if (spurs->handlerExiting != 1)
{
throw EXCEPTION("Unexpected handlerExiting value (false)");
}
@ -591,7 +591,7 @@ s32 spursWakeUpShutdownCompletionWaiter(PPUThread& ppu, vm::ptr<CellSpurs> spurs
return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
}
const u8 wklState = wid < CELL_SPURS_MAX_WORKLOAD ? spurs->wklState1[wid].load() : spurs->wklState2[wid & 0x0F].load();
const u8 wklState = wid < CELL_SPURS_MAX_WORKLOAD ? spurs->wklState1[wid] : spurs->wklState2[wid & 0x0F];
if (wklState != SPURS_WKL_STATE_REMOVABLE)
{
@ -1003,11 +1003,11 @@ s32 spursInit(
if (!isSecond)
{
spurs->wklEnabled.store(0xffff);
spurs->wklEnabled = 0xffff;
}
// Initialise trace
spurs->sysSrvTrace = {};
spurs->sysSrvTrace.store({});
for (u32 i = 0; i < 8; i++)
{
@ -1018,7 +1018,7 @@ s32 spursInit(
spurs->wklInfoSysSrv.addr.set(SPURS_IMG_ADDR_SYS_SRV_WORKLOAD);
spurs->wklInfoSysSrv.size = 0x2200;
spurs->wklInfoSysSrv.arg = 0;
spurs->wklInfoSysSrv.uniqueId.store(0xff);
spurs->wklInfoSysSrv.uniqueId = 0xff;
auto sys_semaphore_attribute_initialize = [](vm::ptr<sys_semaphore_attribute_t> attr)
{
@ -1196,11 +1196,11 @@ s32 spursInit(
}
spurs->flags1 = (flags & SAF_EXIT_IF_NO_WORK ? SF1_EXIT_IF_NO_WORK : 0) | (isSecond ? SF1_32_WORKLOADS : 0);
spurs->wklFlagReceiver.store(0xff);
spurs->wklFlag.flag.store(-1);
spurs->handlerDirty.store(0);
spurs->handlerWaiting.store(0);
spurs->handlerExiting.store(0);
spurs->wklFlagReceiver = 0xff;
spurs->wklFlag.flag = -1;
spurs->handlerDirty = 0;
spurs->handlerWaiting = 0;
spurs->handlerExiting = 0;
spurs->ppuPriority = ppuPriority;
// Create the SPURS event helper thread
@ -1547,7 +1547,7 @@ s32 cellSpursFinalize(vm::ptr<CellSpurs> spurs)
return CELL_SPURS_CORE_ERROR_ALIGN;
}
if (spurs->handlerExiting.load())
if (spurs->handlerExiting)
{
return CELL_SPURS_CORE_ERROR_STAT;
}
@ -1725,8 +1725,8 @@ s32 cellSpursSetPriorities(vm::ptr<CellSpurs> spurs, u32 wid, vm::cptr<u8> prior
const auto wklInfo = wid < CELL_SPURS_MAX_WORKLOAD ? &spurs->wklInfo1[wid] : &spurs->wklInfo2[wid];
*((be_t<u64>*)wklInfo->priority) = prio;
spurs->sysSrvMsgUpdateWorkload.store(0xFF);
spurs->sysSrvMessage.store(0xFF);
spurs->sysSrvMsgUpdateWorkload = 0xff;
spurs->sysSrvMessage = 0xff;
return CELL_OK;
}
@ -1875,7 +1875,7 @@ void spursTraceStatusUpdate(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
if (init)
{
spurs->sysSrvMessage.store(0xFF);
spurs->sysSrvMessage = 0xff;
if (s32 rc = sys_semaphore_wait(ppu, (u32)spurs->semPrv, 0))
{
@ -2209,9 +2209,9 @@ s32 spursAddWorkload(
{
assert((spurs->wklCurrentContention[wnum] & 0xf) == 0);
assert((spurs->wklPendingContention[wnum] & 0xf) == 0);
spurs->wklState1[wnum].store(1);
spurs->wklState1[wnum] = 1;
spurs->wklStatus1[wnum] = 0;
spurs->wklEvent1[wnum].store(0);
spurs->wklEvent1[wnum] = 0;
spurs->wklInfo1[wnum].addr = pm;
spurs->wklInfo1[wnum].arg = data;
spurs->wklInfo1[wnum].size = size;
@ -2235,19 +2235,19 @@ s32 spursAddWorkload(
if ((spurs->flags1 & SF1_32_WORKLOADS) == 0)
{
spurs->wklIdleSpuCountOrReadyCount2[wnum].store(0);
spurs->wklIdleSpuCountOrReadyCount2[wnum] = 0;
spurs->wklMinContention[wnum] = minContention > 8 ? 8 : minContention;
}
spurs->wklReadyCount1[wnum].store(0);
spurs->wklReadyCount1[wnum] = 0;
}
else
{
assert((spurs->wklCurrentContention[index] & 0xf0) == 0);
assert((spurs->wklPendingContention[index] & 0xf0) == 0);
spurs->wklState2[index].store(1);
spurs->wklState2[index] = 1;
spurs->wklStatus2[index] = 0;
spurs->wklEvent2[index].store(0);
spurs->wklEvent2[index] = 0;
spurs->wklInfo2[index].addr = pm;
spurs->wklInfo2[index].arg = data;
spurs->wklInfo2[index].size = size;
@ -2269,7 +2269,7 @@ s32 spursAddWorkload(
spurs->wklEvent2[index] |= 2;
}
spurs->wklIdleSpuCountOrReadyCount2[wnum].store(0);
spurs->wklIdleSpuCountOrReadyCount2[wnum] = 0;
}
if (wnum <= 15)
@ -2308,12 +2308,12 @@ s32 spursAddWorkload(
if (current->addr == wkl->addr)
{
// if a workload with identical policy module found
res_wkl = current->uniqueId.load();
res_wkl = current->uniqueId;
break;
}
else
{
k |= 0x80000000 >> current->uniqueId.load();
k |= 0x80000000 >> current->uniqueId;
res_wkl = cntlz32(~k);
}
}
@ -2405,7 +2405,7 @@ s32 cellSpursWakeUp(PPUThread& ppu, vm::ptr<CellSpurs> spurs)
spurs->handlerDirty.exchange(1);
if (spurs->handlerWaiting.load())
if (spurs->handlerWaiting)
{
spursSignalToHandlerThread(ppu, spurs);
}
@ -2443,7 +2443,7 @@ s32 cellSpursSendWorkloadSignal(vm::ptr<CellSpurs> spurs, u32 wid)
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
if (spurs->wklState(wid).load() != SPURS_WKL_STATE_RUNNABLE)
if (spurs->wklState(wid) != SPURS_WKL_STATE_RUNNABLE)
{
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
@ -2504,7 +2504,7 @@ s32 cellSpursReadyCountStore(vm::ptr<CellSpurs> spurs, u32 wid, u32 value)
return CELL_SPURS_POLICY_MODULE_ERROR_SRCH;
}
if (spurs->exception || spurs->wklState(wid).load() != 2)
if (spurs->exception || spurs->wklState(wid) != 2)
{
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
@ -2641,14 +2641,14 @@ s32 _cellSpursWorkloadFlagReceiver(vm::ptr<CellSpurs> spurs, u32 wid, u32 is_set
{
if (is_set)
{
if (spurs->wklFlagReceiver.load() != 0xff)
if (spurs->wklFlagReceiver != 0xff)
{
return CELL_SPURS_POLICY_MODULE_ERROR_BUSY;
}
}
else
{
if (spurs->wklFlagReceiver.load() != wid)
if (spurs->wklFlagReceiver != wid)
{
return CELL_SPURS_POLICY_MODULE_ERROR_PERM;
}
@ -2922,7 +2922,7 @@ s32 spursEventFlagWait(PPUThread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm
return CELL_SPURS_TASK_ERROR_STAT;
}
if (eventFlag->ctrl.data.ppuWaitMask || eventFlag->ctrl.data.ppuPendingRecv)
if (eventFlag->ctrl.raw().ppuWaitMask || eventFlag->ctrl.raw().ppuPendingRecv)
{
return CELL_SPURS_TASK_ERROR_BUSY;
}
@ -3041,11 +3041,11 @@ s32 spursEventFlagWait(PPUThread& ppu, vm::ptr<CellSpursEventFlag> eventFlag, vm
s32 i = 0;
if (eventFlag->direction == CELL_SPURS_EVENT_FLAG_ANY2ANY)
{
i = eventFlag->ctrl.data.ppuWaitSlotAndMode >> 4;
i = eventFlag->ctrl.raw().ppuWaitSlotAndMode >> 4;
}
*mask = eventFlag->pendingRecvTaskEvents[i];
eventFlag->ctrl.data.ppuPendingRecv = 0;
((CellSpursEventFlag::ControlSyncVar&)eventFlag->ctrl).ppuPendingRecv = 0;
}
*mask = receivedEvents;
@ -3179,7 +3179,7 @@ s32 cellSpursEventFlagDetachLv2EventQueue(vm::ptr<CellSpursEventFlag> eventFlag)
return CELL_SPURS_TASK_ERROR_STAT;
}
if (eventFlag->ctrl.data.ppuWaitMask || eventFlag->ctrl.data.ppuPendingRecv)
if (eventFlag->ctrl.raw().ppuWaitMask || eventFlag->ctrl.raw().ppuPendingRecv)
{
return CELL_SPURS_TASK_ERROR_BUSY;
}

View File

@ -444,7 +444,7 @@ struct alignas(128) CellSpurs
be_t<u32> x08; // 0x08
be_t<u32> x0C; // 0x0C
be_t<u64> eventPort; // 0x10
atomic_be_t<vm::ptr<EventHandlerListNode, u64>> handlerList; // 0x18
atomic_t<vm::bptr<EventHandlerListNode, u64>> handlerList; // 0x18
u8 x20[0x80 - 0x20]; // 0x20
};
@ -455,7 +455,7 @@ struct alignas(128) CellSpurs
vm::bcptr<void, u64> addr; // 0x00 Address of the executable
be_t<u64> arg; // 0x08 Argument
be_t<u32> size; // 0x10 Size of the executable
atomic_be_t<u8> uniqueId; // 0x14 Unique id of the workload. It is the same for all workloads with the same addr.
atomic_t<u8> uniqueId; // 0x14 Unique id of the workload. It is the same for all workloads with the same addr.
u8 pad[3];
u8 priority[8]; // 0x18 Priority of the workload on each SPU
};
@ -468,30 +468,30 @@ struct alignas(128) CellSpurs
vm::bcptr<char, u64> nameInstance;
};
atomic_be_t<u8> wklReadyCount1[0x10]; // 0x00 Number of SPUs requested by each workload (0..15 wids).
atomic_be_t<u8> wklIdleSpuCountOrReadyCount2[0x10]; // 0x10 SPURS1: Number of idle SPUs requested by each workload (0..15 wids). SPURS2: Number of SPUs requested by each workload (16..31 wids).
atomic_t<u8> wklReadyCount1[0x10]; // 0x00 Number of SPUs requested by each workload (0..15 wids).
atomic_t<u8> wklIdleSpuCountOrReadyCount2[0x10]; // 0x10 SPURS1: Number of idle SPUs requested by each workload (0..15 wids). SPURS2: Number of SPUs requested by each workload (16..31 wids).
u8 wklCurrentContention[0x10]; // 0x20 Number of SPUs used by each workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
u8 wklPendingContention[0x10]; // 0x30 Number of SPUs that are pending to context switch to the workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
u8 wklMinContention[0x10]; // 0x40 Min SPUs required for each workload. SPURS1: index = wid. SPURS2: Unused.
atomic_be_t<u8> wklMaxContention[0x10]; // 0x50 Max SPUs that may be allocated to each workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
atomic_t<u8> wklMaxContention[0x10]; // 0x50 Max SPUs that may be allocated to each workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
CellSpursWorkloadFlag wklFlag; // 0x60
atomic_be_t<u16> wklSignal1; // 0x70 Bitset for 0..15 wids
atomic_be_t<u8> sysSrvMessage; // 0x72
atomic_t<u8> sysSrvMessage; // 0x72
u8 spuIdling; // 0x73
u8 flags1; // 0x74 Type is SpursFlags1
u8 sysSrvTraceControl; // 0x75
u8 nSpus; // 0x76
atomic_be_t<u8> wklFlagReceiver; // 0x77
atomic_t<u8> wklFlagReceiver; // 0x77
atomic_be_t<u16> wklSignal2; // 0x78 Bitset for 16..32 wids
u8 x7A[6]; // 0x7A
atomic_be_t<u8> wklState1[0x10]; // 0x80 SPURS_WKL_STATE_*
atomic_t<u8> wklState1[0x10]; // 0x80 SPURS_WKL_STATE_*
u8 wklStatus1[0x10]; // 0x90
atomic_be_t<u8> wklEvent1[0x10]; // 0xA0
atomic_t<u8> wklEvent1[0x10]; // 0xA0
atomic_be_t<u32> wklEnabled; // 0xB0
atomic_be_t<u32> wklMskB; // 0xB4 - System service - Available module id
u32 xB8; // 0xB8
u8 sysSrvExitBarrier; // 0xBC
atomic_be_t<u8> sysSrvMsgUpdateWorkload; // 0xBD
atomic_t<u8> sysSrvMsgUpdateWorkload; // 0xBD
u8 xBE; // 0xBE
u8 sysSrvMsgTerminate; // 0xBF
u8 sysSrvPreemptWklId[8]; // 0xC0 Id of the workload that was preempted by the system workload on each SPU
@ -508,11 +508,11 @@ struct alignas(128) CellSpurs
u8 xCF;
};
atomic_be_t<SrvTraceSyncVar> sysSrvTrace; // 0xCC
atomic_t<SrvTraceSyncVar> sysSrvTrace; // 0xCC
atomic_be_t<u8> wklState2[0x10]; // 0xD0 SPURS_WKL_STATE_*
atomic_t<u8> wklState2[0x10]; // 0xD0 SPURS_WKL_STATE_*
u8 wklStatus2[0x10]; // 0xE0
atomic_be_t<u8> wklEvent2[0x10]; // 0xF0
atomic_t<u8> wklEvent2[0x10]; // 0xF0
_sub_str1 wklF1[0x10]; // 0x100
vm::bptr<CellSpursTraceInfo, u64> traceBuffer; // 0x900
be_t<u32> traceStartIndex[6]; // 0x908
@ -534,9 +534,9 @@ struct alignas(128) CellSpurs
u8 unknown3[0xD5C - 0xD54];
be_t<u32> eventQueue; // 0xD5C
be_t<u32> eventPort; // 0xD60
atomic_be_t<u8> handlerDirty; // 0xD64
atomic_be_t<u8> handlerWaiting; // 0xD65
atomic_be_t<u8> handlerExiting; // 0xD66
atomic_t<u8> handlerDirty; // 0xD64
atomic_t<u8> handlerWaiting; // 0xD65
atomic_t<u8> handlerExiting; // 0xD66
atomic_be_t<u32> enableEH; // 0xD68
be_t<u32> exception; // 0xD6C
sys_spu_image spuImg; // 0xD70
@ -562,7 +562,7 @@ struct alignas(128) CellSpurs
_sub_str4 wklH2[0x10]; // 0x1A00
u8 unknown_[0x2000 - 0x1B00];
force_inline atomic_be_t<u8>& wklState(const u32 wid)
force_inline atomic_t<u8>& wklState(const u32 wid)
{
if (wid & 0x10)
{
@ -622,7 +622,7 @@ struct alignas(128) CellSpursEventFlag
union
{
atomic_be_t<ControlSyncVar> ctrl; // 0x00
atomic_t<ControlSyncVar> ctrl; // 0x00
atomic_be_t<u16> events; // 0x00
};

View File

@ -194,11 +194,11 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
// The system service has the highest priority. Select the system service if
// the system service message bit for this SPU is set.
if (spurs->sysSrvMessage.load() & (1 << ctxt->spuNum)) {
if (spurs->sysSrvMessage & (1 << ctxt->spuNum)) {
ctxt->spuIdling = 0;
if (!isPoll || ctxt->wklCurrentId == CELL_SPURS_SYS_SERVICE_WORKLOAD_ID) {
// Clear the message bit
spurs->sysSrvMessage.store(spurs->sysSrvMessage.load() & ~(1 << ctxt->spuNum));
spurs->sysSrvMessage.raw() &= ~(1 << ctxt->spuNum);
}
} else {
// Caclulate the scheduling weight for each workload
@ -206,9 +206,9 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
for (auto i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
u16 runnable = ctxt->wklRunnable1 & (0x8000 >> i);
u16 wklSignal = spurs->wklSignal1.load() & (0x8000 >> i);
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver.load() == i ? 1 : 0 : 0;
u8 readyCount = spurs->wklReadyCount1[i].load() > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklReadyCount1[i].load();
u8 idleSpuCount = spurs->wklIdleSpuCountOrReadyCount2[i].load() > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklIdleSpuCountOrReadyCount2[i].load();
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver == i ? 1 : 0 : 0;
u8 readyCount = spurs->wklReadyCount1[i] > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklReadyCount1[i].load();
u8 idleSpuCount = spurs->wklIdleSpuCountOrReadyCount2[i] > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklIdleSpuCountOrReadyCount2[i].load();
u8 requestCount = readyCount + idleSpuCount;
// For a workload to be considered for scheduling:
@ -218,7 +218,7 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
// 4. The number of SPUs allocated to it must be less than the number of SPUs requested (i.e. readyCount)
// OR the workload must be signalled
// OR the workload flag is 0 and the workload is configured as the wokload flag receiver
if (runnable && ctxt->priority[i] != 0 && spurs->wklMaxContention[i].load() > contention[i]) {
if (runnable && ctxt->priority[i] != 0 && spurs->wklMaxContention[i] > contention[i]) {
if (wklFlag || wklSignal || (readyCount != 0 && requestCount > contention[i])) {
// The scheduling weight of the workload is formed from the following parameters in decreasing order of priority:
// 1. Wokload signal set or workload flag or ready count > contention
@ -253,12 +253,12 @@ bool spursKernel1SelectWorkload(SPUThread & spu) {
if (!isPoll || wklSelectedId == ctxt->wklCurrentId) {
// Clear workload signal for the selected workload
spurs->wklSignal1.store(spurs->wklSignal1.load() & ~(0x8000 >> wklSelectedId));
spurs->wklSignal2.store(spurs->wklSignal1.load() & ~(0x80000000u >> wklSelectedId));
spurs->wklSignal1.raw() &= ~(0x8000 >> wklSelectedId);
spurs->wklSignal2.raw() &= ~(0x80000000u >> wklSelectedId);
// If the selected workload is the wklFlag workload then pull the wklFlag to all 1s
if (wklSelectedId == spurs->wklFlagReceiver.load()) {
spurs->wklFlag.flag.store(0xFFFFFFFF);
if (wklSelectedId == spurs->wklFlagReceiver) {
spurs->wklFlag.flag = -1;
}
}
}
@ -353,12 +353,12 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
// The system service has the highest priority. Select the system service if
// the system service message bit for this SPU is set.
if (spurs->sysSrvMessage.load() & (1 << ctxt->spuNum)) {
if (spurs->sysSrvMessage & (1 << ctxt->spuNum)) {
// Not sure what this does. Possibly Mark the SPU as in use.
ctxt->spuIdling = 0;
if (!isPoll || ctxt->wklCurrentId == CELL_SPURS_SYS_SERVICE_WORKLOAD_ID) {
// Clear the message bit
spurs->sysSrvMessage.store(spurs->sysSrvMessage.load() & ~(1 << ctxt->spuNum));
spurs->sysSrvMessage.raw() &= ~(1 << ctxt->spuNum);
}
} else {
// Caclulate the scheduling weight for each workload
@ -367,10 +367,10 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
auto j = i & 0x0F;
u16 runnable = i < CELL_SPURS_MAX_WORKLOAD ? ctxt->wklRunnable1 & (0x8000 >> j) : ctxt->wklRunnable2 & (0x8000 >> j);
u8 priority = i < CELL_SPURS_MAX_WORKLOAD ? ctxt->priority[j] & 0x0F : ctxt->priority[j] >> 4;
u8 maxContention = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklMaxContention[j].load() & 0x0F : spurs->wklMaxContention[j].load() >> 4;
u8 maxContention = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklMaxContention[j] & 0x0F : spurs->wklMaxContention[j] >> 4;
u16 wklSignal = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklSignal1.load() & (0x8000 >> j) : spurs->wklSignal2.load() & (0x8000 >> j);
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver.load() == i ? 1 : 0 : 0;
u8 readyCount = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[j].load() : spurs->wklIdleSpuCountOrReadyCount2[j].load();
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver == i ? 1 : 0 : 0;
u8 readyCount = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[j] : spurs->wklIdleSpuCountOrReadyCount2[j];
// For a workload to be considered for scheduling:
// 1. Its priority must be greater than 0
@ -405,12 +405,12 @@ bool spursKernel2SelectWorkload(SPUThread & spu) {
if (!isPoll || wklSelectedId == ctxt->wklCurrentId) {
// Clear workload signal for the selected workload
spurs->wklSignal1.store(spurs->wklSignal1.load() & ~(0x8000 >> wklSelectedId));
spurs->wklSignal2.store(spurs->wklSignal1.load() & ~(0x80000000u >> wklSelectedId));
spurs->wklSignal1.raw() &= ~(0x8000 >> wklSelectedId);
spurs->wklSignal2.raw() &= ~(0x80000000u >> wklSelectedId);
// If the selected workload is the wklFlag workload then pull the wklFlag to all 1s
if (wklSelectedId == spurs->wklFlagReceiver.load()) {
spurs->wklFlag.flag.store(0xFFFFFFFF);
if (wklSelectedId == spurs->wklFlagReceiver) {
spurs->wklFlag.flag = -1;
}
}
}
@ -492,7 +492,7 @@ void spursKernelDispatchWorkload(SPUThread & spu, u64 widAndPollStatus) {
}
ctxt->wklCurrentAddr = wklInfo->addr;
ctxt->wklCurrentUniqueId = wklInfo->uniqueId.load();
ctxt->wklCurrentUniqueId = wklInfo->uniqueId;
}
if (!isKernel2) {
@ -624,7 +624,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
// Check if any workloads can be scheduled
bool foundReadyWorkload = false;
if (spurs->sysSrvMessage.load() & (1 << ctxt->spuNum)) {
if (spurs->sysSrvMessage & (1 << ctxt->spuNum)) {
foundReadyWorkload = true;
} else {
if (spurs->flags1 & SF1_32_WORKLOADS) {
@ -632,11 +632,11 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
u32 j = i & 0x0F;
u16 runnable = i < CELL_SPURS_MAX_WORKLOAD ? ctxt->wklRunnable1 & (0x8000 >> j) : ctxt->wklRunnable2 & (0x8000 >> j);
u8 priority = i < CELL_SPURS_MAX_WORKLOAD ? ctxt->priority[j] & 0x0F : ctxt->priority[j] >> 4;
u8 maxContention = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklMaxContention[j].load() & 0x0F : spurs->wklMaxContention[j].load() >> 4;
u8 maxContention = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklMaxContention[j] & 0x0F : spurs->wklMaxContention[j] >> 4;
u8 contention = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklCurrentContention[j] & 0x0F : spurs->wklCurrentContention[j] >> 4;
u16 wklSignal = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklSignal1.load() & (0x8000 >> j) : spurs->wklSignal2.load() & (0x8000 >> j);
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver.load() == i ? 1 : 0 : 0;
u8 readyCount = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[j].load() : spurs->wklIdleSpuCountOrReadyCount2[j].load();
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver == i ? 1 : 0 : 0;
u8 readyCount = i < CELL_SPURS_MAX_WORKLOAD ? spurs->wklReadyCount1[j] : spurs->wklIdleSpuCountOrReadyCount2[j];
if (runnable && priority > 0 && maxContention > contention) {
if (wklFlag || wklSignal || readyCount > contention) {
@ -649,12 +649,12 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
u16 runnable = ctxt->wklRunnable1 & (0x8000 >> i);
u16 wklSignal = spurs->wklSignal1.load() & (0x8000 >> i);
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver.load() == i ? 1 : 0 : 0;
u8 readyCount = spurs->wklReadyCount1[i].load() > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklReadyCount1[i].load();
u8 idleSpuCount = spurs->wklIdleSpuCountOrReadyCount2[i].load() > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklIdleSpuCountOrReadyCount2[i].load();
u8 wklFlag = spurs->wklFlag.flag.load() == 0 ? spurs->wklFlagReceiver == i ? 1 : 0 : 0;
u8 readyCount = spurs->wklReadyCount1[i] > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklReadyCount1[i].load();
u8 idleSpuCount = spurs->wklIdleSpuCountOrReadyCount2[i] > CELL_SPURS_MAX_SPU ? CELL_SPURS_MAX_SPU : spurs->wklIdleSpuCountOrReadyCount2[i].load();
u8 requestCount = readyCount + idleSpuCount;
if (runnable && ctxt->priority[i] != 0 && spurs->wklMaxContention[i].load() > spurs->wklCurrentContention[i]) {
if (runnable && ctxt->priority[i] != 0 && spurs->wklMaxContention[i] > spurs->wklCurrentContention[i]) {
if (wklFlag || wklSignal || (readyCount != 0 && requestCount > spurs->wklCurrentContention[i])) {
foundReadyWorkload = true;
break;
@ -805,13 +805,13 @@ void spursSysServiceProcessRequests(SPUThread & spu, SpursKernelContext * ctxt)
}
// Update workload message
if (spurs->sysSrvMsgUpdateWorkload.load() & (1 << ctxt->spuNum)) {
if (spurs->sysSrvMsgUpdateWorkload & (1 << ctxt->spuNum)) {
spurs->sysSrvMsgUpdateWorkload &= ~(1 << ctxt->spuNum);
updateWorkload = true;
}
// Update trace message
if (spurs->sysSrvTrace.data.sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) {
if (spurs->sysSrvTrace.load().sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) {
updateTrace = true;
}
@ -850,7 +850,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
// Copy the priority of the workload for this SPU and its unique id to the LS
ctxt->priority[i] = wklInfo1[i].priority[ctxt->spuNum] == 0 ? 0 : 0x10 - wklInfo1[i].priority[ctxt->spuNum];
ctxt->wklUniqueId[i] = wklInfo1[i].uniqueId.load();
ctxt->wklUniqueId[i] = wklInfo1[i].uniqueId;
if (spurs->flags1 & SF1_32_WORKLOADS) {
auto wklInfo2 = vm::get_ptr<CellSpurs::WorkloadInfo>(spu.offset + 0x30200);
@ -868,7 +868,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
// Update workload status and runnable flag based on the workload state
auto wklStatus = spurs->wklStatus1[i];
if (spurs->wklState1[i].load() == SPURS_WKL_STATE_RUNNABLE) {
if (spurs->wklState1[i] == SPURS_WKL_STATE_RUNNABLE) {
spurs->wklStatus1[i] |= 1 << ctxt->spuNum;
ctxt->wklRunnable1 |= 0x8000 >> i;
} else {
@ -877,9 +877,9 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
// If the workload is shutting down and if this is the last SPU from which it is being removed then
// add it to the shutdown bit set
if (spurs->wklState1[i].load() == SPURS_WKL_STATE_SHUTTING_DOWN) {
if (spurs->wklState1[i] == SPURS_WKL_STATE_SHUTTING_DOWN) {
if (((wklStatus & (1 << ctxt->spuNum)) != 0) && (spurs->wklStatus1[i] == 0)) {
spurs->wklState1[i].store(SPURS_WKL_STATE_REMOVABLE);
spurs->wklState1[i] = SPURS_WKL_STATE_REMOVABLE;
wklShutdownBitSet |= 0x80000000u >> i;
}
}
@ -887,7 +887,7 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
if (spurs->flags1 & SF1_32_WORKLOADS) {
// Update workload status and runnable flag based on the workload state
wklStatus = spurs->wklStatus2[i];
if (spurs->wklState2[i].load() == SPURS_WKL_STATE_RUNNABLE) {
if (spurs->wklState2[i] == SPURS_WKL_STATE_RUNNABLE) {
spurs->wklStatus2[i] |= 1 << ctxt->spuNum;
ctxt->wklRunnable2 |= 0x8000 >> i;
} else {
@ -896,9 +896,9 @@ void spursSysServiceActivateWorkload(SPUThread & spu, SpursKernelContext * ctxt)
// If the workload is shutting down and if this is the last SPU from which it is being removed then
// add it to the shutdown bit set
if (spurs->wklState2[i].load() == SPURS_WKL_STATE_SHUTTING_DOWN) {
if (spurs->wklState2[i] == SPURS_WKL_STATE_SHUTTING_DOWN) {
if (((wklStatus & (1 << ctxt->spuNum)) != 0) && (spurs->wklStatus2[i] == 0)) {
spurs->wklState2[i].store(SPURS_WKL_STATE_REMOVABLE);
spurs->wklState2[i] = SPURS_WKL_STATE_REMOVABLE;
wklShutdownBitSet |= 0x8000 >> i;
}
}
@ -927,14 +927,14 @@ void spursSysServiceUpdateShutdownCompletionEvents(SPUThread & spu, SpursKernelC
for (u32 i = 0; i < CELL_SPURS_MAX_WORKLOAD; i++) {
if (wklShutdownBitSet & (0x80000000u >> i)) {
spurs->wklEvent1[i] |= 0x01;
if (spurs->wklEvent1[i].load() & 0x02 || spurs->wklEvent1[i].load() & 0x10) {
if (spurs->wklEvent1[i] & 0x02 || spurs->wklEvent1[i] & 0x10) {
wklNotifyBitSet |= 0x80000000u >> i;
}
}
if (wklShutdownBitSet & (0x8000 >> i)) {
spurs->wklEvent2[i] |= 0x01;
if (spurs->wklEvent2[i].load() & 0x02 || spurs->wklEvent2[i].load() & 0x10) {
if (spurs->wklEvent2[i] & 0x02 || spurs->wklEvent2[i] & 0x10) {
wklNotifyBitSet |= 0x8000 >> i;
}
}
@ -963,20 +963,21 @@ void spursSysServiceTraceUpdate(SPUThread & spu, SpursKernelContext * ctxt, u32
u8 sysSrvMsgUpdateTrace;
vm::reservation_op(VM_CAST(ctxt->spurs.addr() + offsetof(CellSpurs, wklState1)), 128, [&]() {
auto spurs = ctxt->spurs.priv_ptr();
auto& trace = spurs->sysSrvTrace.raw();
sysSrvMsgUpdateTrace = spurs->sysSrvTrace.data.sysSrvMsgUpdateTrace;
spurs->sysSrvTrace.data.sysSrvMsgUpdateTrace &= ~(1 << ctxt->spuNum);
spurs->sysSrvTrace.data.sysSrvTraceInitialised &= ~(1 << ctxt->spuNum);
spurs->sysSrvTrace.data.sysSrvTraceInitialised |= arg2 << ctxt->spuNum;
sysSrvMsgUpdateTrace = trace.sysSrvMsgUpdateTrace;
trace.sysSrvMsgUpdateTrace &= ~(1 << ctxt->spuNum);
trace.sysSrvTraceInitialised &= ~(1 << ctxt->spuNum);
trace.sysSrvTraceInitialised |= arg2 << ctxt->spuNum;
notify = false;
if (((sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) != 0) && (spurs->sysSrvTrace.data.sysSrvMsgUpdateTrace == 0) && (spurs->sysSrvTrace.data.sysSrvNotifyUpdateTraceComplete != 0)) {
spurs->sysSrvTrace.data.sysSrvNotifyUpdateTraceComplete = 0;
if (((sysSrvMsgUpdateTrace & (1 << ctxt->spuNum)) != 0) && (spurs->sysSrvTrace.load().sysSrvMsgUpdateTrace == 0) && (spurs->sysSrvTrace.load().sysSrvNotifyUpdateTraceComplete != 0)) {
trace.sysSrvNotifyUpdateTraceComplete = 0;
notify = true;
}
if (forceNotify && spurs->sysSrvTrace.data.sysSrvNotifyUpdateTraceComplete != 0) {
spurs->sysSrvTrace.data.sysSrvNotifyUpdateTraceComplete = 0;
if (forceNotify && spurs->sysSrvTrace.load().sysSrvNotifyUpdateTraceComplete != 0) {
trace.sysSrvNotifyUpdateTraceComplete = 0;
notify = true;
}
@ -1038,10 +1039,10 @@ void spursSysServiceCleanupAfterSystemWorkload(SPUThread & spu, SpursKernelConte
if (wklId >= CELL_SPURS_MAX_WORKLOAD) {
spurs->wklCurrentContention[wklId & 0x0F] -= 0x10;
spurs->wklReadyCount1[wklId & 0x0F].store(spurs->wklReadyCount1[wklId & 0x0F].load() - 1);
spurs->wklReadyCount1[wklId & 0x0F].raw() -= 1;
} else {
spurs->wklCurrentContention[wklId & 0x0F] -= 0x01;
spurs->wklIdleSpuCountOrReadyCount2[wklId & 0x0F].store(spurs->wklIdleSpuCountOrReadyCount2[wklId & 0x0F].load() - 1);
spurs->wklIdleSpuCountOrReadyCount2[wklId & 0x0F].raw() -= 1;
}
memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128);
@ -1326,9 +1327,9 @@ s32 spursTasksetProcessRequest(SPUThread & spu, s32 request, u32 * taskId, u32 *
readyCount = readyCount < 0 ? 0 : readyCount > 0xFF ? 0xFF : readyCount;
if (kernelCtxt->wklCurrentId < CELL_SPURS_MAX_WORKLOAD) {
spurs->wklReadyCount1[kernelCtxt->wklCurrentId].store(readyCount);
spurs->wklReadyCount1[kernelCtxt->wklCurrentId] = readyCount;
} else {
spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F].store(readyCount);
spurs->wklIdleSpuCountOrReadyCount2[kernelCtxt->wklCurrentId & 0x0F] = readyCount;
}
memcpy(vm::get_ptr(spu.offset + 0x100), spurs, 128);

View File

@ -665,28 +665,28 @@ void syncLFQueueInitialize(vm::ptr<CellSyncLFQueue> queue, vm::cptr<void> buffer
if (direction == CELL_SYNC_QUEUE_ANY2ANY)
{
queue->pop1 = {};
queue->push1 = {};
queue->pop1.store({});
queue->push1.store({});
queue->m_buffer.set(queue->m_buffer.addr() | 1);
queue->m_bs[0] = -1;
queue->m_bs[1] = -1;
//m_bs[2]
//m_bs[3]
queue->m_v1 = -1;
queue->push2 = { { 0xffff } };
queue->pop2 = { { 0xffff } };
queue->push2.store({ 0xffff });
queue->pop2.store({ 0xffff });
}
else
{
queue->pop1 = { { 0, 0, queue->pop1.load().m_h3, 0 } };
queue->push1 = { { 0, 0, queue->push1.load().m_h7, 0 } };
queue->pop1.store({ 0, 0, queue->pop1.load().m_h3, 0});
queue->push1.store({ 0, 0, queue->push1.load().m_h7, 0 });
queue->m_bs[0] = -1; // written as u32
queue->m_bs[1] = -1;
queue->m_bs[2] = -1;
queue->m_bs[3] = -1;
queue->m_v1 = 0;
queue->push2 = {};
queue->pop2 = {};
queue->push2.store({});
queue->pop2.store({});
}
queue->m_v2 = 0;
@ -823,7 +823,7 @@ s32 _cellSyncLFQueueGetPushPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queu
{
CHECK_EMU_STATUS;
const auto old = queue->push1.load_sync();
const auto old = queue->push1.load(); _mm_lfence();
auto push = old;
if (var1)
@ -921,7 +921,7 @@ s32 _cellSyncLFQueueCompletePushPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue>
while (true)
{
const auto old = queue->push2.load_sync();
const auto old = queue->push2.load(); _mm_lfence();
auto push2 = old;
const auto old2 = queue->push3.load();
@ -1129,7 +1129,7 @@ s32 _cellSyncLFQueueGetPopPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue> queue
{
CHECK_EMU_STATUS;
const auto old = queue->pop1.load_sync();
const auto old = queue->pop1.load(); _mm_lfence();
auto pop = old;
if (var1)
@ -1228,7 +1228,7 @@ s32 _cellSyncLFQueueCompletePopPointer(PPUThread& ppu, vm::ptr<CellSyncLFQueue>
while (true)
{
const auto old = queue->pop2.load_sync();
const auto old = queue->pop2.load(); _mm_lfence();
auto pop2 = old;
const auto old2 = queue->pop3.load();
@ -1432,7 +1432,7 @@ s32 cellSyncLFQueueClear(vm::ptr<CellSyncLFQueue> queue)
while (true)
{
const auto old = queue->pop1.load_sync();
const auto old = queue->pop1.load(); _mm_lfence();
auto pop = old;
const auto push = queue->push1.load();
@ -1483,7 +1483,7 @@ s32 cellSyncLFQueueSize(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u32> size)
while (true)
{
const auto old = queue->pop3.load_sync();
const auto old = queue->pop3.load(); _mm_lfence();
u32 var1 = (u16)queue->pop1.load().m_h1;
u32 var2 = (u16)queue->push1.load().m_h5;

View File

@ -50,7 +50,7 @@ struct alignas(4) sync_mutex_t // CellSyncMutex sync var
}
};
using CellSyncMutex = atomic_be_t<sync_mutex_t>;
using CellSyncMutex = atomic_t<sync_mutex_t>;
CHECK_SIZE_ALIGN(CellSyncMutex, 4, 4);
@ -100,7 +100,7 @@ struct alignas(4) sync_barrier_t // CellSyncBarrier sync var
}
};
using CellSyncBarrier = atomic_be_t<sync_barrier_t>;
using CellSyncBarrier = atomic_t<sync_barrier_t>;
CHECK_SIZE_ALIGN(CellSyncBarrier, 4, 4);
@ -145,7 +145,7 @@ struct sync_rwm_t // CellSyncRwm sync var
struct alignas(16) CellSyncRwm
{
atomic_be_t<sync_rwm_t> ctrl; // sync var
atomic_t<sync_rwm_t> ctrl; // sync var
be_t<u32> size;
vm::bptr<void, u64> buffer;
@ -245,7 +245,7 @@ struct sync_queue_t // CellSyncQueue sync var
struct alignas(32) CellSyncQueue
{
atomic_be_t<sync_queue_t> ctrl;
atomic_t<sync_queue_t> ctrl;
be_t<u32> size;
be_t<u32> depth;
@ -317,14 +317,14 @@ struct alignas(128) CellSyncLFQueue
union // 0x0
{
atomic_be_t<pop1_t> pop1;
atomic_be_t<pop3_t> pop3;
atomic_t<pop1_t> pop1;
atomic_t<pop3_t> pop3;
};
union // 0x8
{
atomic_be_t<push1_t> push1;
atomic_be_t<push3_t> push3;
atomic_t<push1_t> push1;
atomic_t<push3_t> push3;
};
be_t<u32> m_size; // 0x10
@ -334,9 +334,9 @@ struct alignas(128) CellSyncLFQueue
be_t<u32> m_direction; // 0x24 CellSyncQueueDirection
be_t<u32> m_v1; // 0x28
atomic_be_t<s32> init; // 0x2C
atomic_be_t<push2_t> push2; // 0x30
atomic_t<push2_t> push2; // 0x30
be_t<u16> m_hs1[15]; // 0x32
atomic_be_t<pop2_t> pop2; // 0x50
atomic_t<pop2_t> pop2; // 0x50
be_t<u16> m_hs2[15]; // 0x52
vm::bptr<void, u64> m_eaSignal; // 0x70
be_t<u32> m_v2; // 0x78

View File

@ -310,7 +310,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
port.size = port.channel * port.block * AUDIO_SAMPLES * sizeof(float);
port.tag = 0;
port.level = 1.0f;
port.level_set.data = { 1.0f, 0.0f };
port.level_set.store({ 1.0f, 0.0f });
libmixer.Warning("*** audio port opened (port=%d)", g_surmx.audio_port);
@ -328,7 +328,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
{
AudioPortConfig& port = g_audio.ports[g_surmx.audio_port];
while (port.state.load() != AUDIO_PORT_STATE_CLOSED)
while (port.state != AUDIO_PORT_STATE_CLOSED)
{
CHECK_EMU_STATUS;
@ -338,7 +338,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
continue;
}
if (port.state.load() == AUDIO_PORT_STATE_STARTED)
if (port.state == AUDIO_PORT_STATE_STARTED)
{
//u64 stamp0 = get_system_time();

View File

@ -170,9 +170,10 @@ extern void sysPrxForUser_sys_libc_init();
Module<> sysPrxForUser("sysPrxForUser", []()
{
g_tls_start = 0;
for (auto& v : g_tls_owners)
{
v.store(0, std::memory_order_relaxed);
v = 0;
}
// Setup random number generator

View File

@ -70,7 +70,7 @@ void sys_game_process_exitspawn(vm::cptr<char> path, u32 argv_addr, u32 envp_add
Emu.Pause();
sysPrxForUser.Success("Process finished");
CallAfter([=]()
Emu.CallAfter([=]()
{
Emu.Stop();
@ -146,7 +146,7 @@ void sys_game_process_exitspawn2(vm::cptr<char> path, u32 argv_addr, u32 envp_ad
Emu.Pause();
sysPrxForUser.Success("Process finished");
CallAfter([=]()
Emu.CallAfter([=]()
{
Emu.Stop();

View File

@ -226,7 +226,7 @@ s32 sys_lwcond_wait(PPUThread& ppu, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
const be_t<u32> recursive_value = lwmutex->recursive_count;
// set special value
lwmutex->vars.owner = { lwmutex_reserved };
lwmutex->vars.owner = lwmutex_reserved;
lwmutex->recursive_count = 0;
// call the syscall

View File

@ -32,7 +32,7 @@ s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attri
default: sysPrxForUser.Error("sys_lwmutex_create(): invalid protocol (0x%x)", protocol); return CELL_EINVAL;
}
lwmutex->lock_var = { { lwmutex_free, 0 } };
lwmutex->lock_var.store({ lwmutex_free, 0 });
lwmutex->attribute = attr->recursive | attr->protocol;
lwmutex->recursive_count = 0;
lwmutex->sleep_queue = idm::make<lv2_lwmutex_t>(protocol, attr->name_u64);
@ -133,7 +133,7 @@ s32 sys_lwmutex_lock(PPUThread& ppu, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout
if (lwmutex->vars.owner.compare_and_swap_test(lwmutex_free, tid))
{
// locking succeeded
lwmutex->all_info--;
--lwmutex->all_info;
return CELL_OK;
}

View File

@ -197,8 +197,8 @@ struct lv2_file_t
: file(std::move(file))
, mode(mode)
, flags(flags)
, st_status({ SSS_NOT_INITIALIZED })
, st_callback({})
, st_status(SSS_NOT_INITIALIZED)
, st_callback(fs_st_cb_rec_t{})
{
}
};

View File

@ -33,7 +33,7 @@ struct sys_lwmutex_t
union
{
atomic_be_t<sync_var_t> lock_var;
atomic_t<sync_var_t> lock_var;
struct
{

View File

@ -219,7 +219,7 @@ s32 sys_memory_get_user_memory_size(vm::ptr<sys_memory_info_t> mem_info)
// Fetch the user memory available
mem_info->total_user_memory = area->size - reserved;
mem_info->available_user_memory = area->size - area->used.load();
mem_info->available_user_memory = area->size - area->used;
return CELL_OK;
}
@ -248,8 +248,7 @@ s32 sys_memory_container_create(vm::ptr<u32> cid, u32 size)
const auto area = vm::get(vm::user_space);
if (area->size < reserved + size ||
area->size - area->used.load() < size)
if (area->size < reserved + size || area->size - area->used < size)
{
return CELL_ENOMEM;
}
@ -274,7 +273,7 @@ s32 sys_memory_container_destroy(u32 cid)
}
// Check if some memory is not deallocated (the container cannot be destroyed in this case)
if (ct->used.load())
if (ct->used)
{
return CELL_EBUSY;
}
@ -298,7 +297,7 @@ s32 sys_memory_container_get_size(vm::ptr<sys_memory_info_t> mem_info, u32 cid)
}
mem_info->total_user_memory = ct->size; // total container memory
mem_info->available_user_memory = ct->size - ct->used.load(); // available container memory
mem_info->available_user_memory = ct->size - ct->used; // available container memory
return CELL_OK;
}

View File

@ -204,7 +204,7 @@ s32 sys_mmapper_free_address(u32 addr)
return CELL_EINVAL;
}
if (area->used.load())
if (area->used)
{
return CELL_EBUSY;
}
@ -273,7 +273,7 @@ s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)
return CELL_EALIGN;
}
if (const u32 old_addr = mem->addr.load())
if (const u32 old_addr = mem->addr)
{
throw EXCEPTION("Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, old_addr);
}

View File

@ -1,9 +1,8 @@
#include "stdafx.h"
#include "Emu/Memory/Memory.h"
#include "Emu/System.h"
#include "Emu/SysCalls/SysCalls.h"
#include "Emu/IdManager.h"
#include "Emu/DbgCommand.h"
#include "Emu/SysCalls/SysCalls.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_mutex.h"

View File

@ -54,7 +54,7 @@ s32 sys_process_exit(s32 status)
sys_process.Success("Process finished");
CallAfter([]()
Emu.CallAfter([]()
{
Emu.Stop();
});

View File

@ -544,7 +544,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
{
if (t)
{
if ((t->status.load() & SPU_STATUS_STOPPED_BY_STOP) == 0)
if ((t->status & SPU_STATUS_STOPPED_BY_STOP) == 0)
{
stopped = false;
break;
@ -1263,7 +1263,7 @@ s32 sys_raw_spu_get_int_mask(u32 id, u32 class_id, vm::ptr<u64> mask)
return CELL_ESRCH;
}
*mask = thread->int_ctrl[class_id].mask.load();
*mask = thread->int_ctrl[class_id].mask;
return CELL_OK;
}
@ -1305,7 +1305,7 @@ s32 sys_raw_spu_get_int_stat(u32 id, u32 class_id, vm::ptr<u64> stat)
return CELL_ESRCH;
}
*stat = thread->int_ctrl[class_id].stat.load();
*stat = thread->int_ctrl[class_id].stat;
return CELL_OK;
}

View File

@ -13,7 +13,6 @@
#include "Emu/FS/vfsFile.h"
#include "Emu/FS/vfsLocalFile.h"
#include "Emu/FS/vfsDeviceLocalFile.h"
#include "Emu/DbgCommand.h"
#include "Emu/CPU/CPUThreadManager.h"
#include "Emu/SysCalls/Callback.h"
@ -60,10 +59,6 @@ Emulator::Emulator()
m_loader.register_handler(new loader::handlers::elf64);
}
Emulator::~Emulator()
{
}
void Emulator::Init()
{
}
@ -389,7 +384,9 @@ void Emulator::Stop()
while (g_thread_count)
{
std::this_thread::sleep_for(std::chrono::milliseconds(1));
m_cb.process_events();
std::this_thread::sleep_for(10ms);
}
LOG_NOTICE(GENERAL, "All threads stopped...");
@ -516,15 +513,3 @@ bool Emulator::LoadPoints(const std::string& path)
}
Emulator Emu;
CallAfterCbType CallAfterCallback = nullptr;
void CallAfter(std::function<void()> func)
{
CallAfterCallback(func);
}
void SetCallAfterCallback(CallAfterCbType cb)
{
CallAfterCallback = cb;
}

View File

@ -1,6 +1,20 @@
#pragma once
#include "Loader/Loader.h"
#include "DbgCommand.h"
struct EmuCallbacks
{
std::function<void(std::function<void()>)> call_after;
std::function<void()> process_events;
std::function<void(DbgCommand, class CPUThread*)> send_dbg_command;
std::function<std::unique_ptr<class KeyboardHandlerBase>()> get_kb_handler;
std::function<std::unique_ptr<class MouseHandlerBase>()> get_mouse_handler;
std::function<std::unique_ptr<class PadHandlerBase>()> get_pad_handler;
std::function<std::unique_ptr<class GSFrameBase>()> get_gs_frame;
std::function<std::unique_ptr<class MsgDialogBase>()> get_msg_dialog;
std::function<std::unique_ptr<class SaveDialogBase>()> get_save_dialog;
};
enum Status : u32
{
@ -44,8 +58,10 @@ public:
}
};
class Emulator
class Emulator final
{
EmuCallbacks m_cb;
enum Mode
{
DisAsm,
@ -81,27 +97,59 @@ class Emulator
EmuInfo m_info;
loader::loader m_loader;
public:
std::string m_path;
std::string m_elf_path;
std::string m_emu_path;
std::string m_title_id;
std::string m_title;
public:
Emulator();
~Emulator();
void SetCallbacks(EmuCallbacks&& cb)
{
m_cb = std::move(cb);
}
const auto& GetCallbacks() const
{
return m_cb;
}
void SendDbgCommand(DbgCommand cmd, class CPUThread* thread = nullptr)
{
m_cb.send_dbg_command(cmd, thread);
}
// returns a future object associated with the result of the function called from the GUI thread
template<typename F, typename RT = std::result_of_t<F()>> inline std::future<RT> CallAfter(F&& func) const
{
// create task
auto task = std::make_shared<std::packaged_task<RT()>>(std::forward<F>(func));
// get future
std::future<RT> future = task->get_future();
// run asynchronously in GUI thread
m_cb.call_after([=]
{
(*task)();
});
return future;
}
void Init();
void SetPath(const std::string& path, const std::string& elf_path = "");
void SetTitleID(const std::string& id);
void SetTitle(const std::string& title);
std::string GetPath() const
const std::string& GetPath() const
{
return m_elf_path;
}
std::string GetEmulatorPath() const
const std::string& GetEmulatorPath() const
{
return m_emu_path;
}
@ -212,9 +260,3 @@ inline bool check_lv2_lock(lv2_lock_t& lv2_lock)
#define LV2_DEFER_LOCK lv2_lock_t lv2_lock
#define CHECK_LV2_LOCK(x) if (!check_lv2_lock(x)) throw EXCEPTION("lv2_lock is invalid or not locked")
#define CHECK_EMU_STATUS if (Emu.IsStopped()) throw EmulationStopped{}
typedef void(*CallAfterCbType)(std::function<void()> func);
void CallAfter(std::function<void()> func);
void SetCallAfterCallback(CallAfterCbType cb);

View File

@ -45,7 +45,7 @@ public:
{
m_btn_run->Enable(!Emu.IsStopped());
m_btn_stop->Enable(!Emu.IsStopped());
m_btn_restart->Enable(!Emu.m_path.empty());
m_btn_restart->Enable(!Emu.GetPath().empty());
}
void OnRun(wxCommandEvent& event)

View File

@ -568,7 +568,7 @@ void MainFrame::OnKeyDown(wxKeyEvent& event)
case 'E': case 'e': if(Emu.IsPaused()) Emu.Resume(); else if(Emu.IsReady()) Emu.Run(); return;
case 'P': case 'p': if(Emu.IsRunning()) Emu.Pause(); return;
case 'S': case 's': if(!Emu.IsStopped()) Emu.Stop(); return;
case 'R': case 'r': if(!Emu.m_path.empty()) {Emu.Stop(); Emu.Run();} return;
case 'R': case 'r': if(!Emu.GetPath().empty()) {Emu.Stop(); Emu.Run();} return;
}
}

View File

@ -5,7 +5,7 @@
#include "Emu/SysCalls/lv2/sys_time.h"
#include "MsgDialog.h"
void MsgDialogFrame::Create(u32 type, std::string msg)
void MsgDialogFrame::Create(u32 type, const std::string& msg)
{
wxWindow* parent = nullptr; // TODO: align the window better
@ -96,23 +96,18 @@ void MsgDialogFrame::Create(u32 type, std::string msg)
m_dialog->Show();
m_dialog->Enable();
m_dialog->Bind(wxEVT_BUTTON, [&](wxCommandEvent& event)
m_dialog->Bind(wxEVT_BUTTON, [this](wxCommandEvent& event)
{
this->status = (event.GetId() == wxID_NO) ? CELL_MSGDIALOG_BUTTON_NO : CELL_MSGDIALOG_BUTTON_YES /* OK */;
this->m_dialog->Hide();
this->Close();
this->Destroy();
this->Close(event.GetId() == wxID_NO ? CELL_MSGDIALOG_BUTTON_NO : CELL_MSGDIALOG_BUTTON_YES);
});
m_dialog->Bind(wxEVT_CLOSE_WINDOW, [&](wxCloseEvent& event)
m_dialog->Bind(wxEVT_CLOSE_WINDOW, [this, type](wxCloseEvent& event)
{
if (type & CELL_MSGDIALOG_TYPE_DISABLE_CANCEL)
if (~type & CELL_MSGDIALOG_TYPE_DISABLE_CANCEL)
{
}
else
{
this->status = CELL_MSGDIALOG_BUTTON_ESCAPE;
this->m_dialog->Hide();
this->Close();
this->Destroy();
this->Close(CELL_MSGDIALOG_BUTTON_ESCAPE);
}
});
}
@ -122,40 +117,31 @@ void MsgDialogFrame::Destroy()
m_dialog.reset();
}
void MsgDialogFrame::ProgressBarSetMsg(u32 index, std::string msg)
void MsgDialogFrame::ProgressBarSetMsg(u32 index, const std::string& msg)
{
wxGetApp().CallAfter([=]()
if (m_dialog)
{
if (m_dialog)
{
if (index == 0 && m_text1) m_text1->SetLabelText(wxString(msg.c_str(), wxConvUTF8));
if (index == 1 && m_text2) m_text2->SetLabelText(wxString(msg.c_str(), wxConvUTF8));
m_dialog->Layout();
m_dialog->Fit();
}
});
if (index == 0 && m_text1) m_text1->SetLabelText(wxString(msg.c_str(), wxConvUTF8));
if (index == 1 && m_text2) m_text2->SetLabelText(wxString(msg.c_str(), wxConvUTF8));
m_dialog->Layout();
m_dialog->Fit();
}
}
void MsgDialogFrame::ProgressBarReset(u32 index)
{
wxGetApp().CallAfter([=]()
if (m_dialog)
{
if (m_dialog)
{
if (index == 0 && m_gauge1) m_gauge1->SetValue(0);
if (index == 1 && m_gauge2) m_gauge2->SetValue(0);
}
});
if (index == 0 && m_gauge1) m_gauge1->SetValue(0);
if (index == 1 && m_gauge2) m_gauge2->SetValue(0);
}
}
void MsgDialogFrame::ProgressBarInc(u32 index, u32 delta)
{
wxGetApp().CallAfter([=]()
if (m_dialog)
{
if (m_dialog)
{
if (index == 0 && m_gauge1) m_gauge1->SetValue(m_gauge1->GetValue() + delta);
if (index == 1 && m_gauge2) m_gauge2->SetValue(m_gauge2->GetValue() + delta);
}
});
if (index == 0 && m_gauge1) m_gauge1->SetValue(m_gauge1->GetValue() + delta);
if (index == 1 && m_gauge2) m_gauge2->SetValue(m_gauge2->GetValue() + delta);
}
}

View File

@ -2,7 +2,7 @@
#include "Emu/SysCalls/Modules/cellMsgDialog.h"
class MsgDialogFrame : public MsgDialogInstance
class MsgDialogFrame : public MsgDialogBase
{
std::unique_ptr<wxDialog> m_dialog;
wxGauge* m_gauge1;
@ -17,9 +17,9 @@ class MsgDialogFrame : public MsgDialogInstance
wxSizer* m_buttons;
public:
virtual void Create(u32 type, std::string msg) override;
virtual void Create(u32 type, const std::string& msg) override;
virtual void Destroy() override;
virtual void ProgressBarSetMsg(u32 progressBarIndex, std::string msg) override;
virtual void ProgressBarSetMsg(u32 progressBarIndex, const std::string& msg) override;
virtual void ProgressBarReset(u32 progressBarIndex) override;
virtual void ProgressBarInc(u32 progressBarIndex, u32 delta) override;
};

View File

@ -3,7 +3,7 @@
#include "SaveDataDialog.h"
s32 SaveDataDialogFrame::ShowSaveDataList(std::vector<SaveDataEntry>& save_entries, s32 focused, vm::ptr<CellSaveDataListSet> listSet)
s32 SaveDialogFrame::ShowSaveDataList(std::vector<SaveDataEntry>& save_entries, s32 focused, vm::ptr<CellSaveDataListSet> listSet)
{
return focused;
}

View File

@ -1,7 +1,8 @@
#pragma once
#include "Emu/SysCalls/Modules/cellSaveData.h"
class SaveDataDialogFrame : public SaveDataDialogInstance
class SaveDialogFrame : public SaveDialogBase
{
public:
virtual s32 ShowSaveDataList(std::vector<SaveDataEntry>& save_entries, s32 focused, vm::ptr<CellSaveDataListSet> listSet) override;

View File

@ -47,6 +47,7 @@
<ClCompile Include="..\Utilities\rTime.cpp" />
<ClCompile Include="..\Utilities\rXml.cpp" />
<ClCompile Include="..\Utilities\Semaphore.cpp" />
<ClCompile Include="..\Utilities\SharedMutex.cpp" />
<ClCompile Include="..\Utilities\SleepQueue.cpp" />
<ClCompile Include="..\Utilities\StrFmt.cpp" />
<ClCompile Include="..\Utilities\Thread.cpp" />
@ -189,7 +190,6 @@
<ClCompile Include="Emu\Cell\SPUThread.cpp" />
<ClCompile Include="Emu\CPU\CPUThread.cpp" />
<ClCompile Include="Emu\CPU\CPUThreadManager.cpp" />
<ClCompile Include="Emu\DbgCommand.cpp" />
<ClCompile Include="Emu\Event.cpp" />
<ClCompile Include="Emu\FS\VFS.cpp" />
<ClCompile Include="Emu\FS\vfsDevice.cpp" />
@ -372,6 +372,7 @@
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="..\Utilities\Atomic.h" />
<ClInclude Include="..\Utilities\AutoPause.h" />
<ClInclude Include="..\Utilities\BEType.h" />
<ClInclude Include="..\Utilities\GNU.h" />
@ -383,6 +384,7 @@
<ClInclude Include="..\Utilities\rTime.h" />
<ClInclude Include="..\Utilities\rXml.h" />
<ClInclude Include="..\Utilities\Semaphore.h" />
<ClInclude Include="..\Utilities\SharedMutex.h" />
<ClInclude Include="..\Utilities\simpleini\ConvertUTF.h" />
<ClInclude Include="..\Utilities\simpleini\SimpleIni.h" />
<ClInclude Include="..\Utilities\SleepQueue.h" />
@ -526,7 +528,6 @@
<ClInclude Include="Emu\Io\PadHandler.h" />
<ClInclude Include="Emu\Memory\Memory.h" />
<ClInclude Include="Emu\Memory\MemoryBlock.h" />
<ClInclude Include="Emu\Memory\atomic.h" />
<ClInclude Include="Emu\RSX\CgBinaryProgram.h" />
<ClInclude Include="Emu\RSX\Common\FragmentProgramDecompiler.h" />
<ClInclude Include="Emu\RSX\Common\ProgramStateCache.h" />

View File

@ -515,9 +515,6 @@
<ClCompile Include="Emu\SysCalls\LogBase.cpp">
<Filter>Emu\SysCalls</Filter>
</ClCompile>
<ClCompile Include="Emu\DbgCommand.cpp">
<Filter>Emu</Filter>
</ClCompile>
<ClCompile Include="Ini.cpp">
<Filter>Utilities</Filter>
</ClCompile>
@ -989,6 +986,9 @@
<ClCompile Include="Emu\Cell\SPUASMJITRecompiler.cpp">
<Filter>Emu\CPU\Cell</Filter>
</ClCompile>
<ClCompile Include="..\Utilities\SharedMutex.cpp">
<Filter>Utilities</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Crypto\aes.h">
@ -1609,9 +1609,6 @@
<ClInclude Include="Emu\Cell\SPUContext.h">
<Filter>Emu\CPU\Cell</Filter>
</ClInclude>
<ClInclude Include="Emu\Memory\atomic.h">
<Filter>Emu\Memory</Filter>
</ClInclude>
<ClInclude Include="Emu\SysCalls\lv2\sys_fs.h">
<Filter>Emu\SysCalls\lv2</Filter>
</ClInclude>
@ -1876,5 +1873,11 @@
<ClInclude Include="Emu\SysCalls\Modules\cellSysutilAvc2.h">
<Filter>Emu\SysCalls\Modules</Filter>
</ClInclude>
<ClInclude Include="..\Utilities\SharedMutex.h">
<Filter>Utilities</Filter>
</ClInclude>
<ClInclude Include="..\Utilities\Atomic.h">
<Filter>Utilities</Filter>
</ClInclude>
</ItemGroup>
</Project>

View File

@ -42,9 +42,6 @@ wxDEFINE_EVENT(wxEVT_DBG_COMMAND, wxCommandEvent);
IMPLEMENT_APP(Rpcs3App)
Rpcs3App* TheApp;
extern std::unique_ptr<MsgDialogInstance> g_msg_dialog;
extern std::unique_ptr<SaveDataDialogInstance> g_savedata_dialog;
bool Rpcs3App::OnInit()
{
static const wxCmdLineEntryDesc desc[]
@ -57,85 +54,70 @@ bool Rpcs3App::OnInit()
parser.SetDesc(desc);
parser.SetCmdLine(argc, argv);
if (parser.Parse())
{
// help was given, terminating
this->Exit();
}
SetSendDbgCommandCallback([](DbgCommand id, CPUThread* t)
EmuCallbacks callbacks;
callbacks.call_after = [](std::function<void()> func)
{
wxGetApp().CallAfter(std::move(func));
};
callbacks.process_events = [this]()
{
m_MainFrame->Update();
wxGetApp().ProcessPendingEvents();
};
callbacks.send_dbg_command = [](DbgCommand id, CPUThread* t)
{
wxGetApp().SendDbgCommand(id, t);
});
};
SetCallAfterCallback([](std::function<void()> func)
callbacks.get_kb_handler = []() -> std::unique_ptr<KeyboardHandlerBase>
{
wxGetApp().CallAfter(func);
});
SetGetKeyboardHandlerCountCallback([]()
{
return 2;
});
SetGetKeyboardHandlerCallback([](int i) -> KeyboardHandlerBase*
{
switch (i)
switch (auto mode = Ini.KeyboardHandlerMode.GetValue())
{
case 0: return new NullKeyboardHandler();
case 1: return new WindowsKeyboardHandler();
case 0: return std::make_unique<NullKeyboardHandler>();
case 1: return std::make_unique<WindowsKeyboardHandler>();
default: throw EXCEPTION("Invalid Keyboard Handler Mode %d", +mode);
}
};
assert(!"Invalid keyboard handler number");
return new NullKeyboardHandler();
});
SetGetMouseHandlerCountCallback([]()
callbacks.get_mouse_handler = []() -> std::unique_ptr<MouseHandlerBase>
{
return 2;
});
SetGetMouseHandlerCallback([](int i) -> MouseHandlerBase*
{
switch (i)
switch (auto mode = Ini.MouseHandlerMode.GetValue())
{
case 0: return new NullMouseHandler();
case 1: return new WindowsMouseHandler();
case 0: return std::make_unique<NullMouseHandler>();
case 1: return std::make_unique<WindowsMouseHandler>();
default: throw EXCEPTION("Invalid Mouse Handler Mode %d", +mode);
}
};
assert(!"Invalid mouse handler number");
return new NullMouseHandler();
});
SetGetPadHandlerCountCallback([]()
callbacks.get_pad_handler = []() -> std::unique_ptr<PadHandlerBase>
{
switch (auto mode = Ini.PadHandlerMode.GetValue())
{
case 0: return std::make_unique<NullPadHandler>();
case 1: return std::make_unique<WindowsPadHandler>();
#if defined(_WIN32)
return 3;
#else
return 2;
#endif
});
SetGetPadHandlerCallback([](int i) -> PadHandlerBase*
{
switch (i)
{
case 0: return new NullPadHandler();
case 1: return new WindowsPadHandler();
#if defined(_WIN32)
case 2: return new XInputPadHandler();
case 2: return std::make_unique<XInputPadHandler>();
#endif
default: throw EXCEPTION("Invalid Pad Handler Mode %d", +mode);
}
};
assert(!"Invalid pad handler number");
return new NullPadHandler();
});
SetGetGSFrameCallback([]() -> GSFrameBase*
callbacks.get_gs_frame = []() -> std::unique_ptr<GSFrameBase>
{
return new GLGSFrame();
});
return std::make_unique<GLGSFrame>();
};
// TODO: unify with get_gs_frame callback
#if defined(DX12_SUPPORT)
SetGetD3DGSFrameCallback([]() ->GSFrameBase2*
{
@ -143,8 +125,17 @@ bool Rpcs3App::OnInit()
});
#endif
g_msg_dialog.reset(new MsgDialogFrame);
g_savedata_dialog.reset(new SaveDataDialogFrame);
callbacks.get_msg_dialog = []() -> std::unique_ptr<MsgDialogBase>
{
return std::make_unique<MsgDialogFrame>();
};
callbacks.get_save_dialog = []() -> std::unique_ptr<SaveDialogBase>
{
return std::make_unique<SaveDialogFrame>();
};
Emu.SetCallbacks(std::move(callbacks));
TheApp = this;
SetAppName(_PRGNAME_);
@ -204,10 +195,6 @@ void Rpcs3App::Exit()
Ini.Save();
wxApp::Exit();
#ifdef _WIN32
timeEndPeriod(1);
#endif
}
void Rpcs3App::SendDbgCommand(DbgCommand id, CPUThread* thr)
@ -221,6 +208,11 @@ Rpcs3App::Rpcs3App()
{
#ifdef _WIN32
timeBeginPeriod(1);
std::atexit([]
{
timeEndPeriod(1);
});
#endif
#if defined(__unix__) && !defined(__APPLE__)

View File

@ -162,6 +162,9 @@ template<typename T1, typename T2, typename T3 = const char*> struct triplet_t
#define COPY_EXPR(expr) [=]{ return expr; }
#define EXCEPTION(text, ...) fmt::exception(__FILE__, __LINE__, __FUNCTION__, text, ##__VA_ARGS__)
#define VM_CAST(value) vm::impl_cast(value, __FILE__, __LINE__, __FUNCTION__)
#define IS_INTEGRAL(t) (std::is_integral<t>::value || std::is_same<std::decay_t<t>, u128>::value)
#define IS_INTEGER(t) (std::is_integral<t>::value || std::is_enum<t>::value || std::is_same<std::decay_t<t>, u128>::value)
#define IS_BINARY_COMPARABLE(t1, t2) (IS_INTEGER(t1) && IS_INTEGER(t2) && sizeof(t1) == sizeof(t2))
template<typename T> struct id_traits;
@ -169,5 +172,5 @@ template<typename T> struct id_traits;
#define _PRGVER_ "0.0.0.5"
#include "Utilities/BEType.h"
#include "Utilities/Atomic.h"
#include "Utilities/StrFmt.h"
#include "Emu/Memory/atomic.h"