vm::atomic update, small fixes

Nekotekina 2014-09-23 18:27:18 +04:00
parent e5a485e50b
commit a4a4e572a0
10 changed files with 145 additions and 123 deletions

View File

@ -95,6 +95,10 @@ int clock_gettime(int foo, struct timespec *ts);
#endif
#ifndef InterlockedCompareExchange
static __forceinline uint16_t InterlockedCompareExchange(volatile uint16_t* dest, uint16_t exch, uint16_t comp)
{
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
}
static __forceinline uint32_t InterlockedCompareExchange(volatile uint32_t* dest, uint32_t exch, uint32_t comp)
{
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
@ -106,6 +110,10 @@ static __forceinline uint64_t InterlockedCompareExchange(volatile uint64_t* dest
#endif
#ifndef InterlockedExchange
static __forceinline uint16_t InterlockedExchange(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedExchange16((volatile short*)dest, value);
}
static __forceinline uint32_t InterlockedExchange(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedExchange((volatile long*)dest, value);
@ -117,6 +125,10 @@ static __forceinline uint64_t InterlockedExchange(volatile uint64_t* dest, uint6
#endif
#ifndef InterlockedOr
static __forceinline uint16_t InterlockedOr(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedOr16((volatile short*)dest, value);
}
static __forceinline uint32_t InterlockedOr(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedOr((volatile long*)dest, value);
@ -128,6 +140,10 @@ static __forceinline uint64_t InterlockedOr(volatile uint64_t* dest, uint64_t va
#endif
#ifndef InterlockedAnd
static __forceinline uint16_t InterlockedAnd(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedAnd16((volatile short*)dest, value);
}
static __forceinline uint32_t InterlockedAnd(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedAnd((volatile long*)dest, value);
@ -139,6 +155,10 @@ static __forceinline uint64_t InterlockedAnd(volatile uint64_t* dest, uint64_t v
#endif
#ifndef InterlockedXor
static __forceinline uint16_t InterlockedXor(volatile uint16_t* dest, uint16_t value)
{
return _InterlockedXor16((volatile short*)dest, value);
}
static __forceinline uint32_t InterlockedXor(volatile uint32_t* dest, uint32_t value)
{
return _InterlockedXor((volatile long*)dest, value);
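
These hunks add 16-bit overloads next to the existing 32/64-bit MSVC-intrinsic wrappers. On non-MSVC builds the same names are typically provided via compiler builtins; below is a minimal sketch of matching fallbacks, assuming GCC/Clang __sync builtins are available (the project's actual non-Windows definitions may differ):

// Hypothetical non-MSVC fallbacks for the new 16-bit wrappers (sketch only).
#include <cstdint>
static inline uint16_t InterlockedCompareExchange(volatile uint16_t* dest, uint16_t exch, uint16_t comp)
{
	return __sync_val_compare_and_swap(dest, comp, exch); // returns the previous value
}
static inline uint16_t InterlockedExchange(volatile uint16_t* dest, uint16_t value)
{
	return __sync_lock_test_and_set(dest, value); // acquire barrier; assumed sufficient here
}
static inline uint16_t InterlockedOr(volatile uint16_t* dest, uint16_t value)
{
	return __sync_fetch_and_or(dest, value);
}
static inline uint16_t InterlockedAnd(volatile uint16_t* dest, uint16_t value)
{
	return __sync_fetch_and_and(dest, value);
}
static inline uint16_t InterlockedXor(volatile uint16_t* dest, uint16_t value)
{
	return __sync_fetch_and_xor(dest, value);
}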

View File

@ -33,13 +33,13 @@ public:
static const T GetFreeValue()
{
static const u64 value = free_value;
return (const T&)value;
return (T&)value;
}
static const T GetDeadValue()
{
static const u64 value = dead_value;
return (const T&)value;
return (T&)value;
}
void initialize()

View File

@ -5,11 +5,17 @@ namespace vm
template<typename T, size_t size = sizeof(T)>
struct _to_atomic
{
static_assert(size == 4 || size == 8, "Invalid atomic type");
static_assert(size == 2 || size == 4 || size == 8, "Invalid atomic type");
typedef T type;
};
template<typename T>
struct _to_atomic<T, 2>
{
typedef uint16_t type;
};
template<typename T>
struct _to_atomic<T, 4>
{
@ -93,18 +99,21 @@ namespace vm
}
}
// atomic bitwise logical OR, returns previous data
__forceinline const T _or(const T& right) volatile
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right));
return (T&)res;
}
// atomic bitwise logical AND, returns previous data
__forceinline const T _and(const T& right) volatile
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right));
return (T&)res;
}
// atomic bitwise logical XOR, returns previous data
__forceinline const T _xor(const T& right) volatile
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right));
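
With the new _to_atomic<T, 2> specialization above, vm::atomic can now wrap any 2-byte type; the CellSyncLFQueue push2_t/pop2_t members further down depend on this. A minimal usage sketch follows; the variable is hypothetical, and only member functions visible in this commit are used:

// Hypothetical 16-bit atomic field, backed by the uint16_t Interlocked* wrappers added above.
vm::atomic<be_t<u16>> word;

word.write_relaxed(be_t<u16>::make(0));                   // plain store
const be_t<u16> prev = word._or(be_t<u16>::make(0x0001)); // atomic OR, returns the previous value
word._and(be_t<u16>::make(0x7fff));                       // atomic AND (clear the top bit)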

View File

@ -55,7 +55,7 @@ namespace cb_detail
template<typename T, int g_count, int f_count, int v_count>
struct _func_arg<T, ARG_STACK, g_count, f_count, v_count>
{
static_assert(f_count <= 12, "TODO: Unsupported stack argument type (float)");
static_assert(f_count <= 13, "TODO: Unsupported stack argument type (float)");
static_assert(v_count <= 12, "TODO: Unsupported stack argument type (vector)");
static_assert(sizeof(T) <= 8, "Invalid callback argument type for ARG_STACK");
@ -84,7 +84,7 @@ namespace cb_detail
const bool is_float = std::is_floating_point<T1>::value;
const bool is_vector = std::is_same<T1, u128>::value;
const _func_arg_type t = is_float
? ((f_count >= 12) ? ARG_STACK : ARG_FLOAT)
? ((f_count >= 13) ? ARG_STACK : ARG_FLOAT)
: (is_vector ? ((v_count >= 12) ? ARG_STACK : ARG_VECTOR) : ((g_count >= 8) ? ARG_STACK : ARG_GENERAL));
const int g = g_count + (is_float || is_vector ? 0 : 1);
const int f = f_count + (is_float ? 1 : 0);
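
The 12 → 13 change here (and the matching one in the PPU function-binding header further down) lines up with the PPC64 ELF ABI, which passes up to 13 floating-point arguments in registers f1-f13, while general-purpose arguments use r3-r10 and vector arguments v2-v13, hence the untouched limits of 8 and 12. A self-contained restatement of the classification rule as a sketch (the enum and function name are illustrative, not from the commit):

// Illustrative restatement of the register-class selection above (not part of the commit).
enum arg_class { ARG_GENERAL, ARG_FLOAT, ARG_VECTOR, ARG_STACK };

constexpr arg_class classify_arg(bool is_float, bool is_vector, int g_count, int f_count, int v_count)
{
	return is_float  ? (f_count >= 13 ? ARG_STACK : ARG_FLOAT)    // f1-f13
	     : is_vector ? (v_count >= 12 ? ARG_STACK : ARG_VECTOR)   // v2-v13
	     :             (g_count >= 8  ? ARG_STACK : ARG_GENERAL); // r3-r10
}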

View File

@ -39,10 +39,9 @@ s64 spursAttachLv2EventQueue(vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> po
return CELL_SPURS_CORE_ERROR_STAT;
}
u32 unk1 = 0;
#ifdef PRX_DEBUG
unk1 = cb_call<u32>(GetCurrentPPUThread(), libsre + 0x10900, libsre_rtoc);
#endif
s32 sdk_ver;
assert(process_get_sdk_version(process_getpid(), sdk_ver) == CELL_OK);
if (sdk_ver == -1) sdk_ver = 0x460000;
u8 _port = 0x3f;
u8 port_start = 0x10;
@ -54,7 +53,7 @@ s64 spursAttachLv2EventQueue(vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> po
{
return CELL_SPURS_CORE_ERROR_INVAL;
}
if (unk1 <= 0x17ffff && _port > 0xf)
if (sdk_ver <= 0x17ffff && _port > 0xf)
{
return CELL_SPURS_CORE_ERROR_PERM;
}

View File

@ -844,7 +844,8 @@ void syncLFQueueInit(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u8> buffer, u32 siz
queue->m_depth = depth;
queue->m_buffer = buffer;
queue->m_direction = direction;
*queue->m_hs = {};
*queue->m_hs1 = {};
*queue->m_hs2 = {};
queue->m_eaSignal = eaSignal;
if (direction == CELL_SYNC_QUEUE_ANY2ANY)
@ -857,10 +858,8 @@ void syncLFQueueInit(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u8> buffer, u32 siz
//m_bs[2]
//m_bs[3]
queue->m_v1 = -1;
queue->m_hs[0] = -1;
queue->m_hs[16] = -1;
queue->m_v2 = 0;
queue->m_v3 = 0;
queue->push2.write_relaxed({ be_t<u16>::make(-1) });
queue->pop2.write_relaxed({ be_t<u16>::make(-1) });
}
else
{
@ -871,9 +870,12 @@ void syncLFQueueInit(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u8> buffer, u32 siz
queue->m_bs[2] = -1;
queue->m_bs[3] = -1;
queue->m_v1 = 0;
queue->m_v2 = 0; // written as u64
queue->m_v3 = 0;
queue->push2.write_relaxed({});
queue->pop2.write_relaxed({});
}
queue->m_v2 = 0;
queue->m_eq_id = 0;
}
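
In the ANY2ANY branch the old code cleared two raw slots of the m_hs array; those slots sit at byte offsets 0x30 and 0x50, which is exactly where the new push2/pop2 atomics live (see the CellSyncLFQueue layout further down), so the replacement writes the same bytes through a typed interface. A small before/after sketch:

// before (raw 16-bit slots of the old m_hs[32] array):
//   queue->m_hs[0]  = -1;   // byte offset 0x30
//   queue->m_hs[16] = -1;   // byte offset 0x50
// after (same bytes, written through the new typed atomics):
queue->push2.write_relaxed({ be_t<u16>::make(-1) });  // push2_t { pack } at 0x30
queue->pop2.write_relaxed({ be_t<u16>::make(-1) });   // pop2_t  { pack } at 0x50
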
s32 syncLFQueueInitialize(vm::ptr<CellSyncLFQueue> queue, vm::ptr<u8> buffer, u32 size, u32 depth, CellSyncQueueDirection direction, vm::ptr<void> eaSignal)
@ -1082,14 +1084,9 @@ s32 syncLFQueueGetPushPointer(vm::ptr<CellSyncLFQueue> queue, s32& pointer, u32
}
}
u32 eq = (u32)queue->m_v3; // 0x7c
sys_event_data event;
assert(0);
// TODO: sys_event_queue_receive (event data is not used); assert if an error is returned
assert(sys_event_queue_receive(queue->m_eq_id, {}, 0) == CELL_OK);
var1 = 1;
}
assert(0);
}
s32 _cellSyncLFQueueGetPushPointer(vm::ptr<CellSyncLFQueue> queue, vm::ptr<be_t<u32>> pointer, u32 isBlocking, u32 useEventQueue)
@ -1134,14 +1131,13 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
while (true)
{
const u32 old_data = InterlockedCompareExchange(&queue->m_push2(), 0, 0);
CellSyncLFQueue new_;
new_.m_push2() = old_data;
const auto old = queue->push2.read_sync();
auto push2 = old;
const auto old2 = queue->push3.read_relaxed();
auto push = old2;
auto push3 = old2;
s32 var1 = pointer - (u16)push.m_h5;
s32 var1 = pointer - (u16)push3.m_h5;
if (var1 < 0)
{
var1 += depth * 2;
@ -1163,7 +1159,7 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
{
var9_ = 1 << var9_;
}
s32 var9 = ~(var9_ | (u16)push.m_h6);
s32 var9 = ~(var9_ | (u16)push3.m_h6);
// count leading zeros in u16
{
u16 v = var9;
@ -1176,7 +1172,7 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
}
}
s32 var5 = (s32)(u16)push.m_h6 | var9_;
s32 var5 = (s32)(u16)push3.m_h6 | var9_;
if (var9 & 0x30)
{
var5 = 0;
@ -1186,13 +1182,13 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
var5 <<= var9;
}
s32 var3 = (u16)push.m_h5 + var9;
s32 var3 = (u16)push3.m_h5 + var9;
if (var3 >= depth * 2)
{
var3 -= depth * 2;
}
u16 pack = new_.m_hs[0]; // three packed 5-bit fields
u16 pack = push2.pack; // three packed 5-bit fields
s32 var4 = ((pack >> 10) & 0x1f) - ((pack >> 5) & 0x1f);
if (var4 < 0)
@ -1230,23 +1226,23 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
var12 = (var12 + 1) << 10;
}
new_.m_hs[0] = (pack & 0x83ff) | var12;
var6 = (u16)queue->m_hs[1 + 2 * var11];
push2.pack = (pack & 0x83ff) | var12;
var6 = (u16)queue->m_hs1[var11];
}
else
{
var6 = -1;
}
push.m_h5 = (u16)var3;
push.m_h6 = (u16)var5;
push3.m_h5 = (u16)var3;
push3.m_h6 = (u16)var5;
if (InterlockedCompareExchange(&queue->m_push2(), new_.m_push2(), old_data) == old_data)
if (queue->push2.compare_and_swap_test(old, push2))
{
assert(var2 + var4 < 16);
if (var6 != -1)
{
bool exch = queue->push3.compare_and_swap_test(old2, push);
bool exch = queue->push3.compare_and_swap_test(old2, push3);
assert(exch);
if (exch)
{
@ -1256,10 +1252,10 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
}
else
{
pack = queue->m_hs[0];
pack = queue->push2.read_relaxed().pack;
if ((pack & 0x1f) == ((pack >> 10) & 0x1f))
{
if (queue->push3.compare_and_swap_test(old2, push))
if (queue->push3.compare_and_swap_test(old2, push3))
{
return CELL_OK;
}
@ -1267,8 +1263,6 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
}
}
}
assert(0);
}
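
The raw InterlockedCompareExchange on m_push2()/m_pop2() is replaced here by the vm::atomic pattern: take a snapshot with read_sync, edit a local copy, and publish it with compare_and_swap_test, retrying if another thread got there first. Stripped down to its skeleton (compute_new_pack is a hypothetical placeholder for the bit-twiddling above):

// Skeleton of the retry loop in syncLFQueueCompletePushPointer / ...PopPointer.
while (true)
{
	const auto old = queue->push2.read_sync(); // atomic snapshot of the 16-bit control word
	auto next = old;                           // local working copy

	next.pack = compute_new_pack(old.pack);    // hypothetical: recompute the packed 5-bit fields

	if (queue->push2.compare_and_swap_test(old, next))
	{
		break; // snapshot was still current -> the update is now visible to other threads
	}
	// another thread modified push2 in the meantime -> retry with a fresh snapshot
}
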
s32 _cellSyncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(*)(u32 addr, u32 arg)> fpSendSignal)
@ -1480,14 +1474,9 @@ s32 syncLFQueueGetPopPointer(vm::ptr<CellSyncLFQueue> queue, s32& pointer, u32 i
}
}
u32 eq = (u32)queue->m_v3; // 0x7c
sys_event_data event;
assert(0);
// TODO: sys_event_queue_receive (event data is not used); assert if an error is returned
assert(sys_event_queue_receive(queue->m_eq_id, {}, 0) == CELL_OK);
var1 = 1;
}
assert(0);
}
s32 _cellSyncLFQueueGetPopPointer(vm::ptr<CellSyncLFQueue> queue, vm::ptr<be_t<u32>> pointer, u32 isBlocking, u32 arg4, u32 useEventQueue)
@ -1532,14 +1521,13 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
while (true)
{
const u32 old_data = InterlockedCompareExchange(&queue->m_pop2(), 0, 0);
CellSyncLFQueue new_;
new_.m_pop2() = old_data;
const auto old = queue->pop2.read_sync();
auto pop2 = old;
const auto old2 = queue->pop3.read_relaxed();
auto pop = old2;
auto pop3 = old2;
s32 var1 = pointer - (u16)pop.m_h1;
s32 var1 = pointer - (u16)pop3.m_h1;
if (var1 < 0)
{
var1 += depth * 2;
@ -1561,7 +1549,7 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
{
var9_ = 1 << var9_;
}
s32 var9 = ~(var9_ | (u16)pop.m_h2);
s32 var9 = ~(var9_ | (u16)pop3.m_h2);
// count leading zeros in u16
{
u16 v = var9;
@ -1574,7 +1562,7 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
}
}
s32 var5 = (s32)(u16)pop.m_h2 | var9_;
s32 var5 = (s32)(u16)pop3.m_h2 | var9_;
if (var9 & 0x30)
{
var5 = 0;
@ -1584,13 +1572,13 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
var5 <<= var9;
}
s32 var3 = (u16)pop.m_h1 + var9;
s32 var3 = (u16)pop3.m_h1 + var9;
if (var3 >= depth * 2)
{
var3 -= depth * 2;
}
u16 pack = new_.m_hs[16]; // three packed 5-bit fields
u16 pack = pop2.pack; // three packed 5-bit fields
s32 var4 = ((pack >> 10) & 0x1f) - ((pack >> 5) & 0x1f);
if (var4 < 0)
@ -1632,18 +1620,18 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
var12 = (var12 + 1) << 10;
}
new_.m_hs[0] = (pack & 0x83ff) | var12;
var6 = (u16)queue->m_hs[17 + 2 * var11];
pop2.pack = (pack & 0x83ff) | var12;
var6 = (u16)queue->m_hs2[var11];
}
pop.m_h1 = (u16)var3;
pop.m_h2 = (u16)var5;
pop3.m_h1 = (u16)var3;
pop3.m_h2 = (u16)var5;
if (InterlockedCompareExchange(&queue->m_pop2(), new_.m_pop2(), old_data) == old_data)
if (queue->pop2.compare_and_swap_test(old, pop2))
{
if (var6 != -1)
{
bool exch = queue->pop3.compare_and_swap_test(old2, pop);
bool exch = queue->pop3.compare_and_swap_test(old2, pop3);
assert(exch);
if (exch)
{
@ -1653,10 +1641,10 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
}
else
{
pack = queue->m_hs[16];
pack = queue->pop2.read_relaxed().pack;
if ((pack & 0x1f) == ((pack >> 10) & 0x1f))
{
if (queue->pop3.compare_and_swap_test(old2, pop))
if (queue->pop3.compare_and_swap_test(old2, pop3))
{
return CELL_OK;
}
@ -1664,8 +1652,6 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
}
}
}
assert(0);
}
s32 _cellSyncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, vm::ptr<s32(*)(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull)
@ -1793,7 +1779,6 @@ s32 cellSyncLFQueueClear(vm::ptr<CellSyncLFQueue> queue)
return CELL_SYNC_ERROR_ALIGN;
}
// TODO: optimize if possible
while (true)
{
const auto old = queue->pop1.read_sync();
@ -1804,7 +1789,7 @@ s32 cellSyncLFQueueClear(vm::ptr<CellSyncLFQueue> queue)
s32 var1, var2;
if (queue->m_direction.ToBE() != se32(CELL_SYNC_QUEUE_ANY2ANY))
{
var1 = var2 = (u16)queue->m_hs[16];
var1 = var2 = (u16)queue->pop2.read_relaxed().pack;
}
else
{
@ -1844,7 +1829,6 @@ s32 cellSyncLFQueueSize(vm::ptr<CellSyncLFQueue> queue, vm::ptr<be_t<u32>> size)
return CELL_SYNC_ERROR_ALIGN;
}
// TODO: optimize if possible
while (true)
{
const auto old = queue->pop3.read_sync();
@ -1865,8 +1849,6 @@ s32 cellSyncLFQueueSize(vm::ptr<CellSyncLFQueue> queue, vm::ptr<be_t<u32>> size)
return CELL_OK;
}
}
assert(0);
}
s32 cellSyncLFQueueDepth(vm::ptr<CellSyncLFQueue> queue, vm::ptr<be_t<u32>> depth)
@ -1937,28 +1919,36 @@ s32 cellSyncLFQueueGetEntrySize(vm::ptr<const CellSyncLFQueue> queue, vm::ptr<be
return CELL_OK;
}
s32 syncLFQueueAttachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
s32 syncLFQueueAttachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
// TODO
assert(0);
#ifdef PRX_DEBUG
return cb_call<s32, vm::ptr<u32>, u32, vm::ptr<CellSyncLFQueue>>(GetCurrentPPUThread(), libsre + 0x19A8, libsre_rtoc,
spus, num, queue);
#else
assert(!syncLFQueueAttachLv2EventQueue);
return CELL_OK;
#endif
}
s32 _cellSyncLFQueueAttachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
s32 _cellSyncLFQueueAttachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
cellSync->Todo("_cellSyncLFQueueAttachLv2EventQueue(spus_addr=0x%x, num=%d, queue_addr=0x%x)", spus.addr(), num, queue.addr());
return syncLFQueueAttachLv2EventQueue(spus, num, queue);
}
s32 syncLFQueueDetachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
s32 syncLFQueueDetachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
// TODO
assert(0);
#ifdef PRX_DEBUG
return cb_call<s32, vm::ptr<u32>, u32, vm::ptr<CellSyncLFQueue>>(GetCurrentPPUThread(), libsre + 0x1DA0, libsre_rtoc,
spus, num, queue);
#else
assert(!syncLFQueueDetachLv2EventQueue);
return CELL_OK;
#endif
}
s32 _cellSyncLFQueueDetachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
s32 _cellSyncLFQueueDetachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue)
{
cellSync->Todo("_cellSyncLFQueueDetachLv2EventQueue(spus_addr=0x%x, num=%d, queue_addr=0x%x)", spus.addr(), num, queue.addr());

View File

@ -105,6 +105,11 @@ struct CellSyncLFQueue
be_t<u16> m_h4;
};
struct pop2_t
{
be_t<u16> pack;
};
struct pop3_t
{
be_t<u16> m_h1;
@ -119,6 +124,11 @@ struct CellSyncLFQueue
be_t<u16> m_h8;
};
struct push2_t
{
be_t<u16> pack;
};
struct push3_t
{
be_t<u16> m_h5;
@ -127,36 +137,28 @@ struct CellSyncLFQueue
union
{
vm::atomic<pop1_t> pop1;
vm::atomic<pop1_t> pop1; // 0x0
vm::atomic<pop3_t> pop3;
};
union
{
vm::atomic<push1_t> push1;
vm::atomic<push1_t> push1; // 0x8
vm::atomic<push3_t> push3;
};
be_t<u32> m_size; // 0x10
be_t<u32> m_depth; // 0x14
be_t<u32> m_size; // 0x10
be_t<u32> m_depth; // 0x14
vm::bptr<u8, 1, u64> m_buffer; // 0x18
u8 m_bs[4]; // 0x20
u8 m_bs[4]; // 0x20
be_t<CellSyncQueueDirection> m_direction; // 0x24
be_t<u32> m_v1; // 0x28
vm::atomic<u32> init; // 0x2C
be_t<u16> m_hs[32]; // 0x30
be_t<u32> m_v1; // 0x28
vm::atomic<u32> init; // 0x2C
vm::atomic<push2_t> push2; // 0x30
be_t<u16> m_hs1[15]; // 0x32
vm::atomic<pop2_t> pop2; // 0x50
be_t<u16> m_hs2[15]; // 0x52
vm::bptr<void, 1, u64> m_eaSignal; // 0x70
be_t<u32> m_v2; // 0x78
be_t<u32> m_v3; // 0x7C
volatile u32& m_push2()
{
return *reinterpret_cast<u32*>((u8*)this + 0x30);
}
volatile u32& m_pop2()
{
return *reinterpret_cast<u32*>((u8*)this + 0x50);
}
be_t<u32> m_v2; // 0x78
be_t<u32> m_eq_id; // 0x7C
};
static_assert(sizeof(CellSyncLFQueue) == 128, "CellSyncLFQueue: wrong size");
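
The sizeof assertion catches total-size regressions but not the individual offsets annotated above; they could be pinned down the same way. A sketch (the CHECK_OFFSET macro is hypothetical, requires <cstddef> for offsetof, and assumes the compiler accepts offsetof on this type):

// Hypothetical layout checks mirroring the offset comments above.
#define CHECK_OFFSET(member, expected) \
	static_assert(offsetof(CellSyncLFQueue, member) == (expected), "CellSyncLFQueue::" #member ": wrong offset")

CHECK_OFFSET(m_size,   0x10);
CHECK_OFFSET(m_buffer, 0x18);
CHECK_OFFSET(init,     0x2C);
CHECK_OFFSET(push2,    0x30);
CHECK_OFFSET(m_hs1,    0x32);
CHECK_OFFSET(pop2,     0x50);
CHECK_OFFSET(m_hs2,    0x52);
CHECK_OFFSET(m_eq_id,  0x7C);

#undef CHECK_OFFSET
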
@ -178,5 +180,5 @@ s32 syncLFQueueGetPopPointer(vm::ptr<CellSyncLFQueue> queue, s32& pointer, u32 i
s32 syncLFQueueGetPopPointer2(vm::ptr<CellSyncLFQueue> queue, s32& pointer, u32 isBlocking, u32 useEventQueue);
s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, const std::function<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull);
s32 syncLFQueueCompletePopPointer2(vm::ptr<CellSyncLFQueue> queue, s32 pointer, const std::function<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull);
s32 syncLFQueueAttachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
s32 syncLFQueueDetachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
s32 syncLFQueueAttachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
s32 syncLFQueueDetachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);

View File

@ -57,14 +57,14 @@ namespace detail
template<typename T, int g_count, int f_count, int v_count>
struct bind_arg<T, ARG_STACK, g_count, f_count, v_count>
{
static_assert(f_count <= 12, "TODO: Unsupported stack argument type (float)");
static_assert(f_count <= 13, "TODO: Unsupported stack argument type (float)");
static_assert(v_count <= 12, "TODO: Unsupported stack argument type (vector)");
static_assert(sizeof(T) <= 8, "Invalid function argument type for ARG_STACK");
static __forceinline T func(PPUThread& CPU)
{
// TODO: check stack argument displacement
const u64 res = CPU.GetStackArg(8 + std::max(g_count - 8, 0) + std::max(f_count - 12, 0) + std::max(v_count - 12, 0));
const u64 res = CPU.GetStackArg(8 + std::max(g_count - 8, 0) + std::max(f_count - 13, 0) + std::max(v_count - 12, 0));
return (T&)res;
}
};
@ -144,7 +144,7 @@ namespace detail
const bool is_float = std::is_floating_point<T>::value;
const bool is_vector = std::is_same<T, u128>::value;
const bind_arg_type t = is_float
? ((f_count >= 12) ? ARG_STACK : ARG_FLOAT)
? ((f_count >= 13) ? ARG_STACK : ARG_FLOAT)
: (is_vector ? ((v_count >= 12) ? ARG_STACK : ARG_VECTOR) : ((g_count >= 8) ? ARG_STACK : ARG_GENERAL));
const int g = g_count + (is_float || is_vector ? 0 : 1);
const int f = f_count + (is_float ? 1 : 0);

View File

@ -151,10 +151,11 @@ s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_arra
return CELL_OK;
}
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> event, u64 timeout)
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event, u64 timeout)
{
sys_event.Log("sys_event_queue_receive(equeue_id=%d, event_addr=0x%x, timeout=%lld)",
equeue_id, event.addr(), timeout);
// dummy_event argument is ignored; event data is returned in registers (GPR[4..7])
sys_event.Log("sys_event_queue_receive(equeue_id=%d, dummy_event_addr=0x%x, timeout=%lld)",
equeue_id, dummy_event.addr(), timeout);
EventQueue* eq;
if (!Emu.GetIdManager().GetIDData(equeue_id, eq))
@ -193,19 +194,20 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> event, u64 ti
}
}
case SMR_SIGNAL:
{
eq->events.pop(*event);
eq->owner.unlock(tid);
sys_event.Log(" *** event received: source=0x%llx, d1=0x%llx, d2=0x%llx, d3=0x%llx",
(u64)event->source, (u64)event->data1, (u64)event->data2, (u64)event->data3);
/* passing event data in registers */
PPUThread& t = GetCurrentPPUThread();
t.GPR[4] = event->source;
t.GPR[5] = event->data1;
t.GPR[6] = event->data2;
t.GPR[7] = event->data3;
return CELL_OK;
}
{
sys_event_data event;
eq->events.pop(event);
eq->owner.unlock(tid);
sys_event.Log(" *** event received: source=0x%llx, d1=0x%llx, d2=0x%llx, d3=0x%llx",
(u64)event.source, (u64)event.data1, (u64)event.data2, (u64)event.data3);
/* passing event data in registers */
PPUThread& t = GetCurrentPPUThread();
t.GPR[4] = event.source;
t.GPR[5] = event.data1;
t.GPR[6] = event.data2;
t.GPR[7] = event.data3;
return CELL_OK;
}
case SMR_FAILED: break;
default: eq->sq.invalidate(tid); return CELL_ECANCELED;
}

View File

@ -223,7 +223,7 @@ u32 event_queue_create(u32 protocol, s32 type, u64 name_u64, u64 event_queue_key
// SysCalls
s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, s32 size);
s32 sys_event_queue_destroy(u32 equeue_id, s32 mode);
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> event, u64 timeout);
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event, u64 timeout);
s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_array, s32 size, vm::ptr<be_t<u32>> number);
s32 sys_event_queue_drain(u32 event_queue_id);