Reservation implemented

Some SPU bugs fixed; implemented:
sys_spu_thread_get_exit_status
sys_spu_thread_group_destroy
Nekotekina 2014-02-07 02:55:48 +04:00
parent cfe154d916
commit 6c606be09f
16 changed files with 272 additions and 148 deletions
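The changes below replace the per-PPU-thread reserve/reserve_addr flags with one global reservation record (reservation_struct, guarded by an SMutex) shared by the PPU interpreter, the SPU MFC atomic commands and cellSync. A minimal sketch of the load-reserve / store-conditional pattern the new code implements; addr and value are placeholders, the other names come from this commit:

// Load-and-reserve (LWARX / LDARX / GETLLAR): record who reserved what, plus a snapshot of the data.
{
	SMutexLocker lock(reservation.mutex);
	reservation.owner = lock.tid;                 // thread id of the reserving thread
	reservation.addr = addr;
	reservation.size = 4;
	reservation.data32[0] = Memory.Read32(addr);
}

// Store-conditional (STWCX. / STDCX. / PUTLLC): succeed only if the same thread still owns
// a reservation on the same address and the snapshot still matches memory.
{
	SMutexLocker lock(reservation.mutex);
	if (lock.tid == reservation.owner && reservation.addr == addr &&
		reservation.size == 4 && reservation.data32[0] == Memory.Read32(addr))
	{
		Memory.Write32(addr, value);              // success reported via CR0[EQ] or MFC_PUTLLC_SUCCESS
	}
	if (lock.tid == reservation.owner) reservation.clear(); // the reservation is consumed either way
}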


@ -118,11 +118,11 @@ public:
template<typename T, T (get_tid)()>
class SMutexLockerBase
{
typedef SMutexBase<T> T_SMutex;
T_SMutex& sm;
SMutexBase<T>& sm;
public:
const T tid;
SMutexLockerBase(T_SMutex& _sm)
SMutexLockerBase(SMutexBase<T>& _sm)
: sm(_sm)
, tid(get_tid())
{


@ -1,6 +1,8 @@
#include "stdafx.h"
#include "CPUThread.h"
reservation_struct reservation;
CPUThread* GetCurrentCPUThread()
{
return (CPUThread*)GetCurrentNamedThread();


@ -1,6 +1,48 @@
#pragma once
#include "Emu/Memory/MemoryBlock.h"
#include "Emu/CPU/CPUDecoder.h"
#include "Utilities/SMutex.h"
struct reservation_struct
{
SMutex mutex; // mutex for updating reservation_owner and data
u32 owner; // id of thread that got reservation
u32 addr;
u32 size;
union
{
u32 data32[32];
u64 data64[16];
};
__forceinline void clear()
{
owner = 0;
}
__forceinline bool compare128(u8* pointer)
{
return
data64[0] == *(u64*)(pointer + 0) &&
data64[1] == *(u64*)(pointer + 8) &&
data64[2] == *(u64*)(pointer + 16) &&
data64[3] == *(u64*)(pointer + 24) &&
data64[4] == *(u64*)(pointer + 32) &&
data64[5] == *(u64*)(pointer + 40) &&
data64[6] == *(u64*)(pointer + 48) &&
data64[7] == *(u64*)(pointer + 56) &&
data64[8] == *(u64*)(pointer + 64) &&
data64[9] == *(u64*)(pointer + 72) &&
data64[10] == *(u64*)(pointer + 80) &&
data64[11] == *(u64*)(pointer + 88) &&
data64[12] == *(u64*)(pointer + 96) &&
data64[13] == *(u64*)(pointer + 104) &&
data64[14] == *(u64*)(pointer + 112) &&
data64[15] == *(u64*)(pointer + 120);
}
};
extern reservation_struct reservation;
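compare128() above checks whether the 128-byte snapshot taken at GETLLAR time still matches main memory. The same check could be written more compactly; a hypothetical equivalent body, not part of the commit:

#include <cstring> // for std::memcmp

__forceinline bool compare128(u8* pointer)
{
	// byte-for-byte equality of the cached 128-byte snapshot against current memory
	return std::memcmp(data64, pointer, 128) == 0;
}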
enum CPUThreadType
{


@ -2362,9 +2362,12 @@ private:
void LWARX(u32 rd, u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
CPU.reserve_addr = addr;
CPU.reserve = true;
CPU.GPR[rd] = Memory.Read32(addr);
SMutexLocker lock(reservation.mutex);
reservation.owner = lock.tid;
reservation.addr = addr;
reservation.size = 4;
reservation.data32[0] = CPU.GPR[rd] = Memory.Read32(addr);
}
void LDX(u32 rd, u32 ra, u32 rb)
{
@ -2535,9 +2538,12 @@ private:
void LDARX(u32 rd, u32 ra, u32 rb)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
CPU.reserve_addr = addr;
CPU.reserve = true;
CPU.GPR[rd] = Memory.Read64(addr);
SMutexLocker lock(reservation.mutex);
reservation.owner = lock.tid;
reservation.addr = addr;
reservation.size = 8;
reservation.data64[0] = CPU.GPR[rd] = Memory.Read64(addr);
}
void DCBF(u32 ra, u32 rb)
{
@ -2650,25 +2656,19 @@ private:
}
void STWCX_(u32 rs, u32 ra, u32 rb)
{
CPU.SetCR(0, CPU.XER.SO ? CR_SO : 0);
if(CPU.reserve)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
if(addr == CPU.reserve_addr)
{
Memory.Write32(addr, CPU.GPR[rs]);
CPU.SetCR_EQ(0, true);
CPU.reserve = false;
}
else
{
static const bool u = 0;
if(u) Memory.Write32(addr, CPU.GPR[rs]);
CPU.SetCR_EQ(0, u);
CPU.reserve = false;
}
SMutexLocker lock(reservation.mutex);
if (lock.tid == reservation.owner && reservation.addr == addr && reservation.size == 4 && reservation.data32[0] == Memory.Read32(addr))
{
Memory.Write32(addr, CPU.GPR[rs]);
CPU.SetCR_EQ(0, true);
reservation.clear();
}
else
{
CPU.SetCR_EQ(0, false);
if (lock.tid == reservation.owner) reservation.clear();
}
}
void STWX(u32 rs, u32 ra, u32 rb)
@ -2709,23 +2709,19 @@ private:
}
void STDCX_(u32 rs, u32 ra, u32 rb)
{
CPU.SetCR(0, CPU.XER.SO ? CR_SO : 0);
if(!CPU.reserve)
{
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
const u64 addr = ra ? CPU.GPR[ra] + CPU.GPR[rb] : CPU.GPR[rb];
if(addr == CPU.reserve_addr)
{
Memory.Write64(addr, CPU.GPR[rs]);
CPU.SetCR_EQ(0, true);
}
else
{
static const bool u = 0;
if(u) Memory.Write64(addr, CPU.GPR[rs]);
CPU.SetCR_EQ(0, u);
CPU.reserve = false;
}
SMutexLocker lock(reservation.mutex);
if (lock.tid == reservation.owner && reservation.addr == addr && reservation.size == 8 && reservation.data64[0] == Memory.Read64(addr))
{
Memory.Write64(addr, CPU.GPR[rs]);
CPU.SetCR_EQ(0, true);
reservation.clear();
}
else
{
CPU.SetCR_EQ(0, false);
if (lock.tid == reservation.owner) reservation.clear();
}
}
void STBX(u32 rs, u32 ra, u32 rb)
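On a real PowerPC target, the usual consumer of these instructions is a retry loop: load-reserve, modify, store-conditional, branch back on failure. std::atomic read-modify-write operations compile to exactly that lwarx/stwcx. pattern, so a cleared or stolen reservation simply costs one extra pass through the loop. A host-independent illustration (hypothetical example, not emulator code):

#include <atomic>
#include <cstdint>

// Guest-style atomic increment: lwarx / addi / stwcx. / bne- on PPC.
static void atomic_increment(std::atomic<std::uint32_t>& counter)
{
	std::uint32_t expected = counter.load();
	while (!counter.compare_exchange_weak(expected, expected + 1))
	{
		// expected is reloaded by compare_exchange_weak; retry until stwcx. succeeds
	}
}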


@ -44,9 +44,6 @@ void PPUThread::DoReset()
VSCR.VSCR = 0;
cycle = 0;
reserve = false;
reserve_addr = 0;
}
void PPUThread::AddArgv(const wxString& arg)


@ -602,9 +602,6 @@ public:
};
};
u64 reserve_addr;
bool reserve;
u64 cycle;
public:


@ -32,15 +32,8 @@ private:
//0 - 10
void STOP(u32 code)
{
if(code & 0x2000)
{
CPU.SetExitStatus(code & 0xfff);
}
else
{
ConLog.Warning("STOP: 0x%x", code);
//Emu.Pause();
}
ConLog.Warning("STOP: 0x%x (exit_status -> 0)", code);
CPU.SetExitStatus(0);
CPU.Stop();
}
void LNOP()


@ -4,8 +4,6 @@
#include "MFC.h"
#include <mutex>
extern std::mutex g_SyncMutex; //can provide compatibility for CellSyncMutex through SPU<>PPU and SPU<>SPU
static const char* spu_reg_name[128] =
{
"$LR", "$SP", "$2", "$3", "$4", "$5", "$6", "$7",
@ -516,11 +514,11 @@ public:
case MFC_PUT_CMD:
case MFC_GET_CMD:
{
if (enable_log) ConLog.Write("DMA %s%s%s: lsa = 0x%x, ea = 0x%llx, tag = 0x%x, size = 0x%x, cmd = 0x%x",
/* if (enable_log) ConLog.Write("DMA %s%s%s: lsa = 0x%x, ea = 0x%llx, tag = 0x%x, size = 0x%x, cmd = 0x%x",
op & MFC_PUT_CMD ? "PUT" : "GET",
op & MFC_BARRIER_MASK ? "B" : "",
op & MFC_FENCE_MASK ? "F" : "",
lsa, ea, tag, size, cmd);
lsa, ea, tag, size, cmd); */
MFCArgs.CMDStatus.SetValue(dmac.Cmd(cmd, tag, lsa, ea, size));
}
break;
@ -528,22 +526,56 @@ public:
case MFC_GETLLAR_CMD:
case MFC_PUTLLC_CMD:
case MFC_PUTLLUC_CMD:
case MFC_PUTQLLUC_CMD:
{
if (op == MFC_GETLLAR_CMD)
/* if (enable_log) ConLog.Write("DMA %s: lsa=0x%x, ea = 0x%llx, (tag) = 0x%x, (size) = 0x%x, cmd = 0x%x",
op == MFC_GETLLAR_CMD ? "GETLLAR" : op == MFC_PUTLLC_CMD ? "PUTLLC" : op == MFC_PUTLLUC_CMD ? "PUTLLUC" : "PUTQLLUC",
lsa, ea, tag, size, cmd); */
if (op == MFC_GETLLAR_CMD) // get reservation
{
g_SyncMutex.lock();
SMutexLocker lock(reservation.mutex);
reservation.owner = lock.tid;
reservation.addr = ea;
reservation.size = 128;
dmac.ProcessCmd(MFC_GET_CMD, tag, lsa, ea, 128);
memcpy(&reservation.data64[0], Memory + ea, 128);
Prxy.AtomicStat.PushUncond(MFC_GETLLAR_SUCCESS);
}
ConLog.Warning("DMA %s: lsa=0x%x, ea = 0x%llx, (tag) = 0x%x, (size) = 0x%x, cmd = 0x%x",
op == MFC_GETLLAR_CMD ? "GETLLAR" : op == MFC_PUTLLC_CMD ? "PUTLLC" : "PUTLLUC",
lsa, ea, tag, size, cmd);
dmac.ProcessCmd(op == MFC_GETLLAR_CMD ? MFC_GET_CMD : MFC_PUT_CMD, tag, lsa, ea, 128);
Prxy.AtomicStat.PushUncond(op == MFC_GETLLAR_CMD ? MFC_GETLLAR_SUCCESS : op == MFC_PUTLLC_CMD ? MFC_PUTLLC_SUCCESS : MFC_PUTLLUC_SUCCESS);
if (op == MFC_PUTLLC_CMD || op == MFC_PUTLLUC_CMD)
else if (op == MFC_PUTLLC_CMD) // store conditional
{
g_SyncMutex.unlock();
SMutexLocker lock(reservation.mutex);
if (reservation.owner == lock.tid) // succeeded
{
if (reservation.addr == ea && reservation.size == 128 && reservation.compare128(Memory + ea))
{
dmac.ProcessCmd(MFC_PUT_CMD, tag, lsa, ea, 128);
Prxy.AtomicStat.PushUncond(MFC_PUTLLC_SUCCESS);
}
else
{
Prxy.AtomicStat.PushUncond(MFC_PUTLLC_FAILURE);
}
reservation.clear();
}
else // failed
{
Prxy.AtomicStat.PushUncond(MFC_PUTLLC_FAILURE);
}
}
else // store unconditional
{
SMutexLocker lock(reservation.mutex);
dmac.ProcessCmd(MFC_PUT_CMD, tag, lsa, ea, 128);
if (op == MFC_PUTLLUC_CMD)
{
Prxy.AtomicStat.PushUncond(MFC_PUTLLUC_SUCCESS);
}
if ((reservation.addr + reservation.size > ea && reservation.addr <= ea + size) ||
(ea + size > reservation.addr && ea <= reservation.addr + reservation.size))
{
reservation.clear();
}
}
}
break;
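The PUTLLUC/PUTQLLUC path above (and the cellSyncMutex functions further down) drop the reservation whenever the written range intersects the reserved range. A hypothetical helper expressing that test with half-open intervals; the commit's inline condition is slightly looser and also fires for merely adjacent ranges:

// True if [a, a + a_size) and [b, b + b_size) overlap.
static bool ranges_overlap(u64 a, u64 a_size, u64 b, u64 b_size)
{
	return a < b + b_size && b < a + a_size;
}

// e.g. before an unconditional 128-byte store at ea:
// if (ranges_overlap(ea, 128, reservation.addr, reservation.size)) reservation.clear();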


@ -749,7 +749,7 @@ int32_t cellGcmUnmapEaIoAddress(u64 ea)
ea = ea >> 20;
io = Memory.Read16(offsetTable.io + (ea*sizeof(u16)));
for(int i=0; i<size; i++)
for(u32 i=0; i<size; i++)
{
Memory.Write16(offsetTable.io + ((ea+i)*sizeof(u16)), 0xFFFF);
Memory.Write16(offsetTable.ea + ((io+i)*sizeof(u16)), 0xFFFF);
@ -772,7 +772,7 @@ int32_t cellGcmUnmapIoAddress(u64 io)
io = io >> 20;
ea = Memory.Read16(offsetTable.ea + (io*sizeof(u16)));
for(int i=0; i<size; i++)
for(u32 i=0; i<size; i++)
{
Memory.Write16(offsetTable.io + ((ea+i)*sizeof(u16)), 0xFFFF);
Memory.Write16(offsetTable.ea + ((io+i)*sizeof(u16)), 0xFFFF);


@ -4,9 +4,7 @@
#include <mutex>
void cellSync_init();
void cellSync_unload();
Module cellSync("cellSync", cellSync_init, nullptr, cellSync_unload);
std::mutex g_SyncMutex;
Module cellSync("cellSync", cellSync_init);
// Return Codes
enum
@ -56,8 +54,13 @@ int cellSyncMutexInitialize(mem_ptr_t<CellSyncMutex> mutex)
return CELL_SYNC_ERROR_ALIGN;
}
{ // global mutex
std::lock_guard<std::mutex> lock(g_SyncMutex); //???
{
SMutexLocker lock(reservation.mutex);
if ((reservation.addr + reservation.size > mutex.GetAddr() && reservation.addr <= mutex.GetAddr() + 4) ||
(mutex.GetAddr() + 4 > reservation.addr && mutex.GetAddr() <= reservation.addr + reservation.size))
{
reservation.clear();
}
mutex->m_data = 0;
return CELL_OK;
}
@ -77,8 +80,13 @@ int cellSyncMutexLock(mem_ptr_t<CellSyncMutex> mutex)
}
be_t<u16> old_order;
{ // global mutex
std::lock_guard<std::mutex> lock(g_SyncMutex);
{
SMutexLocker lock(reservation.mutex);
if ((reservation.addr + reservation.size > mutex.GetAddr() && reservation.addr <= mutex.GetAddr() + 4) ||
(mutex.GetAddr() + 4 > reservation.addr && mutex.GetAddr() <= reservation.addr + reservation.size))
{
reservation.clear();
}
old_order = mutex->m_order;
mutex->m_order = mutex->m_order + 1;
if (old_order == mutex->m_freed)
@ -98,7 +106,6 @@ int cellSyncMutexLock(mem_ptr_t<CellSyncMutex> mutex)
mutex.GetAddr(), (u16)old_order, (u16)mutex->m_order, (u16)mutex->m_freed);
}
}
//while (_InterlockedExchange((volatile long*)&mutex->m_data, 1)) Sleep(1);
_mm_mfence();
return CELL_OK;
}
@ -115,8 +122,13 @@ int cellSyncMutexTryLock(mem_ptr_t<CellSyncMutex> mutex)
{
return CELL_SYNC_ERROR_ALIGN;
}
{ /* global mutex */
std::lock_guard<std::mutex> lock(g_SyncMutex);
{
SMutexLocker lock(reservation.mutex);
if ((reservation.addr + reservation.size > mutex.GetAddr() && reservation.addr <= mutex.GetAddr() + 4) ||
(mutex.GetAddr() + 4 > reservation.addr && mutex.GetAddr() <= reservation.addr + reservation.size))
{
reservation.clear();
}
if (mutex->m_order != mutex->m_freed)
{
return CELL_SYNC_ERROR_BUSY;
@ -140,7 +152,12 @@ int cellSyncMutexUnlock(mem_ptr_t<CellSyncMutex> mutex)
}
{ /* global mutex */
std::lock_guard<std::mutex> lock(g_SyncMutex);
SMutexLocker lock(reservation.mutex);
if ((reservation.addr + reservation.size > mutex.GetAddr() && reservation.addr <= mutex.GetAddr() + 4) ||
(mutex.GetAddr() + 4 > reservation.addr && mutex.GetAddr() <= reservation.addr + reservation.size))
{
reservation.clear();
}
mutex->m_freed = mutex->m_freed + 1;
return CELL_OK;
}
@ -152,9 +169,4 @@ void cellSync_init()
cellSync.AddFunc(0x1bb675c2, cellSyncMutexLock);
cellSync.AddFunc(0xd06918c4, cellSyncMutexTryLock);
cellSync.AddFunc(0x91f2b7b0, cellSyncMutexUnlock);
}
void cellSync_unload()
{
g_SyncMutex.unlock();
}
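cellSyncMutexLock/Unlock above implement a ticket lock held in guest memory: m_order is the next ticket handed out, m_freed the ticket currently allowed in, and unlocking bumps m_freed. A minimal native model of the same idea (hypothetical; host atomics instead of the big-endian guest fields):

#include <atomic>
#include <cstdint>
#include <thread>

struct ticket_mutex
{
	std::atomic<std::uint16_t> order{0}; // next ticket to hand out (m_order)
	std::atomic<std::uint16_t> freed{0}; // ticket currently being served (m_freed)

	void lock()
	{
		const std::uint16_t ticket = order.fetch_add(1);
		while (freed.load() != ticket) std::this_thread::yield();
	}

	bool try_lock()
	{
		std::uint16_t t = freed.load();
		// succeed only if no ticket is outstanding (order == freed), then take the next one
		return order.compare_exchange_strong(t, static_cast<std::uint16_t>(t + 1));
	}

	void unlock() { freed.fetch_add(1); }
};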


@ -94,8 +94,8 @@ static func_caller* sc_table[1024] =
null_func, null_func, null_func, null_func, null_func, //154
null_func, bind_func(sys_spu_image_open), null_func, null_func, null_func, //159
bind_func(sys_raw_spu_create), null_func, null_func, null_func, null_func, //164
null_func, null_func, null_func, null_func, bind_func(sys_spu_initialize), //169
bind_func(sys_spu_thread_group_create), bind_func(sys_spu_thread_set_argument), bind_func(sys_spu_thread_initialize), //172
bind_func(sys_spu_thread_get_exit_status), bind_func(sys_spu_thread_set_argument), null_func, null_func, bind_func(sys_spu_initialize), //169
bind_func(sys_spu_thread_group_create), bind_func(sys_spu_thread_group_destroy), bind_func(sys_spu_thread_initialize), //172
bind_func(sys_spu_thread_group_start), bind_func(sys_spu_thread_group_suspend), //174
null_func, null_func, null_func, bind_func(sys_spu_thread_group_join), null_func, //179
null_func, bind_func(sys_spu_thread_write_ls), bind_func(sys_spu_thread_read_ls), null_func, bind_func(sys_spu_thread_write_snr), //184


@ -192,7 +192,6 @@ extern int sys_ppu_thread_restart(u32 thread_id);
extern int sys_ppu_thread_create(u32 thread_id_addr, u32 entry, u64 arg, int prio, u32 stacksize, u64 flags, u32 threadname_addr);
extern void sys_ppu_thread_once(u32 once_ctrl_addr, u32 entry);
extern int sys_ppu_thread_get_id(const u32 id_addr);
extern int sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, u32 spup_addr);
//memory
extern int sys_memory_container_create(u32 cid_addr, u32 yield_size);
@ -300,12 +299,14 @@ extern int _sys_heap_memalign(u32 heap_id, u32 align, u32 size, u64 p4);
extern int sys_spu_image_open(mem_ptr_t<sys_spu_image> img, u32 path_addr);
extern int sys_spu_thread_initialize(mem32_t thread, u32 group, u32 spu_num, mem_ptr_t<sys_spu_image> img, mem_ptr_t<sys_spu_thread_attribute> attr, mem_ptr_t<sys_spu_thread_argument> arg);
extern int sys_spu_thread_set_argument(u32 id, mem_ptr_t<sys_spu_thread_argument> arg);
extern int sys_spu_thread_group_destroy(u32 id);
extern int sys_spu_thread_group_start(u32 id);
extern int sys_spu_thread_group_suspend(u32 id);
extern int sys_spu_thread_group_create(mem32_t id, u32 num, int prio, mem_ptr_t<sys_spu_thread_group_attribute> attr);
extern int sys_spu_thread_create(mem32_t thread_id, mem32_t entry, u64 arg, int prio, u32 stacksize, u64 flags, u32 threadname_addr);
extern int sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup);
extern int sys_spu_thread_group_join(u32 id, mem32_t cause, mem32_t status);
extern int sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, u32 spup_addr);
extern int sys_raw_spu_create(mem32_t id, u32 attr_addr);
extern int sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu);
extern int sys_spu_thread_write_ls(u32 id, u32 address, u64 value, u32 type);
@ -315,6 +316,7 @@ extern int sys_spu_thread_set_spu_cfg(u32 id, u64 value);
extern int sys_spu_thread_get_spu_cfg(u32 id, mem64_t value);
extern int sys_spu_thread_write_snr(u32 id, u32 number, u32 value);
extern int sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num);
extern int sys_spu_thread_get_exit_status(u32 id, mem32_t status);
//sys_time
extern int sys_time_get_timezone(mem32_t timezone, mem32_t summertime);


@ -1,7 +1,6 @@
#include "stdafx.h"
#include "Emu/SysCalls/SysCalls.h"
#include "Emu/SysCalls/lv2/SC_Lwmutex.h"
#include "Utilities/SMutex.h"
#include <mutex>
SysCallBase sc_lwmutex("sys_lwmutex");
@ -13,19 +12,19 @@ int sys_lwmutex_create(mem_ptr_t<sys_lwmutex_t> lwmutex, mem_ptr_t<sys_lwmutex_a
if (!lwmutex.IsGood() || !attr.IsGood()) return CELL_EFAULT;
switch ((u32)attr->attr_recursive)
switch (attr->attr_recursive.ToBE())
{
case SYS_SYNC_RECURSIVE: break;
case SYS_SYNC_NOT_RECURSIVE: break;
case se32(SYS_SYNC_RECURSIVE): break;
case se32(SYS_SYNC_NOT_RECURSIVE): break;
default: return CELL_EINVAL;
}
switch ((u32)attr->attr_protocol)
switch (attr->attr_protocol.ToBE())
{
case SYS_SYNC_PRIORITY: sc_lwmutex.Warning("TODO: SYS_SYNC_PRIORITY attr"); break;
case SYS_SYNC_RETRY: sc_lwmutex.Warning("TODO: SYS_SYNC_RETRY attr"); break;
case SYS_SYNC_PRIORITY_INHERIT: sc_lwmutex.Warning("TODO: SYS_SYNC_PRIORITY_INHERIT attr"); break;
case SYS_SYNC_FIFO: sc_lwmutex.Warning("TODO: SYS_SYNC_FIFO attr"); break;
case se32(SYS_SYNC_PRIORITY): sc_lwmutex.Warning("TODO: SYS_SYNC_PRIORITY attr"); break;
case se32(SYS_SYNC_RETRY): break;
case se32(SYS_SYNC_PRIORITY_INHERIT): sc_lwmutex.Error("Invalid SYS_SYNC_PRIORITY_INHERIT attr"); break;
case se32(SYS_SYNC_FIFO): sc_lwmutex.Warning("TODO: SYS_SYNC_FIFO attr"); break;
default: return CELL_EINVAL;
}
@ -46,12 +45,12 @@ int sys_lwmutex_destroy(mem_ptr_t<sys_lwmutex_t> lwmutex)
if (!lwmutex.IsGood()) return CELL_EFAULT;
if (!lwmutex->attribute) return CELL_EINVAL;
// try to make it unable to lock
if (lwmutex->owner.trylock(~0) != SMR_OK) return CELL_EBUSY;
lwmutex->attribute = 0;
return CELL_OK;
switch (int res = lwmutex->trylock(~0))
{
case CELL_OK: lwmutex->attribute = 0;
default: return res;
}
}
int sys_lwmutex_lock(mem_ptr_t<sys_lwmutex_t> lwmutex, u64 timeout)
@ -60,8 +59,6 @@ int sys_lwmutex_lock(mem_ptr_t<sys_lwmutex_t> lwmutex, u64 timeout)
if (!lwmutex.IsGood()) return CELL_EFAULT;
if (!lwmutex->attribute) return CELL_EINVAL;
return lwmutex->lock(GetCurrentPPUThread().GetId(), timeout ? ((timeout < 1000) ? 1 : (timeout / 1000)) : 0);
}
@ -71,8 +68,6 @@ int sys_lwmutex_trylock(mem_ptr_t<sys_lwmutex_t> lwmutex)
if (!lwmutex.IsGood()) return CELL_EFAULT;
if (!lwmutex->attribute) return CELL_EINVAL;
return lwmutex->trylock(GetCurrentPPUThread().GetId());
}
@ -82,8 +77,6 @@ int sys_lwmutex_unlock(mem_ptr_t<sys_lwmutex_t> lwmutex)
if (!lwmutex.IsGood()) return CELL_EFAULT;
if (!lwmutex->unlock(GetCurrentPPUThread().GetId())) return CELL_EPERM;
return CELL_OK;
return lwmutex->unlock(GetCurrentPPUThread().GetId());
}


@ -10,7 +10,7 @@ enum
SYS_SYNC_PRIORITY = 2,
// Basic Priority Inheritance Protocol
SYS_SYNC_PRIORITY_INHERIT = 3,
// ????
// Not selected while unlocking
SYS_SYNC_RETRY = 4,
//
SYS_SYNC_ATTR_PROTOCOL_MASK = 0xF,
@ -53,11 +53,13 @@ struct sys_lwmutex_t
be_t<u32> sleep_queue;
be_t<u32> pad;
int trylock(u32 tid)
int enter(u32 tid) // check and process (non-)recursive mutex
{
if (tid == (u32)owner.GetOwner()) // recursive or deadlock
if (!attribute) return CELL_EINVAL;
if (tid == (u32)owner.GetOwner())
{
if (attribute & se32(SYS_SYNC_RECURSIVE))
if (attribute.ToBE() & se32(SYS_SYNC_RECURSIVE))
{
recursive_count += 1;
if (!recursive_count) return CELL_EKRESOURCE;
@ -68,18 +70,28 @@ struct sys_lwmutex_t
return CELL_EDEADLK;
}
}
return CELL_EBUSY;
}
int trylock(u32 tid)
{
switch (int res = enter(tid))
{
case CELL_EBUSY: break;
default: return res;
}
switch (owner.trylock(tid))
{
case SMR_OK: recursive_count = 1; return CELL_OK;
default: return CELL_EBUSY;
case SMR_OK: recursive_count = 1; return CELL_OK;
default: return CELL_EBUSY;
}
}
bool unlock(u32 tid)
int unlock(u32 tid)
{
if (tid != (u32)owner.GetOwner())
{
return false;
return CELL_EPERM;
}
else
{
@ -88,23 +100,22 @@ struct sys_lwmutex_t
{
owner.unlock(tid);
}
return true;
return CELL_OK;
}
}
int lock(u32 tid, u64 timeout)
{
switch (int res = trylock(tid))
switch (int res = enter(tid))
{
case CELL_OK: return CELL_OK;
case CELL_EBUSY: break;
default: return res;
case CELL_EBUSY: break;
default: return res;
}
switch (owner.lock(tid, timeout))
{
case SMR_OK: recursive_count = 1; return CELL_OK;
case SMR_TIMEOUT: return CELL_ETIMEDOUT;
default: return CELL_EINVAL;
case SMR_OK: recursive_count = 1; return CELL_OK;
case SMR_TIMEOUT: return CELL_ETIMEDOUT;
default: return CELL_EINVAL;
}
}
};
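The refactoring above funnels the recursion and deadlock handling into enter(), which either completes the operation (recursive re-entry by the owner) or returns CELL_EBUSY to tell lock()/trylock() to contend on the owner SMutex. A simplified, single-threaded model of that control flow (hypothetical; the real code uses SMutex, big-endian guest fields and an overflow check on recursive_count):

#include <cstdint>

enum { LW_OK = 0, LW_EBUSY, LW_EDEADLK, LW_EPERM };

struct lwmutex_model
{
	std::uint32_t owner = 0;           // 0 = free
	std::uint32_t recursive_count = 0;
	bool recursive = true;             // SYS_SYNC_RECURSIVE

	// Resolve re-entry by the current owner, or report LW_EBUSY so the caller
	// goes on to contend on the underlying owner mutex.
	int enter(std::uint32_t tid)
	{
		if (tid != owner) return LW_EBUSY;
		if (!recursive) return LW_EDEADLK;
		++recursive_count;
		return LW_OK;
	}

	int trylock(std::uint32_t tid)
	{
		int res = enter(tid);
		if (res != LW_EBUSY) return res;
		if (owner != 0) return LW_EBUSY;   // owner.trylock() failed
		owner = tid;
		recursive_count = 1;
		return LW_OK;
	}

	int unlock(std::uint32_t tid)
	{
		if (tid != owner) return LW_EPERM;
		if (--recursive_count == 0) owner = 0;
		return LW_OK;
	}
};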


@ -10,7 +10,7 @@ int sys_rwlock_create(mem32_t rw_lock_id, mem_ptr_t<sys_rwlock_attribute_t> attr
if (!rw_lock_id.IsGood() || !attr.IsGood()) return CELL_EFAULT;
switch (attr->attr_protocol)
switch (attr->attr_protocol.ToBE())
{
case se32(SYS_SYNC_PRIORITY): sys_rwlock.Warning("TODO: SYS_SYNC_PRIORITY attr"); break;
case se32(SYS_SYNC_RETRY): sys_rwlock.Error("Invalid SYS_SYNC_RETRY attr"); break;
@ -19,7 +19,7 @@ int sys_rwlock_create(mem32_t rw_lock_id, mem_ptr_t<sys_rwlock_attribute_t> attr
default: return CELL_EINVAL;
}
if (attr->attr_pshared != se32(0x200))
if (attr->attr_pshared.ToBE() != se32(0x200))
{
sys_rwlock.Error("Invalid attr_pshared(0x%x)", (u32)attr->attr_pshared);
return CELL_EINVAL;


@ -177,24 +177,69 @@ int sys_spu_thread_set_argument(u32 id, mem_ptr_t<sys_spu_thread_argument> arg)
return CELL_OK;
}
//173
int sys_spu_thread_group_start(u32 id)
//165
int sys_spu_thread_get_exit_status(u32 id, mem32_t status)
{
sc_spu.Warning("sys_spu_thread_group_start(id=0x%x)", id);
sc_spu.Warning("sys_spu_thread_get_exit_status(id=0x%x, status_addr=0x%x)", id, status.GetAddr());
if(!Emu.GetIdManager().CheckID(id))
if (!status.IsGood())
{
return CELL_EFAULT;
}
CPUThread* thr = Emu.GetCPU().GetThread(id);
if(!thr || (thr->GetType() != CPU_THREAD_SPU && thr->GetType() != CPU_THREAD_RAW_SPU))
{
return CELL_ESRCH;
}
if (!thr->IsStopped()) // (!!!) if SPU thread doesn't have exit status
{
return CELL_ESTAT;
}
status = thr->GetExitStatus();
return CELL_OK;
}
//171
int sys_spu_thread_group_destroy(u32 id)
{
sc_spu.Warning("sys_spu_thread_group_destroy(id=0x%x)", id);
SpuGroupInfo* group_info;
if(!Emu.GetIdManager().GetIDData(id, group_info))
{
return CELL_ESRCH;
}
//Emu.Pause();
for (int i = 0; i < group_info->list.GetCount(); i++)
if (group_info->lock) // ???
{
return CELL_EBUSY;
}
for (u32 i = 0; i < group_info->list.GetCount(); i++)
{
Emu.GetCPU().RemoveThread(group_info->list[i]);
}
Emu.GetIdManager().RemoveID(id);
return CELL_OK;
}
//173
int sys_spu_thread_group_start(u32 id)
{
sc_spu.Warning("sys_spu_thread_group_start(id=0x%x)", id);
SpuGroupInfo* group_info;
if(!Emu.GetIdManager().GetIDData(id, group_info))
{
return CELL_ESRCH;
}
for (u32 i = 0; i < group_info->list.GetCount(); i++)
{
CPUThread* t;
if (t = Emu.GetCPU().GetThread(group_info->list[i]))
@ -218,10 +263,9 @@ int sys_spu_thread_group_suspend(u32 id)
}
//Emu.Pause();
for (int i = 0; i < group_info->list.GetCount(); i++)
for (u32 i = 0; i < group_info->list.GetCount(); i++)
{
CPUThread* t;
if (t = Emu.GetCPU().GetThread(group_info->list[i]))
if (CPUThread* t = Emu.GetCPU().GetThread(group_info->list[i]))
{
t->Pause();
}
@ -273,14 +317,17 @@ int sys_spu_thread_group_join(u32 id, mem32_t cause, mem32_t status)
cause = SYS_SPU_THREAD_GROUP_JOIN_ALL_THREADS_EXIT;
status = 0; //unspecified because of ALL_THREADS_EXIT
for (int i = 0; i < group_info->list.GetCount(); i++)
for (u32 i = 0; i < group_info->list.GetCount(); i++)
{
while (Emu.GetCPU().GetThread(group_info->list[i]))
while (CPUThread* t = Emu.GetCPU().GetThread(group_info->list[i]))
{
Sleep(1);
if (!t->IsRunning())
{
break;
}
if (Emu.IsStopped()) return CELL_OK;
Sleep(1);
}
group_info->list[i] = 0;
}
group_info->lock = 0; // release lock
@ -514,7 +561,7 @@ int sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, u32
return CELL_ESRCH;
}
for(int i=0; i<group->list.GetCount(); ++i)
for(u32 i=0; i<group->list.GetCount(); ++i)
{
CPUThread* t;
if(t = Emu.GetCPU().GetThread(group->list[i]))