Memory alignment fix

Some mutexes added, _sys_heap_memalign implemented
This commit is contained in:
Nekotekina 2014-02-02 23:49:10 +04:00
parent 15dd0bb0f2
commit 7fce5589f8
16 changed files with 236 additions and 116 deletions

View File

@ -76,6 +76,8 @@ void CPUThreadManager::RemoveThread(const u32 id)
s32 CPUThreadManager::GetThreadNumById(CPUThreadType type, u32 id)
{
std::lock_guard<std::mutex> lock(m_mtx_thread);
s32 num = 0;
for(u32 i=0; i<m_threads.GetCount(); ++i)
@ -89,16 +91,17 @@ s32 CPUThreadManager::GetThreadNumById(CPUThreadType type, u32 id)
CPUThread* CPUThreadManager::GetThread(u32 id)
{
for(u32 i=0; i<m_threads.GetCount(); ++i)
{
if(m_threads[i].GetId() == id) return &m_threads[i];
}
CPUThread* res;
return nullptr;
Emu.GetIdManager().GetIDData(id, res);
return res;
}
void CPUThreadManager::Exec()
{
std::lock_guard<std::mutex> lock(m_mtx_thread);
for(u32 i=0; i<m_threads.GetCount(); ++i)
{
m_threads[i].Exec();

View File

@ -32,7 +32,7 @@ void PPCThread::InitStack()
{
if(m_stack_addr) return;
if(m_stack_size == 0) m_stack_size = 0x10000;
m_stack_addr = Memory.StackMem.Alloc(Memory.AlignAddr(m_stack_size, 0x100));
m_stack_addr = Memory.StackMem.AllocAlign(m_stack_size, 0x100);
m_stack_point = m_stack_addr + m_stack_size;
/*

View File

@ -68,14 +68,16 @@ void PPUThread::InitRegs()
SetPc(pc);
const s32 thread_num = Emu.GetCPU().GetThreadNumById(GetType(), GetId());
/*
const s32 thread_num = Emu.GetCPU().GetThreadNumById(GetType(), GetId());
if(thread_num < 0)
{
ConLog.Error("GetThreadNumById failed.");
ConLog.Error("GetThreadNumById failed.");
Emu.Pause();
return;
}
*/
/*
const s32 tls_size = Emu.GetTLSFilesz() * thread_num;
@ -120,7 +122,7 @@ void PPUThread::InitRegs()
GPR[6] = m_args[3];
}
u32 prx_mem = Memory.PRXMem.Alloc(0x10000);
u32 prx_mem = Memory.PRXMem.AllocAlign(0x10000);
Memory.Write64(prx_mem, 0xDEADBEEFABADCAFE);
GPR[0] = pc;

View File

@ -10,11 +10,13 @@ DynamicMemoryBlockBase<PT>::DynamicMemoryBlockBase()
template<typename PT>
const u32 DynamicMemoryBlockBase<PT>::GetUsedSize() const
{
std::lock_guard<std::mutex> lock(m_lock);
u32 size = 0;
for(u32 i=0; i<m_used_mem.GetCount(); ++i)
for(u32 i=0; i<m_allocated.GetCount(); ++i)
{
size += m_used_mem[i].size;
size += m_allocated[i].size;
}
return size;
@ -35,38 +37,51 @@ bool DynamicMemoryBlockBase<PT>::IsInMyRange(const u64 addr, const u32 size)
template<typename PT>
bool DynamicMemoryBlockBase<PT>::IsMyAddress(const u64 addr)
{
for(u32 i=0; i<m_used_mem.GetCount(); ++i)
{
if(addr >= m_used_mem[i].addr && addr < m_used_mem[i].addr + m_used_mem[i].size)
{
return true;
}
}
if (!IsInMyRange(addr)) return false;
return false;
const u32 index = MemoryBlock::FixAddr(addr) >> 12;
return m_pages[index] && !m_locked[index];
}
template<typename PT>
MemoryBlock* DynamicMemoryBlockBase<PT>::SetRange(const u64 start, const u32 size)
{
m_max_size = size;
std::lock_guard<std::mutex> lock(m_lock);
m_max_size = PAGE_4K(size);
MemoryBlock::SetRange(start, 0);
const u32 page_count = m_max_size >> 12;
m_pages.SetCount(page_count);
m_locked.SetCount(page_count);
memset(m_pages.GetPtr(), 0, sizeof(u8*) * page_count);
memset(m_locked.GetPtr(), 0, sizeof(u8*) * page_count);
return this;
}
template<typename PT>
void DynamicMemoryBlockBase<PT>::Delete()
{
m_used_mem.Clear();
std::lock_guard<std::mutex> lock(m_lock);
m_allocated.Clear();
m_max_size = 0;
m_pages.Clear();
m_locked.Clear();
MemoryBlock::Delete();
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Alloc(u64 addr, u32 size)
bool DynamicMemoryBlockBase<PT>::AllocFixed(u64 addr, u32 size)
{
size = PAGE_4K(size); // align size
addr &= ~4095; // align start address
if(!IsInMyRange(addr, size))
{
assert(0);
@ -78,43 +93,77 @@ bool DynamicMemoryBlockBase<PT>::Alloc(u64 addr, u32 size)
return false;
}
for(u32 i=0; i<m_used_mem.GetCount(); ++i)
std::lock_guard<std::mutex> lock(m_lock);
for(u32 i=0; i<m_allocated.GetCount(); ++i)
{
if(addr >= m_used_mem[i].addr && addr < m_used_mem[i].addr + m_used_mem[i].size) return false;
if(addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) return false;
}
AppendUsedMem(addr, size);
AppendMem(addr, size);
return true;
}
template<typename PT>
void DynamicMemoryBlockBase<PT>::AppendUsedMem(u64 addr, u32 size)
void DynamicMemoryBlockBase<PT>::AppendMem(u64 addr, u32 size) /* private */
{
m_used_mem.Move(new MemBlockInfo(addr, size));
u8* pointer = (u8*)m_allocated[m_allocated.Move(new MemBlockInfo(addr, size))].mem;
const u32 first = MemoryBlock::FixAddr(addr) >> 12;
const u32 last = first + ((size - 1) >> 12);
for (u32 i = first; i <= last; i++)
{
m_pages[i] = pointer;
m_locked[i] = nullptr;
pointer += 4096;
}
}
template<typename PT>
u64 DynamicMemoryBlockBase<PT>::Alloc(u32 size)
u64 DynamicMemoryBlockBase<PT>::AllocAlign(u32 size, u32 align)
{
for(u64 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - size;)
size = PAGE_4K(size);
u32 exsize;
if (align <= 4096)
{
align = 0;
exsize = size;
}
else
{
align &= ~4095;
exsize = size + align - 1;
}
std::lock_guard<std::mutex> lock(m_lock);
for(u64 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
{
bool is_good_addr = true;
for(u32 i=0; i<m_used_mem.GetCount(); ++i)
for(u32 i=0; i<m_allocated.GetCount(); ++i)
{
if((addr >= m_used_mem[i].addr && addr < m_used_mem[i].addr + m_used_mem[i].size) ||
(m_used_mem[i].addr >= addr && m_used_mem[i].addr < addr + size))
if((addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) ||
(m_allocated[i].addr >= addr && m_allocated[i].addr < addr + exsize))
{
is_good_addr = false;
addr = m_used_mem[i].addr + m_used_mem[i].size;
addr = m_allocated[i].addr + m_allocated[i].size;
break;
}
}
if(!is_good_addr) continue;
AppendUsedMem(addr, size);
if (align)
{
addr = (addr + (align - 1)) & ~(align - 1);
}
AppendMem(addr, size);
return addr;
}
@ -125,18 +174,38 @@ u64 DynamicMemoryBlockBase<PT>::Alloc(u32 size)
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Alloc()
{
return Alloc(GetSize() - GetUsedSize()) != 0;
return AllocAlign(GetSize() - GetUsedSize(), 0) != 0;
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Free(u64 addr)
{
for(u32 i=0; i<m_used_mem.GetCount(); ++i)
std::lock_guard<std::mutex> lock(m_lock);
for (u32 num = 0; num < m_allocated.GetCount(); num++)
{
if(addr == m_used_mem[i].addr)
if (addr == m_allocated[num].addr)
{
if(IsLocked(m_used_mem[i].addr)) return false;
m_used_mem.RemoveAt(i);
/* if(IsLocked(m_allocated[num].addr)) return false; */
const u32 first = MemoryBlock::FixAddr(addr) >> 12;
const u32 last = first + ((m_allocated[num].size - 1) >> 12);
// check if locked:
for (u32 i = first; i <= last; i++)
{
if (!m_pages[i] || m_locked[i]) return false;
}
// clear pointers:
for (u32 i = first; i <= last; i++)
{
m_pages[i] = nullptr;
m_locked[i] = nullptr;
}
m_allocated.RemoveAt(num);
return true;
}
}
@ -145,15 +214,15 @@ bool DynamicMemoryBlockBase<PT>::Free(u64 addr)
}
template<typename PT>
u8* DynamicMemoryBlockBase<PT>::GetMem(u64 addr) const
u8* DynamicMemoryBlockBase<PT>::GetMem(u64 addr) const // lock-free, addr is fixed
{
for(u32 i=0; i<m_used_mem.GetCount(); ++i)
{
u64 _addr = MemoryBlock::FixAddr(m_used_mem[i].addr);
const u32 index = addr >> 12;
if(addr >= _addr && addr < _addr + m_used_mem[i].size)
if (index < m_pages.GetCount())
{
if (u8* res = m_pages[index])
{
return (u8*)m_used_mem[i].mem + addr - _addr;
return res + (addr & 4095);
}
}
@ -163,28 +232,28 @@ u8* DynamicMemoryBlockBase<PT>::GetMem(u64 addr) const
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::IsLocked(const u64 addr)
bool DynamicMemoryBlockBase<PT>::IsLocked(u64 addr) // lock-free
{
for(u32 i=0; i<m_locked_mem.GetCount(); ++i)
if (IsInMyRange(addr))
{
if(addr == m_locked_mem[i].addr)
const u32 index = MemoryBlock::FixAddr(addr) >> 12;
if (index < m_locked.GetCount())
{
return true;
if (m_locked[index]) return true;
}
}
return false;
}
template<typename PT>
void DynamicMemoryBlockBase<PT>::AppendLockedMem(u64 addr, u32 size)
{
m_locked_mem.Move(new MemBlockInfo(addr, size));
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Lock(u64 addr, u32 size)
{
size = PAGE_4K(size); // align size
addr &= ~4095; // align start address
if(!IsInMyRange(addr, size))
{
assert(0);
@ -196,33 +265,58 @@ bool DynamicMemoryBlockBase<PT>::Lock(u64 addr, u32 size)
return false;
}
AppendLockedMem(addr, size);
const u32 first = MemoryBlock::FixAddr(addr) >> 12;
const u32 last = first + ((size - 1) >> 12);
for (u32 i = first; i <= last; i++)
{
if (u8* pointer = m_pages[i])
{
m_locked[i] = pointer;
m_pages[i] = nullptr;
}
else // already locked or empty
{
}
}
return true;
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Unlock(u64 addr , u32 size)
bool DynamicMemoryBlockBase<PT>::Unlock(u64 addr, u32 size)
{
for(u32 i=0; i<m_locked_mem.GetCount(); ++i)
size = PAGE_4K(size); // align size
addr &= ~4095; // align start address
if(!IsInMyRange(addr, size))
{
if(addr == m_locked_mem[i].addr)
assert(0);
return false;
}
if(IsMyAddress(addr) || IsMyAddress(addr + size - 1))
{
return false;
}
const u32 first = MemoryBlock::FixAddr(addr) >> 12;
const u32 last = first + ((size - 1) >> 12);
for (u32 i = first; i <= last; i++)
{
if (u8* pointer = m_locked[i])
{
m_pages[i] = pointer;
m_locked[i] = nullptr;
}
else // already unlocked or empty
{
if(m_locked_mem.Get(i).size > size)
{
m_locked_mem.Get(i).size -= size;
}
else if(m_locked_mem.Get(i).size == size)
{
m_locked_mem.RemoveAt(i);
}
else
{
return false;
}
return true;
}
}
return false;
return true;
}

View File

@ -341,7 +341,7 @@ public:
u64 Alloc(const u32 size, const u32 align)
{
return UserMemory->Alloc(AlignAddr(size, align));
return UserMemory->AllocAlign(size, align);
}
bool Free(const u64 addr)

View File

@ -1,5 +1,7 @@
#pragma once
// Round x up to the next 4 KiB (page) boundary.
// Fully parenthesized: the original expansion `(x + 4095) & ~(4095)` broke under
// operator precedence — e.g. `PAGE_4K(n) * 2` expanded to `(n + 4095) & (~4095 * 2)`,
// and an expression argument like `PAGE_4K(a | b)` bound `+ 4095` to `b` only.
#define PAGE_4K(x) (((x) + 4095) & ~(4095))
struct MemInfo
{
u64 addr;
@ -21,8 +23,8 @@ struct MemBlockInfo : public MemInfo
void* mem;
MemBlockInfo(u64 _addr, u32 _size)
: MemInfo(_addr, _size)
, mem(malloc(_size))
: MemInfo(_addr, PAGE_4K(_size))
, mem(_aligned_malloc(PAGE_4K(_size), 128))
{
if(!mem)
{
@ -35,7 +37,7 @@ struct MemBlockInfo : public MemInfo
~MemBlockInfo()
{
free(mem);
_aligned_free(mem);
mem = nullptr;
}
};
@ -120,8 +122,8 @@ public:
u8* GetMem() const { return mem; }
virtual u8* GetMem(u64 addr) const { return mem + addr; }
virtual bool Alloc(u64 addr, u32 size) { return false; }
virtual u64 Alloc(u32 size) { return 0; }
virtual bool AllocFixed(u64 addr, u32 size) { return false; }
virtual u64 AllocAlign(u32 size, u32 align = 0) { return 0; }
virtual bool Alloc() { return false; }
virtual bool Free(u64 addr) { return false; }
virtual bool Lock(u64 addr, u32 size) { return false; }
@ -190,8 +192,11 @@ class NullMemoryBlock : public MemoryBlock
template<typename PT>
class DynamicMemoryBlockBase : public PT
{
Array<MemBlockInfo> m_used_mem;
Array<MemBlockInfo> m_locked_mem;
mutable std::mutex m_lock;
Array<MemBlockInfo> m_allocated; // allocation info
Array<u8*> m_pages; // real addresses of every 4096 byte pages (array size should be fixed)
Array<u8*> m_locked; // locked pages should be moved here
u32 m_max_size;
public:
@ -209,8 +214,8 @@ public:
virtual void Delete();
virtual bool Alloc(u64 addr, u32 size);
virtual u64 Alloc(u32 size);
virtual bool AllocFixed(u64 addr, u32 size);
virtual u64 AllocAlign(u32 size, u32 align = 0);
virtual bool Alloc();
virtual bool Free(u64 addr);
virtual bool Lock(u64 addr, u32 size);
@ -219,8 +224,7 @@ public:
virtual u8* GetMem(u64 addr) const;
private:
void AppendUsedMem(u64 addr, u32 size);
void AppendLockedMem(u64 addr, u32 size);
void AppendMem(u64 addr, u32 size);
};
class VirtualMemoryBlock : public MemoryBlock

View File

@ -63,7 +63,7 @@ int cellGcmInit(u32 context_addr, u32 cmdSize, u32 ioSize, u32 ioAddress)
{
local_size = 0xf900000; //TODO
local_addr = Memory.RSXFBMem.GetStartAddr();
Memory.RSXFBMem.Alloc(local_size);
Memory.RSXFBMem.AllocAlign(local_size);
}
cellGcmSys.Warning("*** local memory(addr=0x%x, size=0x%x)", local_addr, local_size);
@ -78,7 +78,7 @@ int cellGcmInit(u32 context_addr, u32 cmdSize, u32 ioSize, u32 ioAddress)
current_config.coreFrequency = re32(500000000);
InitOffsetTable();
Memory.RSXCMDMem.Alloc(cmdSize);
Memory.RSXCMDMem.AllocAlign(cmdSize);
Memory.MemoryBlocks.push_back(Memory.RSXIOMem.SetRange(0x50000000, 0x10000000/*256MB*/));//TODO: implement allocateAdressSpace in memoryBase
cellGcmMapEaIoAddress(ioAddress, 0, ioSize);
@ -89,7 +89,7 @@ int cellGcmInit(u32 context_addr, u32 cmdSize, u32 ioSize, u32 ioAddress)
current_context.current = current_context.begin;
current_context.callback = re32(Emu.GetRSXCallback() - 4);
gcm_info.context_addr = Memory.MainMem.Alloc(0x1000);
gcm_info.context_addr = Memory.MainMem.AllocAlign(0x1000);
gcm_info.control_addr = gcm_info.context_addr + 0x40;
Memory.WriteData(gcm_info.context_addr, current_context);
@ -682,7 +682,7 @@ int32_t cellGcmMapLocalMemory(u64 address, u64 size)
{
local_size = 0xf900000; //TODO
local_addr = Memory.RSXFBMem.GetStartAddr();
Memory.RSXFBMem.Alloc(local_size);
Memory.RSXFBMem.AllocAlign(local_size);
Memory.Write32(address, local_addr);
Memory.Write32(size, local_size);
}

View File

@ -164,6 +164,7 @@ void sysPrxForUser_init()
//sysPrxForUser.AddFunc(0xaede4b03, sys_heap_free);
//sysPrxForUser.AddFunc(0x8a561d92, sys_heap_delete_heap);
sysPrxForUser.AddFunc(0xb2fcf2c8, sys_heap_create_heap);
sysPrxForUser.AddFunc(0x44265c08, _sys_heap_memalign);
sysPrxForUser.AddFunc(0xb257540b, sys_mmapper_allocate_memory);
sysPrxForUser.AddFunc(0xdc578057, sys_mmapper_map_memory);

View File

@ -100,7 +100,7 @@ static func_caller* sc_table[1024] =
null_func, null_func, null_func, bind_func(sys_spu_thread_group_join), null_func, //179
null_func, bind_func(sys_spu_thread_write_ls), bind_func(sys_spu_thread_read_ls), null_func, bind_func(sys_spu_thread_write_snr), //184
null_func, null_func, bind_func(sys_spu_thread_set_spu_cfg), bind_func(sys_spu_thread_get_spu_cfg), null_func, //189
bind_func(sys_spu_thread_write_spu_mb), bind_func(sys_spu_thread_connect_event), null_func, null_func, null_func, //194
bind_func(sys_spu_thread_write_spu_mb), bind_func(sys_spu_thread_connect_event), null_func, bind_func(sys_spu_thread_bind_queue), null_func, //194
null_func, null_func, null_func, null_func, null_func, //199
null_func, null_func, null_func, null_func, null_func, //204
null_func, null_func, null_func, null_func, null_func, //209

View File

@ -293,6 +293,7 @@ extern int sys_tty_write(u32 ch, u64 buf_addr, u32 len, u64 pwritelen_addr);
//sys_heap
extern int sys_heap_create_heap(const u32 heap_addr, const u32 start_addr, const u32 size);
extern int sys_heap_malloc(const u32 heap_addr, const u32 size);
extern int _sys_heap_memalign(u32 heap_id, u32 align, u32 size, u64 p4);
//sys_spu
extern int sys_spu_image_open(mem_ptr_t<sys_spu_image> img, u32 path_addr);
@ -312,6 +313,7 @@ extern int sys_spu_thread_write_spu_mb(u32 id, u32 value);
extern int sys_spu_thread_set_spu_cfg(u32 id, u64 value);
extern int sys_spu_thread_get_spu_cfg(u32 id, mem64_t value);
extern int sys_spu_thread_write_snr(u32 id, u32 number, u32 value);
extern int sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num);
//sys_time
extern int sys_time_get_timezone(mem32_t timezone, mem32_t summertime);

View File

@ -20,7 +20,10 @@ struct HeapInfo
int sys_heap_create_heap(const u32 heap_addr, const u32 align, const u32 size)
{
sc_heap.Warning("sys_heap_create_heap(heap_addr=0x%x, align=0x%x, size=0x%x)", heap_addr, align, size);
return sc_heap.GetNewId(new HeapInfo(heap_addr, align, size));
u32 heap_id = sc_heap.GetNewId(new HeapInfo(heap_addr, align, size));
sc_heap.Warning("*** sys_heap created(): id=0x%x", heap_id);
return heap_id;
}
int sys_heap_malloc(const u32 heap_id, const u32 size)
@ -32,3 +35,13 @@ int sys_heap_malloc(const u32 heap_id, const u32 size)
return Memory.Alloc(size, heap->align);
}
int _sys_heap_memalign(u32 heap_id, u32 align, u32 size, u64 p4)
{
sc_heap.Warning("_sys_heap_memalign(heap_id=0x%x, align=0x%x, size=0x%x, p4=0x%llx, ... ???)", heap_id, align, size, p4);
HeapInfo* heap;
if(!sc_heap.CheckId(heap_id, heap)) return CELL_ESRCH;
return Memory.Alloc(size, align);
}

View File

@ -6,7 +6,7 @@ SysCallBase sc_mem("memory");
int sys_memory_container_create(u32 cid_addr, u32 yield_size)
{
sc_mem.Warning("(HACK!) sys_memory_container_create(cid_addr=0x%x,yield_size=0x%x)", cid_addr, yield_size);
sc_mem.Warning("sys_memory_container_create(cid_addr=0x%x,yield_size=0x%x)", cid_addr, yield_size);
if(!Memory.IsGoodAddr(cid_addr, 4))
{
@ -15,17 +15,13 @@ int sys_memory_container_create(u32 cid_addr, u32 yield_size)
yield_size &= ~0xfffff; //round down to 1 MB granularity
//alignment hack (Memory.Alloc does not support alignment yet): alloc size is increased
u64 addr = Memory.Alloc(yield_size + 0x100000, 0x100000); //1 MB alignment (???)
u64 addr = Memory.Alloc(yield_size, 0x100000); //1 MB alignment
if(!addr)
{
return CELL_ENOMEM;
}
//fix alignment:
addr = (addr + 0x100000) & ~0xfffff;
Memory.Write32(cid_addr, sc_mem.GetNewId(new MemoryContainerInfo(addr, yield_size)));
return CELL_OK;
}
@ -49,20 +45,18 @@ int sys_memory_container_destroy(u32 cid)
int sys_memory_allocate(u32 size, u32 flags, u32 alloc_addr_addr)
{
//0x30000100;
sc_mem.Warning("(HACK!) sys_memory_allocate(size=0x%x, flags=0x%x)", size, flags);
sc_mem.Warning("sys_memory_allocate(size=0x%x, flags=0x%x)", size, flags);
u32 addr;
switch(flags)
{
case SYS_MEMORY_PAGE_SIZE_1M:
if(size & 0xfffff) return CELL_EALIGN;
addr = Memory.Alloc(size + 0x100000, 0x100000);
addr = (addr + 0x100000) & ~0xfffff;
addr = Memory.Alloc(size, 0x100000);
break;
case SYS_MEMORY_PAGE_SIZE_64K:
if(size & 0xffff) return CELL_EALIGN;
addr = Memory.Alloc(size + 0x10000, 0x10000);
addr = (addr + 0x10000) & ~0xffff;
addr = Memory.Alloc(size, 0x10000);
break;
default: return CELL_EINVAL;

View File

@ -40,7 +40,7 @@ u32 LoadSpuImage(vfsStream& stream, u32& spu_ep)
ELFLoader l(stream);
l.LoadInfo();
const u32 alloc_size = 256 * 1024 /*0x1000000 - stream.GetSize()*/;
u32 spu_offset = Memory.MainMem.Alloc(alloc_size);
u32 spu_offset = Memory.MainMem.AllocAlign(alloc_size);
l.LoadData(spu_offset);
spu_ep = l.GetEntry();
return spu_offset;
@ -133,7 +133,7 @@ int sys_spu_thread_initialize(mem32_t thread, u32 group, u32 spu_num, mem_ptr_t<
CPUThread& new_thread = Emu.GetCPU().AddThread(CPU_THREAD_SPU);
//copy SPU image:
u32 spu_offset = Memory.MainMem.Alloc(256 * 1024);
u32 spu_offset = Memory.MainMem.AllocAlign(256 * 1024);
memcpy(Memory + spu_offset, Memory + (u32)img->segs_addr, 256 * 1024);
//initialize from new place:
new_thread.SetOffset(spu_offset);
@ -537,3 +537,10 @@ int sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, u32
}
return CELL_OK;
}
int sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
{
sc_spu.Warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num);
return CELL_OK;
}

View File

@ -305,7 +305,7 @@ void Emulator::Load()
ConLog.Write("offset = 0x%llx", Memory.MainMem.GetStartAddr());
ConLog.Write("max addr = 0x%x", l.GetMaxAddr());
thread.SetOffset(Memory.MainMem.GetStartAddr());
Memory.MainMem.Alloc(Memory.MainMem.GetStartAddr() + l.GetMaxAddr(), 0xFFFFED - l.GetMaxAddr());
Memory.MainMem.AllocFixed(Memory.MainMem.GetStartAddr() + l.GetMaxAddr(), 0xFFFFED - l.GetMaxAddr());
thread.SetEntry(l.GetEntry() - Memory.MainMem.GetStartAddr());
break;
@ -314,12 +314,12 @@ void Emulator::Load()
m_ppu_callback_thr = &GetCPU().AddThread(CPU_THREAD_PPU);
thread.SetEntry(l.GetEntry());
Memory.StackMem.Alloc(0x1000);
Memory.StackMem.AllocAlign(0x1000);
thread.InitStack();
thread.AddArgv(m_elf_path);
//thread.AddArgv("-emu");
m_rsx_callback = Memory.MainMem.Alloc(4 * 4) + 4;
m_rsx_callback = Memory.MainMem.AllocAlign(4 * 4) + 4;
Memory.Write32(m_rsx_callback - 4, m_rsx_callback);
mem32_ptr_t callback_data(m_rsx_callback);
@ -327,7 +327,7 @@ void Emulator::Load()
callback_data += SC(2);
callback_data += BCLR(0x10 | 0x04, 0, 0, 0);
m_ppu_thr_exit = Memory.MainMem.Alloc(4 * 4);
m_ppu_thr_exit = Memory.MainMem.AllocAlign(4 * 4);
mem32_ptr_t ppu_thr_exit_data(m_ppu_thr_exit);
ppu_thr_exit_data += ADDI(3, 0, 0);

View File

@ -181,9 +181,9 @@ bool ELF32Loader::LoadPhdrData(u64 _offset)
switch(machine)
{
case MACHINE_SPU: Memory.MainMem.Alloc(phdr_arr[i].p_vaddr + offset, phdr_arr[i].p_memsz); break;
case MACHINE_MIPS: Memory.PSPMemory.RAM.Alloc(phdr_arr[i].p_vaddr + offset, phdr_arr[i].p_memsz); break;
case MACHINE_ARM: Memory.PSVMemory.RAM.Alloc(phdr_arr[i].p_vaddr + offset, phdr_arr[i].p_memsz); break;
case MACHINE_SPU: Memory.MainMem.AllocFixed(phdr_arr[i].p_vaddr + offset, phdr_arr[i].p_memsz); break;
case MACHINE_MIPS: Memory.PSPMemory.RAM.AllocFixed(phdr_arr[i].p_vaddr + offset, phdr_arr[i].p_memsz); break;
case MACHINE_ARM: Memory.PSVMemory.RAM.AllocFixed(phdr_arr[i].p_vaddr + offset, phdr_arr[i].p_memsz); break;
default:
continue;

View File

@ -240,7 +240,7 @@ bool ELF64Loader::LoadPhdrData(u64 offset)
case 0x00000001: //LOAD
if(phdr_arr[i].p_memsz)
{
Memory.MainMem.Alloc(offset + phdr_arr[i].p_vaddr, phdr_arr[i].p_memsz);
Memory.MainMem.AllocFixed(offset + phdr_arr[i].p_vaddr, phdr_arr[i].p_memsz);
if(phdr_arr[i].p_filesz)
{
@ -358,8 +358,8 @@ bool ELF64Loader::LoadPhdrData(u64 offset)
ConLog.Write("*** text: 0x%x", stub.s_text);
#endif
static const u32 section = 4 * 3;
u64 tbl = Memory.MainMem.Alloc(stub.s_imports * 4 * 2);
u64 dst = Memory.MainMem.Alloc(stub.s_imports * section);
u64 tbl = Memory.MainMem.AllocAlign(stub.s_imports * 4 * 2);
u64 dst = Memory.MainMem.AllocAlign(stub.s_imports * section);
for(u32 i=0; i<stub.s_imports; ++i)
{