DynamicMemoryBlockBase.h removed

Map() and Unmap(): substantially rewritten.
I checked every memory-related syscall and confirmed that none of them
actually needs memory mirroring. Some unused code was also deleted.
This commit is contained in:
Nekotekina 2014-09-03 01:48:44 +04:00
parent 505dacf152
commit d0e532e7ce
9 changed files with 232 additions and 398 deletions

View File

@ -1,210 +0,0 @@
#pragma once
//DynamicMemoryBlockBase
// Constructs an empty dynamic block: no backing range, zero capacity.
template<typename PT>
DynamicMemoryBlockBase<PT>::DynamicMemoryBlockBase()
: PT()
, m_max_size(0)
{
}
// Returns the total number of bytes currently allocated from this block
// (sum of the sizes of all allocation entries). Serialized on m_lock.
template<typename PT>
const u32 DynamicMemoryBlockBase<PT>::GetUsedSize() const
{
std::lock_guard<std::mutex> lock(m_lock);
u32 size = 0;
for (u32 i = 0; i<m_allocated.size(); ++i)
{
size += m_allocated[i].size;
}
return size;
}
// Checks whether a single address lies inside this block's reserved range.
template<typename PT>
bool DynamicMemoryBlockBase<PT>::IsInMyRange(const u64 addr)
{
	return addr >= MemoryBlock::GetStartAddr() && addr < MemoryBlock::GetStartAddr() + GetSize();
}

// Checks whether the whole region [addr, addr + size) lies inside this block.
// Fix: reject size == 0 explicitly — previously `addr + size - 1` underflowed
// for an empty region, so a zero-byte range could be reported as "in range"
// (and subsequently recorded by AllocFixed as a zero-size allocation).
template<typename PT>
bool DynamicMemoryBlockBase<PT>::IsInMyRange(const u64 addr, const u32 size)
{
	return size != 0 && IsInMyRange(addr) && IsInMyRange(addr + size - 1);
}

// An address "belongs" to this block iff it lies inside the reserved range.
template<typename PT>
bool DynamicMemoryBlockBase<PT>::IsMyAddress(const u64 addr)
{
	return IsInMyRange(addr);
}
// Reserves the block's address range. The base class is given a size of 0
// (nothing is committed yet); the page-aligned capacity is kept in m_max_size.
// Returns this on success, nullptr if the base class rejects the range.
template<typename PT>
MemoryBlock* DynamicMemoryBlockBase<PT>::SetRange(const u64 start, const u32 size)
{
std::lock_guard<std::mutex> lock(m_lock);
m_max_size = PAGE_4K(size); // round capacity up to 4 KiB page granularity
if (!MemoryBlock::SetRange(start, 0))
{
assert(0);
return nullptr;
}
return this;
}
// Drops every allocation record and resets the block to its empty state.
template<typename PT>
void DynamicMemoryBlockBase<PT>::Delete()
{
std::lock_guard<std::mutex> lock(m_lock);
m_allocated.clear();
m_max_size = 0;
MemoryBlock::Delete();
}
// Allocates a fixed region at the given address. The start address is rounded
// down and the size rounded up so the final region covers whole 4 KiB pages.
// Fails (returns false) when the region leaves this block's range or when its
// start address falls inside an existing allocation.
// NOTE(review): only the region's *start* is tested against existing entries,
// so a new region can still overlap the tail of a later allocation — confirm
// this is intended.
template<typename PT>
bool DynamicMemoryBlockBase<PT>::AllocFixed(u64 addr, u32 size)
{
size = PAGE_4K(size + (addr & 4095)); // align size
addr &= ~4095; // align start address
if (!IsInMyRange(addr, size))
{
assert(0);
return false;
}
std::lock_guard<std::mutex> lock(m_lock);
for (u32 i = 0; i<m_allocated.size(); ++i)
{
if (addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) return false;
}
AppendMem(addr, size);
return true;
}
// Records a new allocation entry; the caller must already hold m_lock.
template<typename PT>
void DynamicMemoryBlockBase<PT>::AppendMem(u64 addr, u32 size) /* private */
{
m_allocated.emplace_back(addr, size);
}
// First-fit allocator with optional alignment. The size is rounded up to
// 4 KiB pages; alignments of one page or less are satisfied by the page
// granularity itself. For larger alignments the searched window is padded by
// align - 1 bytes so that, once a free window is found, the start address can
// be aligned upward while the allocation still fits inside the window.
// Returns the allocated address, or 0 when no suitable gap exists.
template<typename PT>
u64 DynamicMemoryBlockBase<PT>::AllocAlign(u32 size, u32 align)
{
size = PAGE_4K(size);
u32 exsize;
if (align <= 4096)
{
align = 0; // page granularity is implicit; no extra padding needed
exsize = size;
}
else
{
align &= ~4095; // the alignment itself must be page-aligned
exsize = size + align - 1; // worst-case padding for upward alignment
}
std::lock_guard<std::mutex> lock(m_lock);
for (u64 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
{
bool is_good_addr = true;
// Skip past any existing allocation overlapping [addr, addr + exsize).
for (u32 i = 0; i<m_allocated.size(); ++i)
{
if ((addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) ||
(m_allocated[i].addr >= addr && m_allocated[i].addr < addr + exsize))
{
is_good_addr = false;
addr = m_allocated[i].addr + m_allocated[i].size; // resume after blocker
break;
}
}
if (!is_good_addr) continue;
if (align)
{
// Safe: the window already reserves align - 1 bytes of slack.
addr = (addr + (align - 1)) & ~(align - 1);
}
//LOG_NOTICE(MEMORY, "AllocAlign(size=0x%x) -> 0x%llx", size, addr);
AppendMem(addr, size);
return addr;
}
return 0;
}
// Allocates all of the block's remaining capacity as one region.
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Alloc()
{
return AllocAlign(GetSize() - GetUsedSize()) != 0;
}
// Frees the allocation that starts exactly at addr. Returns false (and trips
// an assert in debug builds) when no allocation starts at that address.
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Free(u64 addr)
{
std::lock_guard<std::mutex> lock(m_lock);
for (u32 num = 0; num < m_allocated.size(); num++)
{
if (addr == m_allocated[num].addr)
{
//LOG_NOTICE(MEMORY, "Free(0x%llx)", addr);
m_allocated.erase(m_allocated.begin() + num);
return true;
}
}
//LOG_ERROR(MEMORY, "DynamicMemoryBlock::Free(addr=0x%llx): failed", addr);
//for (u32 i = 0; i < m_allocated.size(); i++)
//{
// LOG_NOTICE(MEMORY, "*** Memory Block: addr = 0x%llx, size = 0x%x", m_allocated[i].addr, m_allocated[i].size);
//}
assert(!"DynamicMemoryBlock::Free() failed");
return false;
}
// Translates a guest address into a host pointer via the base block.
template<typename PT>
u8* DynamicMemoryBlockBase<PT>::GetMem(u64 addr) const
{
return MemoryBlock::GetMem(addr);
}
// Page locking is not implemented; the stubs below assert in debug builds and
// report failure in release builds.
template<typename PT>
bool DynamicMemoryBlockBase<PT>::IsLocked(u64 addr)
{
// TODO
assert(0);
return false;
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Lock(u64 addr, u32 size)
{
// TODO
assert(0);
return false;
}
template<typename PT>
bool DynamicMemoryBlockBase<PT>::Unlock(u64 addr, u32 size)
{
// TODO
assert(0);
return false;
}

View File

@ -24,7 +24,7 @@ void MemoryBase::InvalidAddress(const char* func, const u64 addr)
void MemoryBase::RegisterPages(u64 addr, u32 size)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
//LOG_NOTICE(MEMORY, "RegisterPages(addr=0x%llx, size=0x%x)", addr, size);
for (u64 i = addr / 4096; i < (addr + size) / 4096; i++)
@ -45,7 +45,7 @@ void MemoryBase::RegisterPages(u64 addr, u32 size)
void MemoryBase::UnregisterPages(u64 addr, u32 size)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
//LOG_NOTICE(MEMORY, "UnregisterPages(addr=0x%llx, size=0x%x)", addr, size);
for (u64 i = addr / 4096; i < (addr + size) / 4096; i++)
@ -66,7 +66,7 @@ void MemoryBase::UnregisterPages(u64 addr, u32 size)
u32 MemoryBase::InitRawSPU(MemoryBlock* raw_spu)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
u32 index;
for (index = 0; index < sizeof(RawSPUMem) / sizeof(RawSPUMem[0]); index++)
@ -84,7 +84,7 @@ u32 MemoryBase::InitRawSPU(MemoryBlock* raw_spu)
void MemoryBase::CloseRawSPU(MemoryBlock* raw_spu, const u32 num)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
for (int i = 0; i < MemoryBlocks.size(); ++i)
{
@ -99,7 +99,7 @@ void MemoryBase::CloseRawSPU(MemoryBlock* raw_spu, const u32 num)
void MemoryBase::Init(MemoryType type)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
if (m_inited) return;
m_inited = true;
@ -152,7 +152,7 @@ void MemoryBase::Init(MemoryType type)
void MemoryBase::Close()
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
if (!m_inited) return;
m_inited = false;
@ -167,24 +167,12 @@ void MemoryBase::Close()
RSXIOMem.Delete();
MemoryBlocks.clear();
//#ifdef _WIN32
// if (!VirtualFree(m_base_addr, 0, MEM_RELEASE))
// {
// LOG_ERROR(MEMORY, "VirtualFree(0x%llx) failed", (u64)m_base_addr);
// }
//#else
// if (::munmap(m_base_addr, 0x100000000))
// {
// LOG_ERROR(MEMORY, "::munmap(0x%llx) failed", (u64)m_base_addr);
// }
//#endif
}
void MemoryBase::WriteMMIO32(u32 addr, const u32 data)
{
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] &&
((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Write32(addr, data))
@ -200,7 +188,7 @@ u32 MemoryBase::ReadMMIO32(u32 addr)
{
u32 res;
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
if (RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET] &&
((RawSPUThread*)RawSPUMem[(addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET])->Read32(addr, &res))
@ -213,35 +201,38 @@ u32 MemoryBase::ReadMMIO32(u32 addr)
return res;
}
bool MemoryBase::Map(const u64 dst_addr, const u64 src_addr, const u32 size)
bool MemoryBase::Map(const u64 addr, const u32 size)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
if (IsGoodAddr(dst_addr) || !IsGoodAddr(src_addr))
if ((u32)addr != addr || (u64)addr + (u64)size > 0x100000000ull)
{
return false;
}
else
{
for (u32 i = (u32)addr / 4096; i <= ((u32)addr + size - 1) / 4096; i++)
{
if (m_pages[i]) return false;
}
}
MemoryBlocks.push_back((new MemoryMirror())->SetRange(GetMemFromAddr(src_addr), dst_addr, size));
LOG_WARNING(MEMORY, "memory mapped 0x%llx to 0x%llx size=0x%x", src_addr, dst_addr, size);
MemoryBlocks.push_back((new MemoryBlock())->SetRange(addr, size));
LOG_WARNING(MEMORY, "MemoryBase::Map(0x%llx, 0x%x)", addr, size);
return true;
}
bool MemoryBase::Unmap(const u64 addr)
{
std::lock_guard<std::recursive_mutex> lock(m_mutex);
LV2_LOCK();
bool result = false;
for (uint i = 0; i<MemoryBlocks.size(); ++i)
for (u32 i = 0; i < MemoryBlocks.size(); i++)
{
if (MemoryBlocks[i]->IsMirror())
if (MemoryBlocks[i]->GetStartAddr() == addr)
{
if (MemoryBlocks[i]->GetStartAddr() == addr)
{
delete MemoryBlocks[i];
MemoryBlocks.erase(MemoryBlocks.begin() + i);
return true;
}
delete MemoryBlocks[i];
MemoryBlocks.erase(MemoryBlocks.begin() + i);
return true;
}
}
return false;
@ -361,6 +352,195 @@ bool MemoryBlock::IsMyAddress(const u64 addr)
return mem && addr >= GetStartAddr() && addr < GetEndAddr();
}
// Constructs an empty dynamic block: no backing range, zero capacity.
DynamicMemoryBlockBase::DynamicMemoryBlockBase()
: MemoryBlock()
, m_max_size(0)
{
}
// Returns the total number of bytes currently allocated from this block.
// NOTE(review): verify that LV2_LOCK() binds its lock_guard to a *named*
// object — an unnamed temporary would be destroyed at the end of the
// statement, leaving the loop below unsynchronized.
const u32 DynamicMemoryBlockBase::GetUsedSize() const
{
LV2_LOCK();
u32 size = 0;
for (u32 i = 0; i<m_allocated.size(); ++i)
{
size += m_allocated[i].size;
}
return size;
}
// Checks whether a single address lies inside this block's reserved range.
bool DynamicMemoryBlockBase::IsInMyRange(const u64 addr)
{
	return addr >= MemoryBlock::GetStartAddr() && addr < MemoryBlock::GetStartAddr() + GetSize();
}

// Checks whether the whole region [addr, addr + size) lies inside this block.
// Fix: reject size == 0 explicitly — previously `addr + size - 1` underflowed
// for an empty region, so a zero-byte range could be reported as "in range"
// (and subsequently recorded by AllocFixed as a zero-size allocation).
bool DynamicMemoryBlockBase::IsInMyRange(const u64 addr, const u32 size)
{
	return size != 0 && IsInMyRange(addr) && IsInMyRange(addr + size - 1);
}

// An address "belongs" to this block iff it lies inside the reserved range.
bool DynamicMemoryBlockBase::IsMyAddress(const u64 addr)
{
	return IsInMyRange(addr);
}
// Reserves the block's address range. The base class is given a size of 0
// (nothing is committed yet); the page-aligned capacity is kept in m_max_size.
// Returns this on success, nullptr if the base class rejects the range.
MemoryBlock* DynamicMemoryBlockBase::SetRange(const u64 start, const u32 size)
{
LV2_LOCK();
m_max_size = PAGE_4K(size); // round capacity up to 4 KiB page granularity
if (!MemoryBlock::SetRange(start, 0))
{
assert(0);
return nullptr;
}
return this;
}
// Drops every allocation record and resets the block to its empty state.
void DynamicMemoryBlockBase::Delete()
{
LV2_LOCK();
m_allocated.clear();
m_max_size = 0;
MemoryBlock::Delete();
}
// Allocates a fixed region at the given address. The start address is rounded
// down and the size rounded up so the final region covers whole 4 KiB pages.
// Fails (returns false) when the region leaves this block's range or when its
// start address falls inside an existing allocation.
// NOTE(review): only the region's *start* is tested against existing entries,
// so a new region can still overlap the tail of a later allocation — confirm
// this is intended.
bool DynamicMemoryBlockBase::AllocFixed(u64 addr, u32 size)
{
size = PAGE_4K(size + (addr & 4095)); // align size
addr &= ~4095; // align start address
if (!IsInMyRange(addr, size))
{
assert(0);
return false;
}
LV2_LOCK();
for (u32 i = 0; i<m_allocated.size(); ++i)
{
if (addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) return false;
}
AppendMem(addr, size);
return true;
}
// Records a new allocation entry; the caller must already hold the LV2 lock.
void DynamicMemoryBlockBase::AppendMem(u64 addr, u32 size) /* private */
{
m_allocated.emplace_back(addr, size);
}
// First-fit allocator with optional alignment. The size is rounded up to
// 4 KiB pages; alignments of one page or less are satisfied by the page
// granularity itself. For larger alignments the searched window is padded by
// align - 1 bytes so that, once a free window is found, the start address can
// be aligned upward while the allocation still fits inside the window.
// Returns the allocated address, or 0 when no suitable gap exists.
u64 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
{
size = PAGE_4K(size);
u32 exsize;
if (align <= 4096)
{
align = 0; // page granularity is implicit; no extra padding needed
exsize = size;
}
else
{
align &= ~4095; // the alignment itself must be page-aligned
exsize = size + align - 1; // worst-case padding for upward alignment
}
LV2_LOCK();
for (u64 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
{
bool is_good_addr = true;
// Skip past any existing allocation overlapping [addr, addr + exsize).
for (u32 i = 0; i<m_allocated.size(); ++i)
{
if ((addr >= m_allocated[i].addr && addr < m_allocated[i].addr + m_allocated[i].size) ||
(m_allocated[i].addr >= addr && m_allocated[i].addr < addr + exsize))
{
is_good_addr = false;
addr = m_allocated[i].addr + m_allocated[i].size; // resume after blocker
break;
}
}
if (!is_good_addr) continue;
if (align)
{
// Safe: the window already reserves align - 1 bytes of slack.
addr = (addr + (align - 1)) & ~(align - 1);
}
//LOG_NOTICE(MEMORY, "AllocAlign(size=0x%x) -> 0x%llx", size, addr);
AppendMem(addr, size);
return addr;
}
return 0;
}
// Allocates all of the block's remaining capacity as one region.
bool DynamicMemoryBlockBase::Alloc()
{
return AllocAlign(GetSize() - GetUsedSize()) != 0;
}
// Frees the allocation that starts exactly at addr. On failure, logs an error
// plus a dump of the current allocation list, then returns false.
bool DynamicMemoryBlockBase::Free(u64 addr)
{
LV2_LOCK();
for (u32 num = 0; num < m_allocated.size(); num++)
{
if (addr == m_allocated[num].addr)
{
//LOG_NOTICE(MEMORY, "Free(0x%llx)", addr);
m_allocated.erase(m_allocated.begin() + num);
return true;
}
}
LOG_ERROR(MEMORY, "DynamicMemoryBlock::Free(addr=0x%llx): failed", addr);
for (u32 i = 0; i < m_allocated.size(); i++)
{
LOG_NOTICE(MEMORY, "*** Memory Block: addr = 0x%llx, size = 0x%x", m_allocated[i].addr, m_allocated[i].size);
}
return false;
}
// Translates a guest address into a host pointer via the base block.
u8* DynamicMemoryBlockBase::GetMem(u64 addr) const
{
return MemoryBlock::GetMem(addr);
}
// Page locking is not implemented. `assert(!__FUNCTION__)` always fires in
// debug builds (the function-name literal is never null), flagging any caller
// that reaches these stubs; release builds simply report failure.
bool DynamicMemoryBlockBase::IsLocked(u64 addr)
{
assert(!__FUNCTION__);
return false;
}
bool DynamicMemoryBlockBase::Lock(u64 addr, u32 size)
{
assert(!__FUNCTION__);
return false;
}
bool DynamicMemoryBlockBase::Unlock(u64 addr, u32 size)
{
assert(!__FUNCTION__);
return false;
}
VirtualMemoryBlock::VirtualMemoryBlock() : MemoryBlock(), m_reserve_size(0)
{
}
@ -465,24 +645,6 @@ u32 VirtualMemoryBlock::UnmapAddress(u64 addr)
return 0;
}
bool VirtualMemoryBlock::Read8(const u64 addr, u8* value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
*value = Memory.Read8(realAddr);
return true;
}
bool VirtualMemoryBlock::Read16(const u64 addr, u16* value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
*value = Memory.Read16(realAddr);
return true;
}
bool VirtualMemoryBlock::Read32(const u64 addr, u32* value)
{
u64 realAddr;
@ -492,42 +654,6 @@ bool VirtualMemoryBlock::Read32(const u64 addr, u32* value)
return true;
}
bool VirtualMemoryBlock::Read64(const u64 addr, u64* value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
*value = Memory.Read64(realAddr);
return true;
}
bool VirtualMemoryBlock::Read128(const u64 addr, u128* value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
*value = Memory.Read128(realAddr);
return true;
}
bool VirtualMemoryBlock::Write8(const u64 addr, const u8 value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
Memory.Write8(realAddr, value);
return true;
}
bool VirtualMemoryBlock::Write16(const u64 addr, const u16 value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
Memory.Write16(realAddr, value);
return true;
}
bool VirtualMemoryBlock::Write32(const u64 addr, const u32 value)
{
u64 realAddr;
@ -537,24 +663,6 @@ bool VirtualMemoryBlock::Write32(const u64 addr, const u32 value)
return true;
}
bool VirtualMemoryBlock::Write64(const u64 addr, const u64 value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
Memory.Write64(realAddr, value);
return true;
}
bool VirtualMemoryBlock::Write128(const u64 addr, const u128 value)
{
u64 realAddr;
if(!getRealAddr(addr, realAddr))
return false;
Memory.Write128(realAddr, value);
return true;
}
bool VirtualMemoryBlock::getRealAddr(u64 addr, u64& result)
{
for(u32 i=0; i<m_mapped_memory.size(); ++i)

View File

@ -29,7 +29,6 @@ class MemoryBase
{
std::vector<MemoryBlock*> MemoryBlocks;
u32 m_pages[0x100000000 / 4096]; // information about every page
std::recursive_mutex m_mutex;
public:
MemoryBlock* UserMemory;
@ -60,17 +59,17 @@ public:
struct : Wrapper32LE
{
DynamicMemoryBlockLE RAM;
DynamicMemoryBlockLE Userspace;
DynamicMemoryBlock RAM;
DynamicMemoryBlock Userspace;
} PSV;
struct : Wrapper32LE
{
DynamicMemoryBlockLE Scratchpad;
DynamicMemoryBlockLE VRAM;
DynamicMemoryBlockLE RAM;
DynamicMemoryBlockLE Kernel;
DynamicMemoryBlockLE Userspace;
DynamicMemoryBlock Scratchpad;
DynamicMemoryBlock VRAM;
DynamicMemoryBlock RAM;
DynamicMemoryBlock Kernel;
DynamicMemoryBlock Userspace;
} PSP;
bool m_inited;
@ -391,7 +390,7 @@ public:
return UserMemory->Unlock(addr, size);
}
bool Map(const u64 dst_addr, const u64 src_addr, const u32 size);
bool Map(const u64 addr, const u32 size);
bool Unmap(const u64 addr);

View File

@ -93,9 +93,6 @@ private:
public:
virtual void Delete();
virtual bool IsNULL() { return false; }
virtual bool IsMirror() { return false; }
u64 FixAddr(const u64 addr) const;
virtual MemoryBlock* SetRange(const u64 start, const u32 size);
@ -117,40 +114,8 @@ public:
virtual bool Unlock(u64 addr, u32 size) { return false; }
};
class MemoryBlockLE : public MemoryBlock
class DynamicMemoryBlockBase : public MemoryBlock
{
};
class MemoryMirror : public MemoryBlock
{
public:
virtual bool IsMirror() { return true; }
virtual MemoryBlock* SetRange(const u64 start, const u32 size)
{
range_start = start;
range_size = size;
return this;
}
void SetMemory(u8* memory)
{
mem = memory;
}
MemoryBlock* SetRange(u8* memory, const u64 start, const u32 size)
{
SetMemory(memory);
return SetRange(start, size);
}
};
template<typename PT>
class DynamicMemoryBlockBase : public PT
{
mutable std::mutex m_lock;
std::vector<MemBlockInfo> m_allocated; // allocation info
u32 m_max_size;
@ -215,17 +180,9 @@ public:
// Return the total amount of reserved memory
virtual u32 GetReservedAmount();
bool Read8(const u64 addr, u8* value);
bool Read16(const u64 addr, u16* value);
bool Read32(const u64 addr, u32* value);
bool Read64(const u64 addr, u64* value);
bool Read128(const u64 addr, u128* value);
bool Write8(const u64 addr, const u8 value);
bool Write16(const u64 addr, const u16 value);
bool Write32(const u64 addr, const u32 value);
bool Write64(const u64 addr, const u64 value);
bool Write128(const u64 addr, const u128 value);
// try to get the real address given a mapped address
// return true for success
@ -242,8 +199,5 @@ public:
u64 getMappedAddress(u64 realAddress);
};
#include "DynamicMemoryBlockBase.h"
typedef DynamicMemoryBlockBase<MemoryBlock> DynamicMemoryBlock;
typedef DynamicMemoryBlockBase<MemoryBlockLE> DynamicMemoryBlockLE;
typedef DynamicMemoryBlockBase DynamicMemoryBlock;

View File

@ -58,30 +58,24 @@ s32 sys_mmapper_allocate_memory(u32 size, u64 flags, vm::ptr<be_t<u32>> mem_id)
sys_mmapper.Warning("sys_mmapper_allocate_memory(size=0x%x, flags=0x%llx, mem_id_addr=0x%x)", size, flags, mem_id.addr());
// Check page granularity.
u32 addr;
switch(flags & (SYS_MEMORY_PAGE_SIZE_1M | SYS_MEMORY_PAGE_SIZE_64K))
{
case SYS_MEMORY_PAGE_SIZE_1M:
if(size & 0xfffff)
return CELL_EALIGN;
addr = (u32)Memory.Alloc(size, 0x100000);
break;
case SYS_MEMORY_PAGE_SIZE_64K:
if(size & 0xffff)
return CELL_EALIGN;
addr = (u32)Memory.Alloc(size, 0x10000);
break;
default:
return CELL_EINVAL;
}
if(!addr)
return CELL_ENOMEM;
// Generate a new mem ID.
*mem_id = sys_mmapper.GetNewId(new mmapper_info(addr, size, flags));
*mem_id = sys_mmapper.GetNewId(new mmapper_info(size, flags));
return CELL_OK;
}
@ -102,25 +96,21 @@ s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm:
case SYS_MEMORY_PAGE_SIZE_1M:
if(size & 0xfffff)
return CELL_EALIGN;
ct->addr = (u32)Memory.Alloc(size, 0x100000);
break;
case SYS_MEMORY_PAGE_SIZE_64K:
if(size & 0xffff)
return CELL_EALIGN;
ct->addr = (u32)Memory.Alloc(size, 0x10000);
break;
default:
return CELL_EINVAL;
}
if(!ct->addr)
return CELL_ENOMEM;
ct->size = size;
// Generate a new mem ID.
*mem_id = sys_mmapper.GetNewId(new mmapper_info(ct->addr, ct->size, flags), TYPE_MEM);
*mem_id = sys_mmapper.GetNewId(new mmapper_info(ct->size, flags), TYPE_MEM);
return CELL_OK;
}
@ -153,7 +143,6 @@ s32 sys_mmapper_free_memory(u32 mem_id)
return CELL_ESRCH;
// Release the allocated memory and remove the ID.
Memory.Free(info->addr);
sys_mmapper.RemoveId(mem_id);
return CELL_OK;
@ -169,7 +158,7 @@ s32 sys_mmapper_map_memory(u32 start_addr, u32 mem_id, u64 flags)
return CELL_ESRCH;
// Map the memory into the process address.
if(!Memory.Map(start_addr, info->addr, info->size))
if(!Memory.Map(start_addr, info->size))
sys_mmapper.Error("sys_mmapper_map_memory failed!");
// Keep track of mapped addresses.
@ -194,7 +183,7 @@ s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, u32 alloc_
for (int i = 0; i < SYS_MMAPPER_FIXED_SIZE; i += 0x100000)
{
addr = start_addr + i;
found = Memory.Map(addr, info->addr, info->size);
found = Memory.Map(addr, info->size);
if(found)
{
sys_mmapper.Warning("Found and mapped address 0x%x", addr);

View File

@ -5,13 +5,11 @@
struct mmapper_info
{
u32 addr;
u32 size;
u64 flags;
mmapper_info(u32 _addr, u32 _size, u64 _flags)
: addr(_addr)
, size(_size)
mmapper_info(u32 _size, u64 _flags)
: size(_size)
, flags(_flags)
{
}

View File

@ -175,7 +175,7 @@ public:
__forceinline bool IsReady() const { return m_status == Ready; }
};
#define LV2_LOCK(x) std::lock_guard<std::recursive_mutex> x(Emu.GetCoreMutex())
// Acquire the global LV2 (core) mutex for the remainder of the enclosing
// scope. The guard MUST be bound to a named object: the previous expansion
// created an unnamed temporary std::lock_guard, which is destroyed at the end
// of the full expression — the mutex was released immediately, so every call
// site was effectively running unlocked.
#define LV2_LOCK() std::lock_guard<std::recursive_mutex> lv2_lock(Emu.GetCoreMutex())
extern Emulator Emu;

View File

@ -308,7 +308,6 @@
<ClInclude Include="Emu\Io\Null\NullPadHandler.h" />
<ClInclude Include="Emu\Io\Pad.h" />
<ClInclude Include="Emu\Io\PadHandler.h" />
<ClInclude Include="Emu\Memory\DynamicMemoryBlockBase.h" />
<ClInclude Include="Emu\Memory\Memory.h" />
<ClInclude Include="Emu\Memory\MemoryBlock.h" />
<ClInclude Include="Emu\RSX\GCM.h" />

View File

@ -1057,9 +1057,6 @@
<ClInclude Include="Emu\SysCalls\Modules\cellMsgDialog.h">
<Filter>Emu\SysCalls\Modules</Filter>
</ClInclude>
<ClInclude Include="Emu\Memory\DynamicMemoryBlockBase.h">
<Filter>Emu\Memory</Filter>
</ClInclude>
<ClInclude Include="Emu\SysCalls\Modules\cellSync.h">
<Filter>Emu\SysCalls\Modules</Filter>
</ClInclude>